Merge branch '2015.5' of https://github.com/saltstack/salt into 2015.5

Commit e7442d3b1e: 42 changed files with 1021 additions and 354 deletions
@@ -28,14 +28,16 @@ This has also been tested to work with pipes, if needed:

        script_args: | head

-Use SFTP to transfer files
-==========================
+Selecting the File Transport
+============================

-Some distributions do not have scp distributed with the ssh package. The
-solution is to use sftp with the `use_sftp` flag
+By default, Salt Cloud uses SFTP to transfer files to Linux hosts. However, if
+SFTP is not available, or specific SCP functionality is needed, Salt Cloud can
+be configured to use SCP instead.

 .. code-block:: yaml

-    use_sftp: True
+    file_transport: sftp
+    file_transport: scp

 Sync After Install
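The hunk above only names the new option; the sketch below is a hedged illustration of where it might be placed. The profile and provider names are placeholders, not part of this change.

.. code-block:: yaml

    # Hypothetical /etc/salt/cloud entry, applying to every deployment:
    file_transport: scp

    # Or per profile (names below are illustrative):
    my-scp-profile:
      provider: my-ec2-config
      image: ami-10314d79
      file_transport: scp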
doc/topics/releases/2014.7.6.rst (new file, 13 lines)

@@ -0,0 +1,13 @@
===========================
Salt 2014.7.6 Release Notes
===========================

:release: TBA

Version 2014.7.6 is a bugfix release for :doc:`2014.7.0
</topics/releases/2014.7.0>`.

Changes:

- salt.runners.cloud.action() has changed the `fun` keyword argument to `func`.
  Please update any calls to this function in the cloud runner.
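A hedged illustration of the renamed keyword argument; the instance name is a placeholder and the exact CLI form is an assumption, not taken from the release note.

.. code-block:: bash

    # previously: salt-run cloud.action fun=show_instance instances=web01
    salt-run cloud.action func=show_instance instances=web01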
@@ -351,7 +351,7 @@ class CloudClient(object):
        '''
        Destroy the named VMs
        '''
-       mapper = salt.cloud.Map(self._opts_defaults())
+       mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
        if isinstance(names, str):
            names = names.split(',')
        return salt.utils.cloud.simple_types_filter(

@@ -446,7 +446,7 @@ class CloudClient(object):
                kwargs={'image': 'ami-10314d79'}
            )
        '''
-       mapper = salt.cloud.Map(self._opts_defaults(action=fun))
+       mapper = salt.cloud.Map(self._opts_defaults(action=fun, names=names))
        if names and not provider:
            self.opts['action'] = fun
            return mapper.do_action(names, kwargs)
@@ -377,7 +377,7 @@ def destroy(vm_, call=None):
        transport=__opts__['transport']
    )
    cret = _salt('lxc.destroy', vm_, stop=True)
-   ret['result'] = cret['change']
+   ret['result'] = cret['result']
    if ret['result']:
        ret['comment'] = '{0} was destroyed'.format(vm_)
        salt.utils.cloud.fire_event(

@@ -424,10 +424,22 @@ def create(vm_, call=None):
        kwarg['host'] = prov['target']
    cret = _runner().cmd('lxc.cloud_init', [vm_['name']], kwarg=kwarg)
    ret['runner_return'] = cret
-   if cret['result']:
-       ret['result'] = False
+   ret['result'] = cret['result']
+   if not ret['result']:
        ret['Error'] = 'Error while creating {0},'.format(vm_['name'])
    else:
        ret['changes']['created'] = 'created'

+   # When using cloud states to manage LXC containers
+   # __opts__['profile'] is not implicitly reset between operations
+   # on different containers. However list_nodes will hide container
+   # if profile is set in opts assuming that it have to be created.
+   # But in cloud state we do want to check at first if it really
+   # exists hence the need to remove profile from global opts once
+   # current container is created.
+   if 'profile' in __opts__:
+       del __opts__['profile']
+
    return ret
@@ -13,8 +13,6 @@ file. However, profiles must still be configured, as described in the
from __future__ import absolute_import

# Import python libs
-import os
-import copy
import logging

# Import salt libs

@@ -23,7 +21,6 @@ import salt.utils
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
-from salt.exceptions import SaltCloudConfigError

# Get logging started
log = logging.getLogger(__name__)

@@ -64,157 +61,11 @@ def create(vm_):
    '''
    Provision a single machine
    '''
    if config.get_cloud_config_value('deploy', vm_, __opts__) is False:
        return {
            'Error': {
                'No Deploy': '\'deploy\' is not enabled. Not deploying.'
            }
        }

    key_filename = config.get_cloud_config_value(
        'key_filename', vm_, __opts__, search_global=False, default=None
    )

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_keyfile {0!r} does not exist'.format(
                key_filename
            )
        )

    ret = {}

    log.info('Provisioning existing machine {0}'.format(vm_['name']))

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__)
    deploy_script = script(vm_)
    deploy_kwargs = {
        'opts': __opts__,
        'host': vm_['ssh_host'],
        'username': ssh_username,
        'script': deploy_script,
        'name': vm_['name'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
        ),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'sock_dir': __opts__['sock_dir'],
        'conf_file': __opts__['conf_file'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')
        ),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None
        ),
        'tty': config.get_cloud_config_value(
            'tty', vm_, __opts__, default=True
        ),
        'password': config.get_cloud_config_value(
            'password', vm_, __opts__, search_global=False
        ),
        'key_filename': key_filename,
        'script_args': config.get_cloud_config_value('script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True
        )
    }
    if 'ssh_port' in vm_:
        deploy_kwargs.update({'port': vm_['ssh_port']})
    if 'salt_host' in vm_:
        deploy_kwargs.update({'salt_host': vm_['salt_host']})
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)

    # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
    if 'gateway' in vm_:
        deploy_kwargs.update({'gateway': vm_['gateway']})

    # Deploy salt-master files, if necessary
    if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
        deploy_kwargs['make_master'] = True
        deploy_kwargs['master_pub'] = vm_['master_pub']
        deploy_kwargs['master_pem'] = vm_['master_pem']
        master_conf = salt.utils.cloud.master_config(__opts__, vm_)
        deploy_kwargs['master_conf'] = master_conf

        if master_conf.get('syndic_master', None):
            deploy_kwargs['make_syndic'] = True

    deploy_kwargs['make_minion'] = config.get_cloud_config_value(
        'make_minion', vm_, __opts__, default=True
    )

    win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
    if win_installer:
        deploy_kwargs['win_installer'] = win_installer
        minion = salt.utils.cloud.minion_config(__opts__, vm_)
        deploy_kwargs['master'] = minion['master']
        deploy_kwargs['username'] = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator'
        )
        deploy_kwargs['password'] = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default=''
        )

    # Store what was used to the deploy the VM
    event_kwargs = copy.deepcopy(deploy_kwargs)
    del event_kwargs['minion_pem']
    del event_kwargs['minion_pub']
    del event_kwargs['sudo_password']
    if 'password' in event_kwargs:
        del event_kwargs['password']
    ret['deploy_kwargs'] = event_kwargs

    salt.utils.cloud.fire_event(
        'event',
        'executing deploy script',
        'salt/cloud/{0}/deploying'.format(vm_['name']),
        {'kwargs': event_kwargs},
        transport=__opts__['transport']
    )

    if win_installer:
        deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
    else:
        deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

    if deployed:
        ret['deployed'] = deployed
        log.info('Salt installed on {0}'.format(vm_['name']))
        return ret

    log.error('Failed to start Salt on host {0}'.format(vm_['name']))
    return {
        'Error': {
            'Not Deployed': 'Failed to start Salt on host {0}'.format(
                vm_['name']
            )
        }
    }


def script(vm_):
    '''
    Return the script deployment object
    '''
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
    return ret


def get_configured_provider():
@@ -48,7 +48,7 @@ import salt.ext.six as six
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin
HAS_PSUTIL = False
try:
-    import psutil
+    import salt.utils.psutil_compat as psutil
    HAS_PSUTIL = True
except ImportError:
    pass

@@ -520,7 +520,7 @@ class SaltLoadModules(ioflo.base.deeding.Deed):
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
-           rss, vms = psutil.Process(os.getpid()).get_memory_info()
+           rss, vms = psutil.Process(os.getpid()).memory_info()
            mem_limit = rss + vms + self.opts.value['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.value.get('modules_max_memory', -1) > 0:
salt/ext/win_inet_pton.py (new file, 85 lines)

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# This software released into the public domain. Anyone is free to copy,
# modify, publish, use, compile, sell, or distribute this software,
# either in source code form or as a compiled binary, for any purpose,
# commercial or non-commercial, and by any means.

import socket
import ctypes
import os


class sockaddr(ctypes.Structure):
    _fields_ = [("sa_family", ctypes.c_short),
                ("__pad1", ctypes.c_ushort),
                ("ipv4_addr", ctypes.c_byte * 4),
                ("ipv6_addr", ctypes.c_byte * 16),
                ("__pad2", ctypes.c_ulong)]

if hasattr(ctypes, 'windll'):
    WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA
    WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA
else:
    def not_windows():
        raise SystemError(
            "Invalid platform. ctypes.windll must be available."
        )
    WSAStringToAddressA = not_windows
    WSAAddressToStringA = not_windows


def inet_pton(address_family, ip_string):
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = ctypes.c_int(ctypes.sizeof(addr))

    if WSAStringToAddressA(
            ip_string,
            address_family,
            None,
            ctypes.byref(addr),
            ctypes.byref(addr_size)
    ) != 0:
        raise socket.error(ctypes.FormatError())

    if address_family == socket.AF_INET:
        return ctypes.string_at(addr.ipv4_addr, 4)
    if address_family == socket.AF_INET6:
        return ctypes.string_at(addr.ipv6_addr, 16)

    raise socket.error('unknown address family')


def inet_ntop(address_family, packed_ip):
    addr = sockaddr()
    addr.sa_family = address_family
    addr_size = ctypes.c_int(ctypes.sizeof(addr))
    ip_string = ctypes.create_string_buffer(128)
    ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string))

    if address_family == socket.AF_INET:
        if len(packed_ip) != ctypes.sizeof(addr.ipv4_addr):
            raise socket.error('packed IP wrong length for inet_ntoa')
        ctypes.memmove(addr.ipv4_addr, packed_ip, 4)
    elif address_family == socket.AF_INET6:
        if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr):
            raise socket.error('packed IP wrong length for inet_ntoa')
        ctypes.memmove(addr.ipv6_addr, packed_ip, 16)
    else:
        raise socket.error('unknown address family')

    if WSAAddressToStringA(
            ctypes.byref(addr),
            addr_size,
            None,
            ip_string,
            ctypes.byref(ip_string_size)
    ) != 0:
        raise socket.error(ctypes.FormatError())

    return ip_string[:ip_string_size.value - 1]

# Adding our two functions to the socket library
if os.name == 'nt':
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop
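A minimal usage sketch of the new shim, assuming a Windows host whose Python 2 socket module lacks inet_pton; the import-for-side-effect pattern mirrors how salt/utils/validate/net.py consumes it later in this commit.

.. code-block:: python

    from salt.ext import win_inet_pton  # importing patches socket.inet_pton/inet_ntop on Windows
    import socket

    packed = socket.inet_pton(socket.AF_INET, '127.0.0.1')
    print(socket.inet_ntop(socket.AF_INET, packed))  # '127.0.0.1'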
@@ -42,7 +42,7 @@ except ImportError:

HAS_PSUTIL = False
try:
-    import psutil
+    import salt.utils.psutil_compat as psutil
    HAS_PSUTIL = True
except ImportError:
    pass

@@ -924,7 +924,7 @@ class Minion(MinionBase):
            log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
-           rss, vms = psutil.Process(os.getpid()).get_memory_info()
+           rss, vms = psutil.Process(os.getpid()).memory_info()
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
@@ -6,6 +6,7 @@ A module to wrap (non-Windows) archive calls
'''
from __future__ import absolute_import
import os
+import contextlib  # For < 2.7 compat

# Import salt libs
from salt.exceptions import SaltInvocationError, CommandExecutionError

@@ -510,7 +511,7 @@ def unzip(zip_file, dest, excludes=None, template=None, runas=None):
    # variable from being defined and cause a NameError in the return
    # statement at the end of the function.
    cleaned_files = []
-   with zipfile.ZipFile(zip_file) as zfile:
+   with contextlib.closing(zipfile.ZipFile(zip_file, "r")) as zfile:
        files = zfile.namelist()

        if isinstance(excludes, string_types):
@@ -157,11 +157,18 @@ def item(*args, **kwargs):
        salt '*' grains.item host sanitize=True
    '''
    ret = {}
-   for arg in args:
-       try:
-           ret[arg] = __grains__[arg]
-       except KeyError:
-           pass
+   default = kwargs.get('default', '')
+   delimiter = kwargs.get('delimiter', ':')
+
+   try:
+       for arg in args:
+           ret[arg] = salt.utils.traverse_dict_and_list(__grains__,
+                                                        arg,
+                                                        default,
+                                                        delimiter)
+   except KeyError:
+       pass

    if salt.utils.is_true(kwargs.get('sanitize')):
        for arg, func in _SANITIZERS.items():
            if arg in ret:

@@ -295,7 +302,11 @@ def append(key, val, convert=False, delimiter=':'):
        return 'The key {0} is not a valid list'.format(key)
    if val in grains:
        return 'The val {0} was already in the list {1}'.format(val, key)
-   grains.append(val)
+   if isinstance(val, list):
+       for item in val:
+           grains.append(item)
+   else:
+       grains.append(val)

    while delimiter in key:
        key, rest = key.rsplit(delimiter, 1)
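With the switch to traverse_dict_and_list, grains.item can resolve nested keys using the delimiter; a hedged CLI sketch follows (the grain names are only examples).

.. code-block:: bash

    salt '*' grains.item os ip_interfaces:eth0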
@@ -2635,17 +2635,19 @@ def set_dns(name, dnsservers=None, searchdomains=None):

def _need_install(name):
    ret = 0
-   has_minion = retcode(name, "command -v salt-minion")
+   has_minion = retcode(name,
+                        'which salt-minion',
+                        ignore_retcode=True)
    # we assume that installing is when no minion is running
    # but testing the executable presence is not enougth for custom
    # installs where the bootstrap can do much more than installing
    # the bare salt binaries.
    if has_minion:
-       processes = run_stdout(name, "ps aux")
+       processes = run_stdout(name, 'ps aux')
        if 'salt-minion' not in processes:
            ret = 1
        else:
-           retcode(name, "salt-call --local service.stop salt-minion")
+           retcode(name, 'salt-call --local service.stop salt-minion')
    else:
        ret = 1
    return ret

@@ -2748,7 +2750,9 @@ def bootstrap(name,
        needs_install = _need_install(name)
    else:
        needs_install = True
-   seeded = retcode(name, 'test -e \'{0}\''.format(SEED_MARKER)) == 0
+   seeded = retcode(name,
+                    'test -e \'{0}\''.format(SEED_MARKER),
+                    ignore_retcode=True) == 0
    tmp = tempfile.mkdtemp()
    if seeded and not unconditional_install:
        ret = True

@@ -2838,8 +2842,12 @@ def attachable(name):
        _ensure_exists(name)
        # Can't use run() here because it uses attachable() and would
        # endlessly recurse, resulting in a traceback
        log.debug('Checking if LXC container {0} is attachable'.format(name))
        cmd = 'lxc-attach --clear-env -n {0} -- /usr/bin/env'.format(name)
-       result = __salt__['cmd.retcode'](cmd, python_shell=False) == 0
+       result = __salt__['cmd.retcode'](cmd,
+                                        python_shell=False,
+                                        output_loglevel='quiet',
+                                        ignore_retcode=True) == 0
        __context__['lxc.attachable'] = result
    return __context__['lxc.attachable']

@@ -3401,8 +3409,8 @@ def cp(name, source, dest, makedirs=False):
    if not os.path.isabs(dest):
        raise SaltInvocationError('Destination path must be absolute')
    if retcode(name,
-              'test -d \'{0}\''.format(dest),
-              ignore_retcode=True) == 0:
+               'test -d \'{0}\''.format(dest),
+               ignore_retcode=True) == 0:
        # Destination is a directory, full path to dest file will include the
        # basename of the source file.
        dest = os.path.join(dest, source_name)

@@ -3412,8 +3420,8 @@ def cp(name, source, dest, makedirs=False):
    # parent directory.
    dest_dir, dest_name = os.path.split(dest)
    if retcode(name,
-              'test -d \'{0}\''.format(dest_dir),
-              ignore_retcode=True) != 0:
+               'test -d \'{0}\''.format(dest_dir),
+               ignore_retcode=True) != 0:
        if makedirs:
            result = run_all(name, 'mkdir -p \'{0}\''.format(dest_dir))
            if result['retcode'] != 0:
@@ -1055,7 +1055,7 @@ def user_exists(user,
        qry += ' AND Password = \'\''
    elif password:
        qry += ' AND Password = PASSWORD(%(password)s)'
-       args['password'] = password
+       args['password'] = str(password)
    elif password_hash:
        qry += ' AND Password = %(password)s'
        args['password'] = password_hash

@@ -1167,7 +1167,7 @@ def user_create(user,
        args['host'] = host
    if password is not None:
        qry += ' IDENTIFIED BY %(password)s'
-       args['password'] = password
+       args['password'] = str(password)
    elif password_hash is not None:
        qry += ' IDENTIFIED BY PASSWORD %(password)s'
        args['password'] = password_hash

@@ -1721,6 +1721,11 @@ def grant_revoke(grant,
    # _ and % are authorized on GRANT queries and should get escaped
    # on the db name, but only if not requesting a table level grant
    s_database = quote_identifier(dbc, for_grants=(table is '*'))
+   if dbc is '*':
+       # add revoke for *.*
+       # before the modification query send to mysql will looks like
+       # REVOKE SELECT ON `*`.* FROM %(user)s@%(host)s
+       s_database = dbc
    if table is not '*':
        table = quote_identifier(table)
    # identifiers cannot be used as values, same thing for grants
@@ -28,7 +28,7 @@ def __virtual__():
    '''
    try:
        if salt.utils.which('npm') is not None:
-           _check_valid_version()
+           _check_valid_version(__salt__)
            return True
        else:
            return (False, 'npm execution module could not be loaded '

@@ -37,14 +37,14 @@ def __virtual__():
        return (False, str(exc))


-def _check_valid_version():
+def _check_valid_version(salt):
    '''
    Check the version of npm to ensure this module will work. Currently
    npm must be at least version 1.2.
    '''
    # pylint: disable=no-member
    npm_version = distutils.version.LooseVersion(
-       __salt__['cmd.run']('npm --version'))
+       salt['cmd.run']('npm --version'))
    valid_version = distutils.version.LooseVersion('1.2')
    # pylint: enable=no-member
    if npm_version < valid_version:
@@ -11,7 +11,8 @@ Salt now uses a portable python. As a result the entire pip module is now
functional on the salt installation itself. You can pip install dependencies
for your custom modules. You can even upgrade salt itself using pip. For this
to work properly, you must specify the Current Working Directory (``cwd``) and
-the Pip Binary (``bin_env``) salt should use.
+the Pip Binary (``bin_env``) salt should use. The variable ``pip_bin`` can
+be either a virtualenv path or the path to the pip binary itself.

For example, the following command will list all software installed using pip
to your current salt environment:

@@ -86,9 +87,6 @@ import salt.utils
from salt.ext.six import string_types
from salt.exceptions import CommandExecutionError, CommandNotFoundError

-# It would be cool if we could use __virtual__() in this module, though, since
-# pip can be installed on a virtualenv anywhere on the filesystem, there's no
-# definite way to tell if pip is installed on not.

logger = logging.getLogger(__name__)  # pylint: disable=C0103

@@ -100,10 +98,19 @@ __func_alias__ = {
VALID_PROTOS = ['http', 'https', 'ftp', 'file']


+def __virtual__():
+    '''
+    There is no way to verify that pip is installed without inspecting the
+    entire filesystem. If it's not installed in a conventional location, the
+    user is required to provide the location of pip each time it is used.
+    '''
+    return 'pip'
+
+
def _get_pip_bin(bin_env):
    '''
-   Return the pip command to call, either from a virtualenv, an argument
-   passed in, or from the global modules options
+   Locate the pip binary, either from `bin_env` as a virtualenv, as the
+   executable itself, or from searching conventional filesystem locations
    '''
    if not bin_env:
        which_result = __salt__['cmd.which_bin'](['pip2', 'pip', 'pip-python'])

@@ -113,7 +120,7 @@ def _get_pip_bin(bin_env):
-           return which_result.encode('string-escape')
+           return which_result

-   # try to get pip bin from env
+   # try to get pip bin from virtualenv, bin_env
    if os.path.isdir(bin_env):
        if salt.utils.is_windows():
            pip_bin = os.path.join(bin_env, 'Scripts', 'pip.exe').encode('string-escape')

@@ -121,10 +128,15 @@ def _get_pip_bin(bin_env):
            pip_bin = os.path.join(bin_env, 'bin', 'pip')
        if os.path.isfile(pip_bin):
            return pip_bin
        msg = 'Could not find a `pip` binary in virtualenv {0}'.format(bin_env)
        raise CommandNotFoundError(msg)
    # bin_env is the pip binary
    elif os.access(bin_env, os.X_OK):
        if os.path.isfile(bin_env) or os.path.islink(bin_env):
            return bin_env
        else:
            raise CommandNotFoundError('Could not find a `pip` binary')

    return bin_env


def _process_salt_url(path, saltenv):
    '''

@@ -442,7 +454,9 @@ def install(pkgs=None,  # pylint: disable=R0912,R0913,R0914
        # Backwards compatibility
        saltenv = __env__

-   cmd = [_get_pip_bin(bin_env), 'install']
+   pip_bin = _get_pip_bin(bin_env)
+
+   cmd = [pip_bin, 'install']

    cleanup_requirements, error = _process_requirements(requirements=requirements, cmd=cmd,
                                                        saltenv=saltenv, user=user,

@@ -587,11 +601,7 @@ def install(pkgs=None,  # pylint: disable=R0912,R0913,R0914

    if pre_releases:
        # Check the locally installed pip version
-       pip_version_cmd = '{0} --version'.format(_get_pip_bin(bin_env))
-       output = __salt__['cmd.run_all'](pip_version_cmd,
-                                        use_vt=use_vt,
-                                        python_shell=False).get('stdout', '')
-       pip_version = output.split()[1]
+       pip_version = version(pip_bin)

        # From pip v1.4 the --pre flag is available
        if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='1.4'):

@@ -743,7 +753,9 @@ def uninstall(pkgs=None,
        salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin

    '''
-   cmd = [_get_pip_bin(bin_env), 'uninstall', '-y']
+   pip_bin = _get_pip_bin(bin_env)
+
+   cmd = [pip_bin, 'uninstall', '-y']

    if isinstance(__env__, string_types):
        salt.utils.warn_until(

@@ -836,7 +848,9 @@ def freeze(bin_env=None,

        salt '*' pip.freeze /home/code/path/to/virtualenv/
    '''
-   cmd = [_get_pip_bin(bin_env), 'freeze']
+   pip_bin = _get_pip_bin(bin_env)
+
+   cmd = [pip_bin, 'freeze']
    cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
    if bin_env and os.path.isdir(bin_env):
        cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}

@@ -865,7 +879,7 @@ def list_(prefix=None,
    packages = {}

    pip_bin = _get_pip_bin(bin_env)
-   pip_version_cmd = [pip_bin, '--version']

    cmd = [pip_bin, 'freeze']

    cmd_kwargs = dict(runas=user, cwd=cwd, python_shell=False)

@@ -873,11 +887,7 @@ def list_(prefix=None,
        cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}

    if not prefix or prefix in ('p', 'pi', 'pip'):
-       pip_version_result = __salt__['cmd.run_all'](' '.join(pip_version_cmd),
-                                                    **cmd_kwargs)
-       if pip_version_result['retcode'] > 0:
-           raise CommandExecutionError(pip_version_result['stderr'])
-       packages['pip'] = pip_version_result['stdout'].split()[1]
+       packages['pip'] = version(bin_env)

    result = __salt__['cmd.run_all'](' '.join(cmd), **cmd_kwargs)
    if result['retcode'] > 0:

@@ -924,7 +934,9 @@ def version(bin_env=None):

        salt '*' pip.version
    '''
-   output = __salt__['cmd.run']('{0} --version'.format(_get_pip_bin(bin_env)), python_shell=False)
+   pip_bin = _get_pip_bin(bin_env)
+
+   output = __salt__['cmd.run']('{0} --version'.format(pip_bin), python_shell=False)
    try:
        return re.match(r'^pip (\S+)', output).group(1)
    except AttributeError:

@@ -943,8 +955,8 @@ def list_upgrades(bin_env=None,

        salt '*' pip.list_upgrades
    '''

    pip_bin = _get_pip_bin(bin_env)

    cmd = [pip_bin, "list", "--outdated"]

    cmd_kwargs = dict(cwd=cwd, runas=user)
@@ -17,7 +17,7 @@ from salt.exceptions import SaltInvocationError, CommandExecutionError

# Import third party libs
try:
-   import psutil
+   import salt.utils.psutil_compat as psutil

    HAS_PSUTIL = True
    PSUTIL2 = psutil.version_info >= (2, 0)

@@ -126,10 +126,10 @@ def top(num_processes=5, interval=3):
    '''
    result = []
    start_usage = {}
-   for pid in psutil.get_pid_list():
+   for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
-           user, system = process.get_cpu_times()
+           user, system = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        start_usage[process] = user + system

@@ -137,7 +137,7 @@ def top(num_processes=5, interval=3):
    usage = set()
    for process, start in start_usage.items():
        try:
-           user, system = process.get_cpu_times()
+           user, system = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        now = user + system

@@ -159,9 +159,9 @@ def top(num_processes=5, interval=3):
            'cpu': {},
            'mem': {},
        }
-       for key, value in process.get_cpu_times()._asdict().items():
+       for key, value in process.cpu_times()._asdict().items():
            info['cpu'][key] = value
-       for key, value in process.get_memory_info()._asdict().items():
+       for key, value in process.memory_info()._asdict().items():
            info['mem'][key] = value
        result.append(info)

@@ -178,7 +178,7 @@ def get_pid_list():

        salt '*' ps.get_pid_list
    '''
-   return psutil.get_pid_list()
+   return psutil.pids()


def proc_info(pid, attrs=None):

@@ -538,7 +538,7 @@ def boot_time(time_format=None):
    except AttributeError:
        # get_boot_time() has been removed in newer psutil versions, and has
        # been replaced by boot_time() which provides the same information.
-       b_time = int(psutil.get_boot_time())
+       b_time = int(psutil.boot_time())
    if time_format:
        # Load epoch timestamp as a datetime.datetime object
        b_time = datetime.datetime.fromtimestamp(b_time)

@@ -562,9 +562,9 @@ def network_io_counters(interface=None):
        salt '*' ps.network_io_counters interface=eth0
    '''
    if not interface:
-       return dict(psutil.network_io_counters()._asdict())
+       return dict(psutil.net_io_counters()._asdict())
    else:
-       stats = psutil.network_io_counters(pernic=True)
+       stats = psutil.net_io_counters(pernic=True)
        if interface in stats:
            return dict(stats[interface]._asdict())
        else:

@@ -604,7 +604,7 @@ def get_users():
        salt '*' ps.get_users
    '''
    try:
-       recs = psutil.get_users()
+       recs = psutil.users()
        return [dict(x._asdict()) for x in recs]
    except AttributeError:
        # get_users is only present in psutil > v0.5.0
@@ -379,7 +379,8 @@ def status(name, sig=None):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
-   return not __salt__['cmd.retcode'](_systemctl_cmd('is-active', name))
+   return not __salt__['cmd.retcode'](_systemctl_cmd('is-active', name),
+                                      ignore_retcode=True)


def enable(name, **kwargs):
salt/pillar/neutron.py (new file, 105 lines)

@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
'''
Use Openstack Neutron data as a Pillar source. Will list all networks listed
inside of Neutron, to all minions.

.. versionadded:: 2015.5.1

:depends: - python-neutronclient

A keystone profile must be used for the pillar to work (no generic keystone
configuration here). For example:

.. code-block:: yaml

    my openstack_config:
      keystone.user: 'admin'
      keystone.password: 'password'
      keystone.tenant: 'admin'
      keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
      keystone.region_name: 'RegionOne'
      keystone.service_type: 'network'

After the profile is created, configure the external pillar system to use it.

.. code-block:: yaml

    ext_pillar:
      - neutron: my_openstack_config

Using these configuration profiles, multiple neutron sources may also be used:

.. code-block:: yaml

    ext_pillar:
      - neutron: my_openstack_config
      - neutron: my_other_openstack_config

By default, these networks will be returned as a pillar item called
``networks``. In order to have them returned under a different name, add the
name after the Keystone profile name:

    ext_pillar:
      - neutron: my_openstack_config neutron_networks
'''

# Import Python Libs
from __future__ import absolute_import
import logging

# Import Salt Libs
try:
    import salt.utils.openstack.neutron as suoneu
    HAS_NEUTRON = True
except NameError as exc:
    HAS_NEUTRON = False

# Set up logging
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only return if python-neutronclient is installed
    '''
    return HAS_NEUTRON


def _auth(profile=None):
    '''
    Set up neutron credentials
    '''
    credentials = __salt__['config.option'](profile)
    kwargs = {
        'username': credentials['keystone.user'],
        'password': credentials['keystone.password'],
        'tenant_name': credentials['keystone.tenant'],
        'auth_url': credentials['keystone.auth_url'],
        'region_name': credentials.get('keystone.region_name', None),
        'service_type': credentials['keystone.service_type'],
    }

    return suoneu.SaltNeutron(**kwargs)


def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               conf):
    '''
    Check neutron for all data
    '''
    comps = conf.split()

    profile = None
    if comps[0]:
        profile = comps[0]

    conn = _auth(profile)
    ret = {}
    networks = conn.list_networks()
    for network in networks['networks']:
        ret[network['name']] = network

    if len(comps) < 2:
        comps.append('networks')
    return {comps[1]: ret}
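Once the external pillar is enabled, the collected data can be inspected from any minion; a hedged sketch using the alternate pillar key named in the docstring above.

.. code-block:: bash

    salt '*' pillar.get neutron_networks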
@@ -1,7 +1,24 @@
# -*- coding: utf-8 -*-
'''
-Use varstack data as a Pillar source
+Use `Varstack <https://github.com/conversis/varstack>`_ data as a Pillar source
+
+Configuring Varstack
+====================
+
+Using varstack in Salt is fairly simple. Just put the following into the
+config file of your master:
+
+.. code-block:: yaml
+
+    ext_pillar:
+      - varstack: /etc/varstack.yaml
+
+Varstack will then use /etc/varstack.yaml to determine which configuration
+data to return as pillar information. From there you can take a look at the
+`README <https://github.com/conversis/varstack/blob/master/README.md>`_ of
+varstack on how this file is evaluated.
'''

from __future__ import absolute_import

# Import python libs
@@ -49,7 +49,7 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws):
    try:
        data = load(yaml_data, Loader=get_yaml_loader(argline))
    except ScannerError as exc:
-       err_type = _ERROR_MAP.get(exc.problem, 'Unknown yaml render error')
+       err_type = _ERROR_MAP.get(exc.problem, exc.problem)
        line_num = exc.problem_mark.line + 1
        raise SaltRenderError(err_type, line_num, exc.problem_mark.buffer)
    except ConstructorError as exc:
@@ -78,7 +78,7 @@ class Roster(object):
            try:
                targets.update(self.rosters[f_str](tgt, tgt_type))
            except salt.exceptions.SaltRenderError as exc:
-               log.debug('Unable to render roster file: {0}'.format(exc.error))
+               log.error('Unable to render roster file: {0}'.format(exc))
            except IOError as exc:
                pass

@@ -126,7 +126,7 @@ def destroy(instances):


def action(
-       fun=None,
+       func=None,
        cloudmap=None,
        instances=None,
        provider=None,

@@ -136,7 +136,7 @@ def action(
    Execute a single action on the given map/provider/instance
    '''
    client = _get_client()
-   info = client.action(fun, cloudmap, instances, provider, instance, kwargs)
+   info = client.action(func, cloudmap, instances, provider, instance, kwargs)
    return info

@@ -50,6 +50,15 @@ def _valid(name, comment='', changes=None):
            'comment': comment}


+def _get_instance(names):
+    # for some reason loader overwrites __opts__['test'] with default
+    # value of False, thus store and then load it again after action
+    test = __opts__.get('test', False)
+    instance = __salt__['cloud.action'](fun='show_instance', names=names)
+    __opts__['test'] = test
+    return instance
+
+
def present(name, cloud_provider, onlyif=None, unless=None, **kwargs):
    '''
    Spin up a single instance on a cloud provider, using salt-cloud. This state

@@ -245,7 +254,7 @@ def profile(name, profile, onlyif=None, unless=None, **kwargs):
    elif isinstance(unless, string_types):
        if retcode(unless) == 0:
            return _valid(name, comment='unless execution succeeded')
-   instance = __salt__['cloud.action'](fun='show_instance', names=[name])
+   instance = _get_instance([name])
    prov = str(next(instance.iterkeys()))
    if instance and 'Not Actioned' not in prov:
        ret['result'] = True
@@ -229,11 +229,13 @@ def _reinterpreted_state(state):
                key, val = item.split('=')
                data[key] = val
        except ValueError:
-           return _failout(
+           state = _failout(
                state,
                'Failed parsing script output! '
                'Stdout must be JSON or a line of name=value pairs.'
            )
+           state['changes'].update(ret)
+           return state

    changed = _is_true(data.get('changed', 'no'))

@@ -1764,6 +1764,14 @@ def directory(name,
    if not os.path.isdir(os.path.dirname(name)):
        # The parent directory does not exist, create them
        if makedirs:
+           # Make sure the drive is mapped before trying to create the
+           # path in windows
+           if salt.utils.is_windows():
+               drive, path = os.path.splitdrive(name)
+               if not os.path.isdir(drive):
+                   return _error(
+                       ret, 'Drive {0} is not mapped'.format(drive))
+           # Everything's good, create the path
            __salt__['file.makedirs'](
                name, user=user, group=group, mode=dir_mode
            )
@@ -103,10 +103,14 @@ def list_present(name, value):
        ret['result'] = False
        ret['comment'] = 'Grain {0} is not a valid list'.format(name)
        return ret

-   if value in grain:
-       ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
-       return ret
+   if isinstance(value, list):
+       if set(value).issubset(set(__grains__.get(name))):
+           ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
+           return ret
+   else:
+       if value in grain:
+           ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
+           return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Value {1} is set to be appended to grain {0}'.format(name, value)

@@ -118,12 +122,17 @@ def list_present(name, value):
        ret['comment'] = 'Grain {0} is set to be added'.format(name)
        ret['changes'] = {'new': grain}
        return ret

    new_grains = __salt__['grains.append'](name, value)
-   if value not in __grains__.get(name):
-       ret['result'] = False
-       ret['comment'] = 'Failed append value {1} to grain {0}'.format(name, value)
-       return ret
+   if isinstance(value, list):
+       if not set(value).issubset(set(__grains__.get(name))):
+           ret['result'] = False
+           ret['comment'] = 'Failed append value {1} to grain {0}'.format(name, value)
+           return ret
+   else:
+       if value not in __grains__.get(name):
+           ret['result'] = False
+           ret['comment'] = 'Failed append value {1} to grain {0}'.format(name, value)
+           return ret
    ret['comment'] = 'Append value {1} to grain {0}'.format(name, value)
    ret['changes'] = {'new': new_grains}
    return ret
@@ -1165,6 +1165,10 @@ def latest(
    skip_verify
        Skip the GPG verification check for the package to be installed

+   refresh
+       Update the repo database of available packages prior to installing the
+       requested package.
+

    Multiple Package Installation Options:

@@ -567,6 +567,14 @@ def present(name,
                    ' {1}'.format(name, expire)
                ret['result'] = False
                ret['changes']['expire'] = expire
+       elif salt.utils.is_windows():
+           if password and not empty_password:
+               if not __salt__['user.setpassword'](name, password):
+                   ret['comment'] = 'User {0} created but failed to set' \
+                                    ' password to' \
+                                    ' {1}'.format(name, password)
+                   ret['result'] = False
+               ret['changes']['passwd'] = password
    else:
        ret['comment'] = 'Failed to create new user {0}'.format(name)
        ret['result'] = False
@@ -1015,7 +1015,7 @@ def fopen(*args, **kwargs):
    NB! We still have small race condition between open and fcntl.

    '''
-   # Remove lock, uid, gid and mode from kwargs if present
+   # Remove lock from kwargs if present
    lock = kwargs.pop('lock', False)

    if lock is True:

@@ -1028,6 +1028,19 @@ def fopen(*args, **kwargs):
        )
        return flopen(*args, **kwargs)

+   # ensure 'binary' mode is always used on windows
+   if is_windows():
+       if len(args) > 1:
+           args = list(args)
+           if 'b' not in args[1]:
+               args[1] += 'b'
+       elif kwargs.get('mode', None):
+           if 'b' not in kwargs['mode']:
+               kwargs['mode'] += 'b'
+       else:
+           # the default is to read
+           kwargs['mode'] = 'rb'
+
    fhandle = open(*args, **kwargs)
    if is_fcntl_available():
        # modify the file descriptor on systems with fcntl
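A short sketch of the effect of the new Windows branch, assuming a call that passes no explicit mode; the path is illustrative.

.. code-block:: python

    import salt.utils

    # On Windows this now opens the file as 'rb'; on other platforms the default mode is unchanged.
    with salt.utils.fopen('C:\\salt\\conf\\minion') as fp_:
        data = fp_.read()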
@@ -324,6 +324,11 @@ def bootstrap(vm_, opts):
        'ssh_username', vm_, opts, default='root'
    )

+   if 'file_transport' not in opts:
+       opts['file_transport'] = vm_.get('file_transport', 'sftp')
+
    # NOTE: deploy_kwargs is also used to pass inline_script variable content
    # to run_inline_script function
    deploy_kwargs = {
        'opts': opts,
        'host': vm_['ssh_host'],

@@ -1084,25 +1089,32 @@ def deploy_script(host,

            if remote_dir not in remote_dirs:
                root_cmd('mkdir -p \'{0}\''.format(remote_dir), tty, sudo, **ssh_kwargs)
                if ssh_kwargs['username'] != 'root':
                    root_cmd(
                        'chown {0} \'{1}\''.format(
                            ssh_kwargs['username'], remote_dir
                        ),
                        tty, sudo, **ssh_kwargs
                    )
                remote_dirs.append(remote_dir)
-           sftp_file(
-               remote_file, kwargs=ssh_kwargs, local_file=local_file
+           ssh_file(
+               opts, remote_file, kwargs=ssh_kwargs, local_file=local_file
            )
            file_map_success.append({local_file: remote_file})

    # Minion configuration
    if minion_pem:
-       sftp_file('{0}/minion.pem'.format(tmp_dir), minion_pem, ssh_kwargs)
+       ssh_file(opts, '{0}/minion.pem'.format(tmp_dir), minion_pem, ssh_kwargs)
        ret = root_cmd('chmod 600 \'{0}/minion.pem\''.format(tmp_dir),
                       tty, sudo, **ssh_kwargs)
        if ret:
            raise SaltCloudSystemExit(
                'Cant set perms on {0}/minion.pem'.format(tmp_dir))
    if minion_pub:
-       sftp_file('{0}/minion.pub'.format(tmp_dir), minion_pub, ssh_kwargs)
+       ssh_file(opts, '{0}/minion.pub'.format(tmp_dir), minion_pub, ssh_kwargs)

    if master_sign_pub_file:
-       sftp_file('{0}/master_sign.pub'.format(tmp_dir), kwargs=ssh_kwargs, local_file=master_sign_pub_file)
+       ssh_file(opts, '{0}/master_sign.pub'.format(tmp_dir), kwargs=ssh_kwargs, local_file=master_sign_pub_file)

    if minion_conf:
        if not isinstance(minion_conf, dict):

@@ -1115,12 +1127,14 @@ def deploy_script(host,
            )
        minion_grains = minion_conf.pop('grains', {})
        if minion_grains:
-           sftp_file(
+           ssh_file(
+               opts,
                '{0}/grains'.format(tmp_dir),
                salt_config_to_yaml(minion_grains),
                ssh_kwargs
            )
-       sftp_file(
+       ssh_file(
+           opts,
            '{0}/minion'.format(tmp_dir),
            salt_config_to_yaml(minion_conf),
            ssh_kwargs

@@ -1128,7 +1142,7 @@ def deploy_script(host,

    # Master configuration
    if master_pem:
-       sftp_file('{0}/master.pem'.format(tmp_dir), master_pem, ssh_kwargs)
+       ssh_file(opts, '{0}/master.pem'.format(tmp_dir), master_pem, ssh_kwargs)
        ret = root_cmd('chmod 600 \'{0}/master.pem\''.format(tmp_dir),
                       tty, sudo, **ssh_kwargs)
        if ret:

@@ -1136,7 +1150,7 @@ def deploy_script(host,
                'Cant set perms on {0}/master.pem'.format(tmp_dir))

    if master_pub:
-       sftp_file('{0}/master.pub'.format(tmp_dir), master_pub, ssh_kwargs)
+       ssh_file(opts, '{0}/master.pub'.format(tmp_dir), master_pub, ssh_kwargs)

    if master_conf:
        if not isinstance(master_conf, dict):

@@ -1148,7 +1162,8 @@ def deploy_script(host,
                'Loading from YAML ...'
            )

-       sftp_file(
+       ssh_file(
+           opts,
            '{0}/master'.format(tmp_dir),
            salt_config_to_yaml(master_conf),
            ssh_kwargs

@@ -1187,7 +1202,7 @@ def deploy_script(host,
            rpath = os.path.join(
                preseed_minion_keys_tempdir, minion_id
            )
-           sftp_file(rpath, minion_key, ssh_kwargs)
+           ssh_file(opts, rpath, minion_key, ssh_kwargs)

        if ssh_kwargs['username'] != 'root':
            root_cmd(

@@ -1205,7 +1220,7 @@ def deploy_script(host,
    if script:
        # got strange escaping issues with sudoer, going onto a
        # subshell fixes that
-       sftp_file('{0}/deploy.sh'.format(tmp_dir), script, ssh_kwargs)
+       ssh_file(opts, '{0}/deploy.sh'.format(tmp_dir), script, ssh_kwargs)
        ret = root_cmd(
            ('sh -c "( chmod +x \'{0}/deploy.sh\' )";'
             'exit $?').format(tmp_dir),

@@ -1267,7 +1282,8 @@ def deploy_script(host,
            environ_script_contents.append(deploy_command)

        # Upload our environ setter wrapper
-       sftp_file(
+       ssh_file(
+           opts,
            '{0}/environ-deploy-wrapper.sh'.format(tmp_dir),
            '\n'.join(environ_script_contents),
            ssh_kwargs

@@ -1470,13 +1486,14 @@ def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs):
        return 1


-def scp_file(dest_path, contents, kwargs):
+def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Use scp or sftp to copy a file to a server
    '''
-   tmpfh, tmppath = tempfile.mkstemp()
-   with salt.utils.fopen(tmppath, 'w') as tmpfile:
-       tmpfile.write(contents)
+   if contents is not None:
+       tmpfh, tmppath = tempfile.mkstemp()
+       with salt.utils.fopen(tmppath, 'w') as tmpfile:
+           tmpfile.write(contents)

    log.debug('Uploading {0} to {1}'.format(dest_path, kwargs['hostname']))

@@ -1488,6 +1505,12 @@ def scp_file(dest_path, contents, kwargs):
        # Don't re-use the SSH connection. Less failures.
        '-oControlPath=none'
    ]

+   if local_file is not None:
+       tmppath = local_file
+       if os.path.isdir(local_file):
+           ssh_args.append('-r')
+
    if 'key_filename' in kwargs:
        # There should never be both a password and an ssh key passed in, so
        ssh_args.extend([

@@ -1534,20 +1557,15 @@ def scp_file(dest_path, contents, kwargs):
                    ssh_gateway_port
                )
            )
-   if kwargs.get('use_sftp', False) is True:
-       cmd = 'sftp {0} {2[username]}@{2[hostname]} <<< "put {1} {3}"'.format(
-           ' '.join(ssh_args), tmppath, kwargs, dest_path
-       )
-       log.debug('SFTP command: {0!r}'.format(cmd))
-   else:
-       cmd = (
-           'scp {0} {1} {2[username]}@{2[hostname]}:{3} || '
-           'echo "put {1} {3}" | sftp {0} {2[username]}@{2[hostname]} || '
-           'rsync -avz -e "ssh {0}" {1} {2[username]}@{2[hostname]}:{3}'.format(
-               ' '.join(ssh_args), tmppath, kwargs, dest_path
-           )
-       )
-       log.debug('SCP command: {0!r}'.format(cmd))
+   cmd = (
+       'scp {0} {1} {2[username]}@{2[hostname]}:{3} || '
+       'echo "put {1} {3}" | sftp {0} {2[username]}@{2[hostname]} || '
+       'rsync -avz -e "ssh {0}" {1} {2[username]}@{2[hostname]}:{3}'.format(
+           ' '.join(ssh_args), tmppath, kwargs, dest_path
+       )
+   )
+
+   log.debug('SCP command: {0!r}'.format(cmd))
    retcode = _exec_ssh_cmd(cmd,
                            error_msg='Failed to upload file {0!r}: {1}\n{2}',
                            password_retries=3,

@@ -1555,6 +1573,16 @@ def scp_file(dest_path, contents, kwargs):
    return retcode


+def ssh_file(opts, dest_path, contents=None, kwargs=None, local_file=None):
+    '''
+    Copies a file to the remote SSH target using either sftp or scp, as
+    configured.
+    '''
+    if opts.get('file_transport', 'sftp') == 'sftp':
+        return sftp_file(dest_path, contents, kwargs, local_file)
+    return scp_file(dest_path, contents, kwargs, local_file)
+
+
def sftp_file(dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Use sftp to upload a file to a server
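A hedged sketch of how the new ssh_file() wrapper is meant to be called from deploy code; the kwargs shown are a minimal subset and the values are placeholders, not taken from the commit.

.. code-block:: python

    import salt.utils.cloud

    ssh_kwargs = {'hostname': '203.0.113.10', 'username': 'root', 'key_filename': '/root/.ssh/id_rsa'}
    opts = {'file_transport': 'scp'}  # falls back to 'sftp' when the option is unset

    # Dispatches to scp_file() because of opts; with 'sftp' it would call sftp_file().
    salt.utils.cloud.ssh_file(opts, '/tmp/.saltcloud/minion.pem', contents='...', kwargs=ssh_kwargs)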
@@ -443,14 +443,17 @@ def get_ca_bundle(opts=None):
        return opts_bundle

    file_roots = opts.get('file_roots', {'base': [syspaths.SRV_ROOT_DIR]})
-   salt_root = file_roots['base'][0]
-   log.debug('file_roots is {0}'.format(salt_root))

-   # Please do not change the order without good reason
-   for path in (
-       # Check Salt first
-       os.path.join(salt_root, 'cacert.pem'),
-       os.path.join(salt_root, 'ca-bundle.crt'),
+   # Check Salt first
+   for salt_root in file_roots.get('base', []):
+       log.debug('file_roots is {0}'.format(salt_root))
+       for path in ('cacert.pem', 'ca-bundle.crt'):
+           if os.path.exists(path):
+               return path
+
+   locations = (
        # Debian has paths that often exist on other distros
        '/etc/ssl/certs/ca-certificates.crt',
        # RedHat is also very common

@@ -460,7 +463,8 @@ def get_ca_bundle(opts=None):
        '/etc/ssl/certs/ca-bundle.crt',
        # Suse has an unusual path
        '/var/lib/ca-certificates/ca-bundle.pem',
-   ):
+   )
+   for path in locations:
        if os.path.exists(path):
            return path

salt/utils/psutil_compat.py (new file, 100 lines)

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
'''
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.

The old <1.0 psutil API is dropped in psutil 3.0

Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)

Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
'''

from __future__ import absolute_import

# No exception handling, as we want ImportError if psutil doesn't exist
import psutil

if psutil.version_info >= (2, 0):
    from psutil import *  # pylint: disable=wildcard-import
else:
    # Import hack to work around bugs in old psutil's
    # Psuedo "from psutil import *"
    _globals = globals()
    for attr in psutil.__all__:
        _temp = __import__('psutil', globals(), locals(), [attr], -1)
        try:
            _globals[attr] = getattr(_temp, attr)
        except AttributeError:
            pass

    # Import functions not in __all__
    from psutil import disk_partitions  # pylint: disable=unused-import
    from psutil import disk_usage  # pylint: disable=unused-import

    # Alias new module functions
    def boot_time():
        return psutil.BOOT_TIME

    def cpu_count():
        return psutil.NUM_CPUS

    # Alias renamed module functions
    pids = psutil.get_pid_list
    users = psutil.get_users

    # Deprecated in 1.0.1, but not mentioned in blog post
    if psutil.version_info < (1, 0, 1):
        net_io_counters = psutil.network_io_counters()

    class Process(psutil.Process):  # pylint: disable=no-init
        # Reimplement overloaded getters/setters
        def cpu_affinity(self, *args, **kwargs):
            if args or kwargs:
                return self.set_cpu_affinity(*args, **kwargs)
            else:
                return self.get_cpu_affinity()

        def ionice(self, *args, **kwargs):
            if args or kwargs:
                return self.set_ionice(*args, **kwargs)
            else:
                return self.get_ionice()

        def nice(self, *args, **kwargs):
            if args or kwargs:
                return self.set_nice(*args, **kwargs)
            else:
                return self.get_nice()

        def rlimit(self, *args, **kwargs):
            if args or kwargs:
                return self.set_rlimit(*args, **kwargs)
            else:
                return self.get_rlimit()

    # Alias renamed Process functions
    _PROCESS_FUNCTION_MAP = {
        "children": "get_children",
        "connections": "get_connections",
        "cpu_percent": "get_cpu_percent",
        "cpu_times": "get_cpu_times",
        "io_counters": "get_io_counters",
        "memory_info": "get_memory_info",
        "memory_info_ex": "get_ext_memory_info",
        "memory_maps": "get_memory_maps",
        "memory_percent": "get_memory_percent",
        "num_ctx_switches": "get_num_ctx_switches",
        "num_fds": "get_num_fds",
        "num_threads": "get_num_threads",
        "open_files": "get_open_files",
        "threads": "get_threads",
        "cwd": "getcwd",

    }

    for new, old in _PROCESS_FUNCTION_MAP.iteritems():
        try:
            setattr(Process, new, psutil.Process.__dict__[old])
        except KeyError:
            pass
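A minimal sketch of how callers elsewhere in this commit use the shim: import it under the name psutil and stick to the >=2.0 API names, regardless of which psutil version is installed.

.. code-block:: python

    import salt.utils.psutil_compat as psutil

    print(psutil.pids()[:5])             # on old psutil this is aliased from get_pid_list()
    print(psutil.Process().cpu_times())  # on old psutil this is mapped from get_cpu_times()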
@@ -10,6 +10,11 @@ import socket

# Import salt libs
from salt.ext.six import string_types
+import salt.utils
+
+# Import third party libs
+if salt.utils.is_windows():
+    from salt.ext import win_inet_pton  # pylint: disable=unused-import


def mac(addr):
@@ -54,10 +54,10 @@ class PipModuleTest(integration.ModuleCase):
        # Let's run a pip depending functions
        for func in ('pip.freeze', 'pip.list'):
            ret = self.run_function(func, bin_env=self.venv_dir)
-           self.assertEqual(
-               ret,
-               'Command required for \'{0}\' not found: Could not find '
-               'a `pip` binary'.format(func)
+           self.assertIn(
+               'Command required for \'{0}\' not found: '
+               'Could not find a `pip` binary in virtualenv'.format(func),
+               ret
            )

    @skipIf(os.geteuid() != 0, 'you must be root to run this test')
tests/unit/cloud/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-

tests/unit/cloud/clouds/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-
tests/unit/cloud/clouds/saltify_test.py (new file, 39 lines)

@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Alexander Schwartz <alexander.schwartz@gmx.net>`
'''

# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase

# Import Salt Libs
from salt.cloud.clouds import saltify

# Globals
saltify.__opts__ = {}
saltify.__opts__['providers'] = {}


class SaltifyTestCase(TestCase):
    '''
    Test cases for salt.cloud.clouds.saltify
    '''
    # 'create' function tests: 1

    def test_create_no_deploy(self):
        '''
        Test if deployment fails. This is the most basic test as saltify doesn't contain much logic
        '''
        vm = {'deploy': False,
              'provider': 'saltify',
              'name': 'dummy'
              }
        self.assertTrue(saltify.create(vm)['Error']['No Deploy'])


if __name__ == '__main__':
    from integration import run_tests
    run_tests(SaltifyTestCase, needs_daemon=False)
@ -146,7 +146,8 @@ class LocalemodTestCase(TestCase):
        '''
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret)}):
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))

    @patch('salt.utils.which', MagicMock(return_value='/some/dir/path'))
@ -157,7 +158,8 @@ class LocalemodTestCase(TestCase):
        '''
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret)}):
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}):
            self.assertEqual(localemod.gen_locale('en_US.UTF-8', verbose=True), ret)
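Both hunks above add 'file.replace' to the patched __salt__ dictionary, presumably because gen_locale now looks that function up and the mock keeps the test away from the real filesystem. The general pattern, sketched with a throwaway module-level dict rather than the real localemod (all names here are placeholders):

from unittest.mock import MagicMock, patch

__salt__ = {}  # stands in for a Salt execution module's dunder dict


def gen_something():
    # a function under test that calls two execution-module functions
    __salt__['file.replace']()
    return __salt__['cmd.run_all']()['retcode'] == 0


run_all = MagicMock(return_value={'retcode': 0})
replace = MagicMock()
with patch.dict(__salt__, {'cmd.run_all': run_all, 'file.replace': replace}):
    assert gen_something() is True
assert __salt__ == {}  # patch.dict restores the original contents on exit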
@ -944,38 +944,38 @@ class PipTestCase(TestCase):
            'bbfreeze-loader==1.1.0',
            'pycrypto==2.6'
        ]
        mock = MagicMock(
            side_effect=[
                {'retcode': 0, 'stdout': 'pip MOCKED_VERSION'},
                {'retcode': 0, 'stdout': '\n'.join(eggs)}
            ]
        )
        mock_version = '6.1.1'
        mock = MagicMock(return_value={'retcode': 0, 'stdout': '\n'.join(eggs)})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            ret = pip.list_()
            mock.assert_called_with(
                'pip freeze',
                runas=None,
                cwd=None,
                python_shell=False,
            )
            self.assertEqual(
                ret, {
                    'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                    'M2Crypto': '0.21.1',
                    'bbfreeze-loader': '1.1.0',
                    'bbfreeze': '1.1.0',
                    'pip': 'MOCKED_VERSION',
                    'pycrypto': '2.6'
                }
            )
            with patch('salt.modules.pip.version',
                       MagicMock(return_value=mock_version)):
                ret = pip.list_()
                mock.assert_called_with(
                    'pip freeze',
                    runas=None,
                    cwd=None,
                    python_shell=False,
                )
                self.assertEqual(
                    ret, {
                        'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                        'M2Crypto': '0.21.1',
                        'bbfreeze-loader': '1.1.0',
                        'bbfreeze': '1.1.0',
                        'pip': mock_version,
                        'pycrypto': '2.6'
                    }
                )

        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            self.assertRaises(
                CommandExecutionError,
                pip.list_,
            )
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.list_,
                )

    def test_list_command_with_prefix(self):
        eggs = [
@ -1014,34 +1014,35 @@ class PipTestCase(TestCase):
            {'retcode': 0, 'stdout': ''}
        ])
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(
                'pep8', pre_releases=True
            )
            mock.assert_called_with(
                'pip install \'pep8\'',
                saltenv='base',
                runas=None,
                cwd=None,
                use_vt=False,
                python_shell=False,
            )
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='1.3')):
                pip.install(
                    'pep8', pre_releases=True
                )
                mock.assert_called_with(
                    'pip install \'pep8\'',
                    saltenv='base',
                    runas=None,
                    cwd=None,
                    use_vt=False,
                    python_shell=False,
                )

        mock = MagicMock(side_effect=[
            {'retcode': 0, 'stdout': 'pip 1.4.0 /path/to/site-packages/pip'},
            {'retcode': 0, 'stdout': ''}
        ])
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(
                'pep8', pre_releases=True
            )
            mock.assert_called_with(
                'pip install --pre \'pep8\'',
                saltenv='base',
                runas=None,
                cwd=None,
                use_vt=False,
                python_shell=False,
            )
        mock_run = MagicMock(return_value='pip 1.4.1 /path/to/site-packages/pip')
        mock_run_all = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run': mock_run,
                                       'cmd.run_all': mock_run_all}):
            with patch('salt.modules.pip._get_pip_bin',
                       MagicMock(return_value='pip')):
                pip.install('pep8', pre_releases=True)
                mock_run_all.assert_called_with(
                    'pip install --pre \'pep8\'',
                    saltenv='base',
                    runas=None,
                    cwd=None,
                    use_vt=False,
                    python_shell=False,
                )


if __name__ == '__main__':
@ -16,7 +16,7 @@ HAS_PSUTIL = ps.__virtual__()
HAS_PSUTIL_VERSION = False

if HAS_PSUTIL:
    import psutil
    import salt.utils.psutil_compat as psutil
    from collections import namedtuple

    PSUTIL2 = psutil.version_info >= (2, 0)

@ -51,7 +51,7 @@ else:
     STUB_USER) = [None for val in range(9)]

STUB_PID_LIST = [0, 1, 2, 3]
MOCK_PROC = mocked_proc = MagicMock('psutil.Process')
MOCK_PROC = mocked_proc = MagicMock('salt.utils.psutil_compat.Process')

try:
    import utmp  # pylint: disable=W0611
@ -79,59 +79,59 @@ class PsTestCase(TestCase):
        MOCK_PROC.name = 'test_mock_proc'
        MOCK_PROC.pid = 9999999999

    @patch('psutil.get_pid_list', new=MagicMock(return_value=STUB_PID_LIST))
    @patch('salt.utils.psutil_compat.pids', new=MagicMock(return_value=STUB_PID_LIST))
    def test_get_pid_list(self):
        self.assertListEqual(STUB_PID_LIST, ps.get_pid_list())

    @patch('psutil.Process')
    @patch('salt.utils.psutil_compat.Process')
    def test_kill_pid(self, send_signal_mock):
        ps.kill_pid(0, signal=999)
        self.assertEqual(send_signal_mock.call_args, call(0))

    @patch('psutil.Process.send_signal')
    @patch('psutil.process_iter', new=MagicMock(return_value=[MOCK_PROC]))
    @patch('salt.utils.psutil_compat.Process.send_signal')
    @patch('salt.utils.psutil_compat.process_iter', new=MagicMock(return_value=[MOCK_PROC]))
    def test_pkill(self, send_signal_mock):
        mocked_proc.send_signal = MagicMock()
        test_signal = 1234
        ps.pkill(_get_proc_name(mocked_proc), signal=test_signal)
        self.assertEqual(mocked_proc.send_signal.call_args, call(test_signal))

    @patch('psutil.process_iter', new=MagicMock(return_value=[MOCK_PROC]))
    @patch('salt.utils.psutil_compat.process_iter', new=MagicMock(return_value=[MOCK_PROC]))
    def test_pgrep(self):
        self.assertIn(_get_proc_pid(MOCK_PROC), ps.pgrep(_get_proc_name(MOCK_PROC)))

    @patch('psutil.cpu_percent', new=MagicMock(return_value=1))
    @patch('salt.utils.psutil_compat.cpu_percent', new=MagicMock(return_value=1))
    def test_cpu_percent(self):
        self.assertEqual(ps.cpu_percent(), 1)

    @patch('psutil.cpu_times', new=MagicMock(return_value=STUB_CPU_TIMES))
    @patch('salt.utils.psutil_compat.cpu_times', new=MagicMock(return_value=STUB_CPU_TIMES))
    def test_cpu_times(self):
        self.assertDictEqual({'idle': 4, 'nice': 2, 'system': 3, 'user': 1}, ps.cpu_times())

    @skipIf(HAS_PSUTIL_VERSION is False, 'psutil 0.6.0 or greater is required for this test')
    @patch('psutil.virtual_memory', new=MagicMock(return_value=STUB_VIRT_MEM))
    @patch('salt.utils.psutil_compat.virtual_memory', new=MagicMock(return_value=STUB_VIRT_MEM))
    def test_virtual_memory(self):
        self.assertDictEqual({'used': 500, 'total': 1000, 'available': 500, 'percent': 50, 'free': 500},
                             ps.virtual_memory())

    @skipIf(HAS_PSUTIL_VERSION is False, 'psutil 0.6.0 or greater is required for this test')
    @patch('psutil.swap_memory', new=MagicMock(return_value=STUB_SWAP_MEM))
    @patch('salt.utils.psutil_compat.swap_memory', new=MagicMock(return_value=STUB_SWAP_MEM))
    def test_swap_memory(self):
        self.assertDictEqual({'used': 500, 'total': 1000, 'percent': 50, 'free': 500, 'sin': 0, 'sout': 0},
                             ps.swap_memory())

    @patch('psutil.disk_partitions', new=MagicMock(return_value=[STUB_DISK_PARTITION]))
    @patch('salt.utils.psutil_compat.disk_partitions', new=MagicMock(return_value=[STUB_DISK_PARTITION]))
    def test_disk_partitions(self):
        self.assertDictEqual(
            {'device': '/dev/disk0s2', 'mountpoint': '/', 'opts': 'rw,local,rootfs,dovolfs,journaled,multilabel',
             'fstype': 'hfs'},
            ps.disk_partitions()[0])

    @patch('psutil.disk_usage', new=MagicMock(return_value=STUB_DISK_USAGE))
    @patch('salt.utils.psutil_compat.disk_usage', new=MagicMock(return_value=STUB_DISK_USAGE))
    def test_disk_usage(self):
        self.assertDictEqual({'used': 500, 'total': 1000, 'percent': 50, 'free': 500}, ps.disk_usage('DUMMY_PATH'))

    @patch('psutil.disk_partitions', new=MagicMock(return_value=[STUB_DISK_PARTITION]))
    @patch('salt.utils.psutil_compat.disk_partitions', new=MagicMock(return_value=[STUB_DISK_PARTITION]))
    def test_disk_partition_usage(self):
        self.assertDictEqual(
            {'device': '/dev/disk0s2', 'mountpoint': '/', 'opts': 'rw,local,rootfs,dovolfs,journaled,multilabel',
@ -149,26 +149,26 @@ class PsTestCase(TestCase):
    ## Should only be tested in integration
    # def test_boot_time(self):
    #     pass
    @patch('psutil.network_io_counters', new=MagicMock(return_value=STUB_NETWORK_IO))
    @patch('salt.utils.psutil_compat.net_io_counters', new=MagicMock(return_value=STUB_NETWORK_IO))
    def test_network_io_counters(self):
        self.assertDictEqual(
            {'packets_sent': 500, 'packets_recv': 600, 'bytes_recv': 2000, 'dropout': 4, 'bytes_sent': 1000,
             'errout': 2, 'errin': 1, 'dropin': 3}, ps.network_io_counters())

    @patch('psutil.disk_io_counters', new=MagicMock(return_value=STUB_DISK_IO))
    @patch('salt.utils.psutil_compat.disk_io_counters', new=MagicMock(return_value=STUB_DISK_IO))
    def test_disk_io_counters(self):
        self.assertDictEqual(
            {'read_time': 2000, 'write_bytes': 600, 'read_bytes': 500, 'write_time': 3000, 'read_count': 1000,
             'write_count': 2000}, ps.disk_io_counters())

    @patch('psutil.get_users', new=MagicMock(return_value=[STUB_USER]))
    @patch('salt.utils.psutil_compat.users', new=MagicMock(return_value=[STUB_USER]))
    def test_get_users(self):
        self.assertDictEqual({'terminal': 'ttys000', 'started': 0.0, 'host': 'localhost', 'name': 'bdobbs'},
                             ps.get_users()[0])

    ## This is commented out pending discussion on https://github.com/saltstack/salt/commit/2e5c3162ef87cca8a2c7b12ade7c7e1b32028f0a
    # @skipIf(not HAS_UTMP, "The utmp module must be installed to run test_get_users_utmp()")
    # @patch('psutil.get_users', new=MagicMock(return_value=None))  # This will force the function to use utmp
    # @patch('salt.utils.psutil_compat.get_users', new=MagicMock(return_value=None))  # This will force the function to use utmp
    # def test_get_users_utmp(self):
    #     pass
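The pattern across these hunks is that every @patch target moves from psutil.* to salt.utils.psutil_compat.*, because a mock must patch the name the module under test actually imports and looks up. A stand-alone illustration of that rule, using placeholder objects rather than the real test layout:

from types import SimpleNamespace
from unittest.mock import MagicMock, patch

# 'shim' plays the role of the compat layer that the code under test imports.
shim = SimpleNamespace(cpu_percent=lambda: 99.0)


def cpu_percent():
    # the code under test calls through the shim, not psutil directly,
    # so patching 'psutil.cpu_percent' would have no effect here
    return shim.cpu_percent()


with patch.object(shim, 'cpu_percent', MagicMock(return_value=1)):
    assert cpu_percent() == 1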
127 tests/unit/modules/svn_test.py Normal file

@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.modules import svn

svn.__salt__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class SvnTestCase(TestCase):
    '''
    Test cases for salt.modules.svn
    '''
    def test_info(self):
        '''
        Test to display the Subversion information from the checkout.
        '''
        mock = MagicMock(side_effect=[{'retcode': 0, 'stdout': True},
                                      {'retcode': 0, 'stdout': 'A\n\nB'},
                                      {'retcode': 0, 'stdout': 'A\n\nB'}])
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.info('cwd', fmt='xml'))

            self.assertListEqual(svn.info('cwd', fmt='list'), [[], []])

            self.assertListEqual(svn.info('cwd', fmt='dict'), [{}, {}])

    def test_checkout(self):
        '''
        Test to download a working copy of the remote Subversion repository
        directory or file
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.checkout('cwd', 'remote'))

    def test_switch(self):
        '''
        Test to switch a working copy of a remote Subversion repository
        directory
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.switch('cwd', 'remote'))

    def test_update(self):
        '''
        Test to update the current directory, files, or directories from
        the remote Subversion repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.update('cwd'))

    def test_diff(self):
        '''
        Test to return the diff of the current directory, files, or
        directories from the remote Subversion repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.diff('cwd'))

    def test_commit(self):
        '''
        Test to commit the current directory, files, or directories to
        the remote Subversion repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.commit('cwd'))

    def test_add(self):
        '''
        Test to add files to be tracked by the Subversion working-copy
        checkout
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.add('cwd', False))

    def test_remove(self):
        '''
        Test to remove files and directories from the Subversion repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.remove('cwd', False))

    def test_status(self):
        '''
        Test to display the status of the current directory, files, or
        directories in the Subversion repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.status('cwd'))

    def test_export(self):
        '''
        Test to create an unversioned copy of a tree.
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(svn.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(svn.export('cwd', 'remote'))


if __name__ == '__main__':
    from integration import run_tests
    run_tests(SvnTestCase, needs_daemon=False)
74 tests/unit/states/makeconf_test.py Normal file

@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import makeconf

makeconf.__salt__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class MakeconfTestCase(TestCase):
    '''
    Test cases for salt.states.makeconf
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Test to verify that the variable is in the ``make.conf``
        and has the provided settings.
        '''
        name = 'makeopts'

        ret = {'name': name,
               'result': True,
               'comment': '',
               'changes': {}}

        mock_t = MagicMock(return_value=True)
        with patch.dict(makeconf.__salt__, {'makeconf.get_var': mock_t}):
            comt = ('Variable {0} is already present in make.conf'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(makeconf.present(name), ret)

    # 'absent' function tests: 1

    def test_absent(self):
        '''
        Test to verify that the variable is not in the ``make.conf``.
        '''
        name = 'makeopts'

        ret = {'name': name,
               'result': True,
               'comment': '',
               'changes': {}}

        mock = MagicMock(return_value=None)
        with patch.dict(makeconf.__salt__, {'makeconf.get_var': mock}):
            comt = ('Variable {0} is already absent from make.conf'
                    .format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(makeconf.absent(name), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(MakeconfTestCase, needs_daemon=False)
109 tests/unit/states/user_test.py Normal file

@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
    MagicMock,
    patch,
    NO_MOCK,
    NO_MOCK_REASON
)

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import user

# Globals
user.__salt__ = {}
user.__opts__ = {}
user.__grains__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class UserTestCase(TestCase):
    '''
    Validate the user state
    '''
    def test_present(self):
        '''
        Test to ensure that the named user is present with
        the specified properties
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': False,
               'comment': ''}
        mock = MagicMock(return_value=False)
        mock2 = MagicMock(return_value=[])
        with patch.dict(user.__salt__, {'group.info': mock,
                                        'user.info': mock2,
                                        "user.chkey": mock2,
                                        'user.add': mock}):
            ret.update({'comment': 'The following group(s) are'
                        ' not present: salt'})
            self.assertDictEqual(user.present('salt', groups=['salt']), ret)

            mock = MagicMock(side_effect=[{'key': 'value'}, {'key': 'value'},
                                          {'key': 'value'}, False, False])
            with patch.object(user, '_changes', mock):
                with patch.dict(user.__opts__, {"test": True}):
                    ret.update({'comment': 'The following user attributes are'
                                ' set to be changed:\nkey: value\n',
                                'result': None})
                    self.assertDictEqual(user.present('salt'), ret)

                with patch.dict(user.__opts__, {"test": False}):
                    with patch.dict(user.__grains__, {"kernel": False}):
                        ret.update({'comment': "These values could not be"
                                    " changed: {'key': 'value'}",
                                    'result': False})
                        self.assertDictEqual(user.present('salt'), ret)

                with patch.dict(user.__opts__, {"test": True}):
                    ret.update({'comment': 'User salt set to'
                                ' be added', 'result': None})
                    self.assertDictEqual(user.present('salt'), ret)

                with patch.dict(user.__opts__, {"test": False}):
                    ret.update({'comment': 'Failed to create new'
                                ' user salt', 'result': False})
                    self.assertDictEqual(user.present('salt'), ret)

    def test_absent(self):
        '''
        Test to ensure that the named user is absent
        '''
        ret = {'name': 'salt',
               'changes': {},
               'result': None,
               'comment': ''}
        mock = MagicMock(side_effect=[True, True, False])
        mock1 = MagicMock(return_value=False)
        with patch.dict(user.__salt__, {'user.info': mock,
                                        'user.delete': mock1,
                                        'group.info': mock1}):
            with patch.dict(user.__opts__, {"test": True}):
                ret.update({'comment': 'User salt set for removal'})
                self.assertDictEqual(user.absent('salt'), ret)

            with patch.dict(user.__opts__, {"test": False}):
                ret.update({'comment': 'Failed to remove user salt',
                            'result': False})
                self.assertDictEqual(user.absent('salt'), ret)

            ret.update({'comment': 'User salt is not present',
                        'result': True})
            self.assertDictEqual(user.absent('salt'), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(UserTestCase, needs_daemon=False)