Merge remote branch 'refs/remotes/upstream/2017.7' into 2017.7_replace_with_newer_2016.11_win_pkg

Damon Atkins 2017-12-12 01:07:59 +11:00
commit 4b60b1ec84
29 changed files with 1159 additions and 379 deletions

View file

@ -10,6 +10,7 @@
driver:
name: docker
use_sudo: false
hostname: salt
privileged: true
username: root
volume:

View file

@ -240,7 +240,7 @@ class SyncClientMixin(object):
def low(self, fun, low, print_event=True, full_return=False):
'''
Check for deprecated usage and allow until Salt Oxygen.
Check for deprecated usage and allow until Salt Fluorine.
'''
msg = []
if 'args' in low:
@ -251,7 +251,7 @@ class SyncClientMixin(object):
low['kwarg'] = low.pop('kwargs')
if msg:
salt.utils.warn_until('Oxygen', ' '.join(msg))
salt.utils.warn_until('Fluorine', ' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)
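
This hunk (and the matching ones in salt/utils and the lxc state further down) pushes the deprecation deadline one release out, from Oxygen to Fluorine, buying the `args`/`kwargs` shim one more cycle. A rough sketch of how a `warn_until`-style release gate behaves; the release names, their ordering, and the helper body here are assumptions for illustration, not Salt's actual implementation:

```python
# Hedged sketch of a release-gated deprecation warning in the style of
# salt.utils.warn_until(). RELEASES and 'current' are illustrative.
import warnings

RELEASES = ['Nitrogen', 'Oxygen', 'Fluorine']  # assumed ordering

def warn_until(release, message, current='Oxygen'):
    if RELEASES.index(current) >= RELEASES.index(release):
        # The target release has shipped: the deprecated path must be removed.
        raise RuntimeError('Deprecated past {0}: {1}'.format(release, message))
    warnings.warn(message, DeprecationWarning)

warn_until('Fluorine', "'args' and 'kwargs' are deprecated; use 'arg' and 'kwarg'.")
```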

View file

@ -723,6 +723,7 @@ class Single(object):
self.thin_dir = kwargs['thin_dir']
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get('ssh_python_env')
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user)
@ -782,6 +783,10 @@ class Single(object):
self.serial = salt.payload.Serial(opts)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd('powershell $ENV:PROCESSOR_ARCHITECTURE')
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir'])
def __arg_comps(self):
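
The WinRM path now records the target's CPU architecture by running PowerShell on the remote host. A hedged local approximation of the same probe, using `subprocess` instead of Salt's shell wrapper; it assumes a Windows host with `powershell` on the PATH:

```python
# Illustrative stand-in for the exec_cmd() call above, run locally.
import subprocess

def windows_arch():
    out = subprocess.check_output(
        ['powershell', '-Command', '$ENV:PROCESSOR_ARCHITECTURE'])
    return out.decode().strip()  # typically 'AMD64' or 'x86'
```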

View file

@ -48,6 +48,10 @@ log = logging.getLogger(__name__)
# The name salt will identify the lib by
__virtualname__ = 'virtualbox'
# If no clone mode is specified in the virtualbox profile,
# then default to 0, which was the old default value
DEFAULT_CLONE_MODE = 0
def __virtual__():
'''
@ -85,6 +89,30 @@ def get_configured_provider():
return configured
def map_clonemode(vm_info):
"""
Convert the virtualbox config file values for clone_mode into the integers the API requires
"""
mode_map = {
'state': 0,
'child': 1,
'all': 2
}
if not vm_info:
return DEFAULT_CLONE_MODE
if 'clonemode' not in vm_info:
return DEFAULT_CLONE_MODE
if vm_info['clonemode'] in mode_map:
return mode_map[vm_info['clonemode']]
else:
raise SaltCloudSystemExit(
"Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(mode_map.keys()))
)
def create(vm_info):
"""
Creates a virtual machine from the given VM information.
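
Assuming the `mode_map` shown above, `map_clonemode()` resolves each supported profile value as follows; these assertions are illustrative only:

```python
# Illustrative assertions against map_clonemode() as defined above.
assert map_clonemode({'clonemode': 'state'}) == 0
assert map_clonemode({'clonemode': 'child'}) == 1
assert map_clonemode({'clonemode': 'all'}) == 2
assert map_clonemode({}) == DEFAULT_CLONE_MODE    # key absent
assert map_clonemode(None) == DEFAULT_CLONE_MODE  # no profile at all
```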
@ -102,6 +130,7 @@ def create(vm_info):
profile: <dict>
driver: <provider>:<profile>
clonefrom: <vm_name>
clonemode: <mode> (default: state, choices: state, child, all)
}
@type vm_info dict
@return dict of resulting vm. !!!Passwords can and should be included!!!
@ -133,6 +162,9 @@ def create(vm_info):
key_filename = config.get_cloud_config_value(
'private_key', vm_info, __opts__, search_global=False, default=None
)
clone_mode = map_clonemode(vm_info)
wait_for_pattern = vm_info['waitforpattern'] if 'waitforpattern' in vm_info.keys() else None
interface_index = vm_info['interfaceindex'] if 'interfaceindex' in vm_info.keys() else 0
log.debug("Going to fire event: starting create")
__utils__['cloud.fire_event'](
@ -147,7 +179,8 @@ def create(vm_info):
# to create the virtual machine.
request_kwargs = {
'name': vm_info['name'],
'clone_from': vm_info['clonefrom']
'clone_from': vm_info['clonefrom'],
'clone_mode': clone_mode
}
__utils__['cloud.fire_event'](
@ -163,17 +196,17 @@ def create(vm_info):
# Booting and deploying if needed
if power:
vb_start_vm(vm_name, timeout=boot_timeout)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)
if len(ips):
ip = ips[0]
ip = ips[interface_index]
log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
# ssh or smb using ip and install salt only if deploy is True
if deploy:
vm_info['key_filename'] = key_filename
vm_info['ssh_host'] = ip
res = __utils__['cloud.bootstrap'](vm_info)
res = __utils__['cloud.bootstrap'](vm_info, __opts__)
vm_result.update(res)
__utils__['cloud.fire_event'](

View file

@ -862,6 +862,10 @@ class MinionManager(MinionBase):
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
@ -936,6 +940,7 @@ class Minion(MinionBase):
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue or []
self.periodic_callbacks = {}
if io_loop is None:
if HAS_ZMQ:
@ -967,6 +972,19 @@ class Minion(MinionBase):
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
@ -1070,19 +1088,22 @@ class Minion(MinionBase):
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
@ -1136,9 +1157,6 @@ class Minion(MinionBase):
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
@ -2180,6 +2198,118 @@ class Minion(MinionBase):
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
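
`setup_beacons()` (and `setup_scheduler()` below) hang their recurring work off tornado's `PeriodicCallback`. A minimal, self-contained sketch of that pattern, with an illustrative interval and callback:

```python
# Minimal sketch of the tornado PeriodicCallback pattern used above.
import tornado.ioloop

def heartbeat():
    # Stand-in for handle_beacons()/handle_schedule().
    print('tick')

cb = tornado.ioloop.PeriodicCallback(heartbeat, 1000)  # interval in ms
cb.start()
# tornado.ioloop.IOLoop.current().start()  # uncomment to run the loop
```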
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
@ -2191,6 +2321,10 @@ class Minion(MinionBase):
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
@ -2205,31 +2339,9 @@ class Minion(MinionBase):
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
self.setup_beacons()
self.setup_scheduler()
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
@ -2247,30 +2359,7 @@ class Minion(MinionBase):
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons, sync=False)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:

View file

@ -147,8 +147,24 @@ def _render_tab(lst):
cron['cmd']
)
)
for spec in lst['special']:
ret.append('{0} {1}\n'.format(spec['spec'], spec['cmd']))
for cron in lst['special']:
if cron['comment'] is not None or cron['identifier'] is not None:
comment = '#'
if cron['comment']:
comment += ' {0}'.format(
cron['comment'].rstrip().replace('\n', '\n# '))
if cron['identifier']:
comment += ' {0}:{1}'.format(SALT_CRON_IDENTIFIER,
cron['identifier'])
comment += '\n'
ret.append(comment)
ret.append('{0}{1} {2}\n'.format(
cron['commented'] is True and '#DISABLED#' or '',
cron['spec'],
cron['cmd']
)
)
return ret
@ -317,7 +333,15 @@ def list_tab(user):
continue
dat['spec'] = comps[0]
dat['cmd'] = ' '.join(comps[1:])
dat['identifier'] = identifier
dat['comment'] = comment
dat['commented'] = False
if commented_cron_job:
dat['commented'] = True
ret['special'].append(dat)
identifier = None
comment = None
commented_cron_job = False
elif line.startswith('#'):
# It's a comment! Catch it!
comment_line = line.lstrip('# ')
@ -363,11 +387,17 @@ def list_tab(user):
ret['pre'].append(line)
return ret
# For consistency's sake
ls = salt.utils.alias_function(list_tab, 'ls')
def set_special(user, special, cmd):
def set_special(user,
special,
cmd,
commented=False,
comment=None,
identifier=None):
'''
Set up a special command in the crontab.
@ -379,11 +409,60 @@ def set_special(user, special, cmd):
'''
lst = list_tab(user)
for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']:
cid = _cron_id(cron)
if _cron_matched(cron, cmd, identifier):
test_setted_id = (
cron['identifier'] is None
and SALT_CRON_NO_IDENTIFIER
or cron['identifier'])
tests = [(cron['comment'], comment),
(cron['commented'], commented),
(identifier, test_setted_id),
(cron['spec'], special)]
if cid or identifier:
tests.append((cron['cmd'], cmd))
if any([_needs_change(x, y) for x, y in tests]):
rm_special(user, cmd, identifier=cid)
# Use old values when setting the new job if there was no
# change needed for a given parameter
if not _needs_change(cron['spec'], special):
special = cron['spec']
if not _needs_change(cron['commented'], commented):
commented = cron['commented']
if not _needs_change(cron['comment'], comment):
comment = cron['comment']
if not _needs_change(cron['cmd'], cmd):
cmd = cron['cmd']
if (
cid == SALT_CRON_NO_IDENTIFIER
):
if identifier:
cid = identifier
if (
cid == SALT_CRON_NO_IDENTIFIER
and cron['identifier'] is None
):
cid = None
cron['identifier'] = cid
if not cid or (
cid and not _needs_change(cid, identifier)
):
identifier = cid
jret = set_special(user, special, cmd, commented=commented,
comment=comment, identifier=identifier)
if jret == 'new':
return 'updated'
else:
return jret
return 'present'
spec = {'spec': special,
'cmd': cmd}
lst['special'].append(spec)
cron = {'spec': special,
'cmd': cmd,
'identifier': identifier,
'comment': comment,
'commented': commented}
lst['special'].append(cron)
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
@ -536,7 +615,7 @@ def set_job(user,
return 'new'
def rm_special(user, special, cmd):
def rm_special(user, cmd, special=None, identifier=None):
'''
Remove a special cron job for a specified user.
@ -544,22 +623,28 @@ def rm_special(user, special, cmd):
.. code-block:: bash
salt '*' cron.rm_job root @hourly /usr/bin/foo
salt '*' cron.rm_special root /usr/bin/foo
'''
lst = list_tab(user)
ret = 'absent'
rm_ = None
for ind in range(len(lst['special'])):
if lst['special'][ind]['cmd'] == cmd and \
lst['special'][ind]['spec'] == special:
lst['special'].pop(ind)
rm_ = ind
if rm_ is not None:
break
if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
if special is None:
# No special param was specified
rm_ = ind
else:
if lst['special'][ind]['spec'] == special:
rm_ = ind
if rm_ is not None:
lst['special'].pop(rm_)
ret = 'removed'
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit
return comdat['stderr']
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return ret
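
With the reworked signature, `rm_special()` matches on the command, and only optionally narrows by spec or identifier. Illustrative calls with fabricated values:

```python
# Illustrative calls against the rm_special() defined above.
rm_special('root', '/usr/bin/foo')                      # match by command only
rm_special('root', '/usr/bin/foo', special='@hourly')   # also require the spec
rm_special('root', '/usr/bin/foo', identifier='job42')  # match by identifier
```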
@ -610,6 +695,7 @@ def rm_job(user,
return comdat['stderr']
return ret
rm = salt.utils.alias_function(rm_job, 'rm')

View file

@ -3368,7 +3368,11 @@ def stats(path, hash_type=None, follow_symlinks=True):
pstat = os.lstat(path)
except OSError:
# Not a broken symlink, just a nonexistent path
return ret
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('Path not found: {0}'.format(path))
else:
if follow_symlinks:
pstat = os.stat(path)
@ -3832,8 +3836,15 @@ def get_managed(
parsed_scheme = urlparsed_source.scheme
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')
if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
if unix_local_source:
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'
@ -3841,9 +3852,10 @@ def get_managed(
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found'.format(source)
elif not source_hash and parsed_scheme == 'file':
elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
@ -4193,12 +4205,6 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
# Check permissions
perms = {}
cur = stats(name, follow_symlinks=follow_symlinks)
if not cur:
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('{0} does not exist'.format(name))
perms['luser'] = cur['user']
perms['lgroup'] = cur['group']
perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
@ -4498,11 +4504,18 @@ def check_file_meta(
'''
changes = {}
if not source_sum:
source_sum = {}
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
source_sum = dict()
try:
lstats = stats(name, hash_type=source_sum.get('hash_type', None),
follow_symlinks=False)
except CommandExecutionError:
lstats = {}
if not lstats:
changes['newfile'] = name
return changes
if 'hsum' in source_sum:
if source_sum['hsum'] != lstats['sum']:
if not sfn and source:
@ -4741,21 +4754,22 @@ def manage_file(name,
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if source:
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
if _urlparse(source).scheme in ('salt', 'file', ''):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
@ -4785,7 +4799,7 @@ def manage_file(name,
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
@ -4973,8 +4987,6 @@ def manage_file(name,
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
@ -4998,6 +5010,8 @@ def manage_file(name,
)
ret['result'] = False
return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)

View file

@ -585,7 +585,8 @@ def _parse_members(settype, members):
def _parse_member(settype, member, strict=False):
subtypes = settype.split(':')[1].split(',')
parts = member.split(' ')
all_parts = member.split(' ', 1)
parts = all_parts[0].split(',')
parsed_member = []
for i in range(len(subtypes)):
@ -610,8 +611,8 @@ def _parse_member(settype, member, strict=False):
parsed_member.append(part)
if len(parts) > len(subtypes):
parsed_member.append(' '.join(parts[len(subtypes):]))
if len(all_parts) > 1:
parsed_member.append(all_parts[1])
return parsed_member
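
Splitting on the first space before splitting on commas keeps an ipset comment intact as a single trailing field instead of scattering it across subtypes. A worked example with a fabricated `hash:ip,port` member:

```python
# Worked example of the split order above (fabricated member string).
member = '192.168.0.4,tcp:80 "allow web"'
all_parts = member.split(' ', 1)   # ['192.168.0.4,tcp:80', '"allow web"']
parts = all_parts[0].split(',')    # ['192.168.0.4', 'tcp:80']
print(parts, all_parts[1])
```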

View file

@ -19,11 +19,12 @@ import logging
import time
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves import range, map # pylint: disable=import-error,redefined-builtin
from salt.ext.six import string_types
# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.decorators as decorators
from salt.utils.locales import sdecode as _sdecode
from salt.exceptions import CommandExecutionError, SaltInvocationError
@ -520,16 +521,72 @@ def get_auto_login():
return False if ret['retcode'] else ret['stdout']
def enable_auto_login(name):
def _kcpassword(password):
'''
Internal function for obfuscating the password used for AutoLogin
This is later written as the contents of the ``/etc/kcpassword`` file
.. versionadded:: 2017.7.3
Adapted from:
https://github.com/timsutton/osx-vm-templates/blob/master/scripts/support/set_kcpassword.py
Args:
password (str):
The password to obfuscate
Returns:
str: The obfuscated password
'''
# The magic 11 bytes - these are just repeated
# 0x7D 0x89 0x52 0x23 0xD2 0xBC 0xDD 0xEA 0xA3 0xB9 0x1F
key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]
key_len = len(key)
# Convert each character to a byte
password = list(map(ord, password))
# pad password length out to an even multiple of key length
remainder = len(password) % key_len
if remainder > 0:
password = password + [0] * (key_len - remainder)
# Break the password into chunks the size of len(key) (11)
for chunk_index in range(0, len(password), len(key)):
# Reset the key_index to 0 for each iteration
key_index = 0
# Do an XOR on each character of that chunk of the password with the
# corresponding item in the key
# The length of the password, or the length of the key, whichever is
# smaller
for password_index in range(chunk_index,
min(chunk_index + len(key), len(password))):
password[password_index] = password[password_index] ^ key[key_index]
key_index += 1
# Convert each byte back to a character
password = list(map(chr, password))
return ''.join(password)
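
Because the obfuscation is a repeating-key XOR, running the output back through `_kcpassword()` recovers the zero-padded original, which makes the helper easy to sanity-check. An illustrative round trip:

```python
# Illustrative round trip through _kcpassword() as defined above.
obfuscated = _kcpassword('hunter2')
recovered = _kcpassword(obfuscated).rstrip('\x00')  # strip key-length padding
assert recovered == 'hunter2'
```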
def enable_auto_login(name, password):
'''
.. versionadded:: 2016.3.0
Configures the machine to auto login with the specified user
:param str name: The user account use for auto login
Args:
:return: True if successful, False if not
:rtype: bool
name (str): The user account to use for auto login
password (str): The password to use for auto login
.. versionadded:: 2017.7.3
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -537,6 +594,7 @@ def enable_auto_login(name):
salt '*' user.enable_auto_login stevej
'''
# Make the entry into the defaults file
cmd = ['defaults',
'write',
'/Library/Preferences/com.apple.loginwindow.plist',
@ -544,6 +602,13 @@ def enable_auto_login(name):
name]
__salt__['cmd.run'](cmd)
current = get_auto_login()
# Create/Update the kcpassword file with an obfuscated password
o_password = _kcpassword(password=password)
with salt.utils.files.set_umask(0o077):
with salt.utils.fopen('/etc/kcpassword', 'w') as fd:
fd.write(o_password)
return current if isinstance(current, bool) else current.lower() == name.lower()
@ -553,8 +618,8 @@ def disable_auto_login():
Disables auto login on the machine
:return: True if successful, False if not
:rtype: bool
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -562,6 +627,11 @@ def disable_auto_login():
salt '*' user.disable_auto_login
'''
# Remove the kcpassword file
cmd = 'rm -f /etc/kcpassword'
__salt__['cmd.run'](cmd)
# Remove the entry from the defaults file
cmd = ['defaults',
'delete',
'/Library/Preferences/com.apple.loginwindow.plist',

View file

@ -1084,8 +1084,8 @@ def build_routes(iface, **settings):
log.debug("IPv4 routes:\n{0}".format(opts4))
log.debug("IPv6 routes:\n{0}".format(opts6))
routecfg = template.render(routes=opts4)
routecfg6 = template.render(routes=opts6)
routecfg = template.render(routes=opts4, iface=iface)
routecfg6 = template.render(routes=opts6, iface=iface)
if settings['test']:
routes = _read_temp(routecfg)

View file

@ -99,17 +99,16 @@ def _set_retcode(ret, highstate=None):
__context__['retcode'] = 2
def _check_pillar(kwargs, pillar=None):
def _get_pillar_errors(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
Checks all pillars (external and internal) for errors.
Return an error message if any are found, otherwise None.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True
return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None
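
The collapsed return keeps three cases: `force` short-circuits to None, an explicit external pillar's `_errors` wins, and `__pillar__`'s `_errors` is the fallback. Illustrative behavior, with a fabricated `__pillar__` stand-in:

```python
# Illustrative behavior of _get_pillar_errors(); __pillar__ is a stand-in here.
__pillar__ = {'_errors': ['rendering failed']}

assert _get_pillar_errors({'force': True}) is None
assert _get_pillar_errors({}, pillar={'_errors': ['bad ext pillar']}) == ['bad ext pillar']
assert _get_pillar_errors({}) == ['rendering failed']
```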
def _wait(jid):
@ -411,10 +410,10 @@ def template(tem, queue=False, **kwargs):
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
@ -896,11 +895,10 @@ def highstate(test=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
ret = {}
@ -1107,11 +1105,10 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
umask = os.umask(0o77)
@ -1126,7 +1123,6 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mods = mods.split(',')
st_.push_active()
ret = {}
try:
high_, errors = st_.render_highstate({opts['environment']: mods})
@ -1233,11 +1229,10 @@ def top(topfn, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
st_.opts['state_top'] = salt.utils.url.create(topfn)
@ -1295,10 +1290,10 @@ def show_highstate(queue=False, **kwargs):
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
@ -1329,10 +1324,10 @@ def show_lowstate(queue=False, **kwargs):
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
@ -1430,11 +1425,10 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
if isinstance(mods, six.string_types):
split_mods = mods.split(',')
@ -1516,10 +1510,10 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if isinstance(mods, six.string_types):
mods = mods.split(',')
@ -1603,10 +1597,10 @@ def show_sls(mods, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if isinstance(mods, six.string_types):
mods = mods.split(',')
@ -1652,10 +1646,10 @@ def show_top(queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()

View file

@ -791,7 +791,7 @@ def chgrp(path, group):
def stats(path, hash_type='sha256', follow_symlinks=True):
'''
Return a dict containing the stats for a given file
Return a dict containing the stats about a given file
Under Windows, `gid` will equal `uid` and `group` will equal `user`.
@ -820,6 +820,8 @@ def stats(path, hash_type='sha256', follow_symlinks=True):
salt '*' file.stats /etc/passwd
'''
# This is to mirror the behavior of file.py. `check_file_meta` expects an
# empty dictionary when the file does not exist
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
@ -1227,33 +1229,37 @@ def mkdir(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful
@ -1312,33 +1318,37 @@ def makedirs_(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directly, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
.. note::
@ -1423,36 +1433,40 @@ def makedirs_perms(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful, otherwise raise an error
bool: True if successful, otherwise raises an error
CLI Example:
@ -1505,45 +1519,54 @@ def check_perms(path,
deny_perms=None,
inheritance=True):
'''
Set owner and permissions for each directory created.
Set owner and permissions for each directory created. Used mostly by the
state system.
Args:
path (str): The full path to the directory.
ret (dict): A dictionary to append changes to and return. If not passed,
will create a new dictionary to return.
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful, otherwise raise an error
dict: A dictionary of changes made to the object
Raises:
CommandExecutionError: If the object does not exist
CLI Example:
@ -1558,6 +1581,9 @@ def check_perms(path,
# Specify advanced attributes with a list
salt '*' file.check_perms C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
path = os.path.expanduser(path)
if not ret:

View file

@ -619,8 +619,8 @@ class _policy_info(object):
},
},
'RemoteRegistryExactPaths': {
'Policy': 'Network access: Remotely accessible registry '
'paths',
'Policy': 'Network access: Remotely accessible '
'registry paths',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
@ -632,8 +632,8 @@ class _policy_info(object):
},
},
'RemoteRegistryPaths': {
'Policy': 'Network access: Remotely accessible registry '
'paths and sub-paths',
'Policy': 'Network access: Remotely accessible '
'registry paths and sub-paths',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
@ -644,8 +644,8 @@ class _policy_info(object):
},
},
'RestrictNullSessAccess': {
'Policy': 'Network access: Restrict anonymous access to '
'Named Pipes and Shares',
'Policy': 'Network access: Restrict anonymous access '
'to Named Pipes and Shares',
'lgpo_section': self.security_options_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Registry': {
@ -898,9 +898,9 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'CachedLogonsCount': {
'Policy': 'Interactive logon: Number of previous logons '
'to cache (in case domain controller is not '
'available)',
'Policy': 'Interactive logon: Number of previous '
'logons to cache (in case domain controller '
'is not available)',
'Settings': {
'Function': '_in_range_inclusive',
'Args': {'min': 0, 'max': 50}
@ -915,8 +915,9 @@ class _policy_info(object):
},
},
'ForceUnlockLogon': {
'Policy': 'Interactive logon: Require Domain Controller '
'authentication to unlock workstation',
'Policy': 'Interactive logon: Require Domain '
'Controller authentication to unlock '
'workstation',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -983,8 +984,8 @@ class _policy_info(object):
},
'EnableUIADesktopToggle': {
'Policy': 'User Account Control: Allow UIAccess '
'applications to prompt for elevation without '
'using the secure desktop',
'applications to prompt for elevation '
'without using the secure desktop',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -998,8 +999,8 @@ class _policy_info(object):
},
'ConsentPromptBehaviorAdmin': {
'Policy': 'User Account Control: Behavior of the '
'elevation prompt for administrators in Admin '
'Approval Mode',
'elevation prompt for administrators in '
'Admin Approval Mode',
'Settings': self.uac_admin_prompt_lookup.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1077,7 +1078,7 @@ class _policy_info(object):
},
'EnableSecureUIAPaths': {
'Policy': 'User Account Control: Only elevate UIAccess '
'applicaitons that are installed in secure '
'applications that are installed in secure '
'locations',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
@ -1091,8 +1092,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'EnableLUA': {
'Policy': 'User Account Control: Run all administrators '
'in Admin Approval Mode',
'Policy': 'User Account Control: Run all '
'administrators in Admin Approval Mode',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1354,8 +1355,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'EnableForcedLogoff': {
'Policy': 'Microsoft network server: Disconnect clients '
'when logon hours expire',
'Policy': 'Microsoft network server: Disconnect '
'clients when logon hours expire',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1422,7 +1423,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'UndockWithoutLogon': {
'Policy': 'Devices: Allow undock without having to log on',
'Policy': 'Devices: Allow undock without having to log '
'on',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1497,8 +1499,8 @@ class _policy_info(object):
},
},
'SubmitControl': {
'Policy': 'Domain controller: Allow server operators to '
'schedule tasks',
'Policy': 'Domain controller: Allow server operators '
'to schedule tasks',
'Settings': self.enabled_one_disabled_zero_strings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1577,8 +1579,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_strings_transform,
},
'SignSecureChannel': {
'Policy': 'Domain member: Digitally sign secure channel '
'data (when possible)',
'Policy': 'Domain member: Digitally sign secure '
'channel data (when possible)',
'Settings': self.enabled_one_disabled_zero_strings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -2301,7 +2303,7 @@ class _policy_info(object):
},
'RecoveryConsoleSecurityLevel': {
'Policy': 'Recovery console: Allow automatic '
'adminstrative logon',
'administrative logon',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -2433,15 +2435,18 @@ class _policy_info(object):
'''
converts a binary 0/1 to Disabled/Enabled
'''
if val is not None:
if ord(val) == 0:
return 'Disabled'
elif ord(val) == 1:
return 'Enabled'
try:
if val is not None:
if ord(val) == 0:
return 'Disabled'
elif ord(val) == 1:
return 'Enabled'
else:
return 'Invalid Value'
else:
return 'Invalid Value'
else:
return 'Not Defined'
return 'Not Defined'
except TypeError:
return 'Invalid Value'
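
The new try/except covers registry values that arrive as ints rather than one-character strings, where `ord()` raises TypeError (the likely situation on Python 3). A two-line demonstration:

```python
# ord() accepts a one-character string but raises TypeError for an int.
print(ord('\x01'))  # 1
try:
    ord(1)
except TypeError:
    print('Invalid Value')
```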
@classmethod
def _binary_enable_zero_disable_one_reverse_conversion(cls, val, **kwargs):
@ -3502,7 +3507,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]),
six.unichr(len(this_element_value.encode('utf-16-le'))),
six.unichr(len(this_element_value.encode('utf-16-le', '' if six.PY2 else 'surrogatepass'))),
this_element_value)
return expected_string
@ -4242,8 +4247,8 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
for adm_namespace in admtemplate_data:
for adm_policy in admtemplate_data[adm_namespace]:
if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured':
if adm_policy in base_policy_settings[adm_namespace]:
base_policy_settings[adm_namespace].pop(adm_policy)
if base_policy_settings.get(adm_namespace, {}).pop(adm_policy, None) is not None:
log.debug('Policy "{0}" removed'.format(adm_policy))
else:
log.debug('adding {0} to base_policy_settings'.format(adm_policy))
if adm_namespace not in base_policy_settings:

View file

@ -317,7 +317,6 @@ def version(*names, **kwargs):
str: version string when a single package is specified.
dict: The package name(s) with the installed versions.
.. code-block:: cfg
{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]}

View file

@ -202,7 +202,14 @@ def _check_cron(user,
return 'present'
else:
for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']:
if _cron_matched(cron, cmd, identifier):
if any([_needs_change(x, y) for x, y in
((cron['spec'], special),
(cron['identifier'], identifier),
(cron['cmd'], cmd),
(cron['comment'], comment),
(cron['commented'], commented))]):
return 'update'
return 'present'
return 'absent'
@ -349,7 +356,12 @@ def present(name,
commented=commented,
identifier=identifier)
else:
data = __salt__['cron.set_special'](user, special, name)
data = __salt__['cron.set_special'](user=user,
special=special,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
@ -418,7 +430,7 @@ def absent(name,
if special is None:
data = __salt__['cron.rm_job'](user, name, identifier=identifier)
else:
data = __salt__['cron.rm_special'](user, special, name)
data = __salt__['cron.rm_special'](user, name, special=special, identifier=identifier)
if data == 'absent':
ret['comment'] = "Cron {0} already absent".format(name)

View file

@ -758,7 +758,7 @@ def _check_directory_win(name,
changes = {}
if not os.path.isdir(name):
changes = {'directory': 'new'}
changes = {name: {'directory': 'new'}}
else:
# Check owner
owner = salt.utils.win_dacl.get_owner(name)
@ -883,7 +883,11 @@ def _check_dir_meta(name,
'''
Check the changes in directory metadata
'''
stats = __salt__['file.stats'](name, None, follow_symlinks)
try:
stats = __salt__['file.stats'](name, None, follow_symlinks)
except CommandExecutionError:
stats = {}
changes = {}
if not stats:
changes['directory'] = 'new'
@ -2087,6 +2091,9 @@ def managed(name,
'name': name,
'result': True}
if not name:
return _error(ret, 'Destination file name is required')
if mode is not None and salt.utils.is_windows():
return _error(ret, 'The \'mode\' option is not supported on Windows')
@ -2237,8 +2244,6 @@ def managed(name,
ret['comment'] = 'Error while applying template on contents'
return ret
if not name:
return _error(ret, 'Must provide name to file.managed')
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
@ -2988,7 +2993,7 @@ def directory(name,
ret, _ = __salt__['file.check_perms'](
full, ret, user, group, dir_mode, follow_symlinks)
except CommandExecutionError as exc:
if not exc.strerror.endswith('does not exist'):
if not exc.strerror.startswith('Path not found'):
errors.append(exc.strerror)
if clean:

View file

@ -709,7 +709,7 @@ def edited_conf(name, lxc_conf=None, lxc_conf_unset=None):
# to keep this function around and cannot officially remove it. Progress of
# the new function will be tracked in https://github.com/saltstack/salt/issues/35523
salt.utils.warn_until(
'Oxygen',
'Fluorine',
'This state is unsuitable for setting parameters that appear more '
'than once in an LXC config file, or parameters which must appear in '
'a certain order (such as when configuring more than one network '

View file

@ -5,5 +5,6 @@
/{{route.netmask}}
{%- endif -%}
{%- if route.gateway %} via {{route.gateway}}
{%- else %} dev {{iface}}
{%- endif %}
{% endfor -%}
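
With `iface` now passed to the template, routes that lack a gateway render with a device fallback instead of an incomplete line. A hedged render of a simplified stand-in template via jinja2:

```python
# Simplified stand-in for the route template above, rendered with jinja2.
import jinja2

tmpl = jinja2.Template(
    '{{ route.ip }}{% if route.gateway %} via {{ route.gateway }}'
    '{% else %} dev {{ iface }}{% endif %}')

print(tmpl.render(route={'ip': '10.0.0.0/8', 'gateway': '10.1.1.1'}, iface='eth0'))
# 10.0.0.0/8 via 10.1.1.1
print(tmpl.render(route={'ip': '10.0.0.0/8'}, iface='eth0'))
# 10.0.0.0/8 dev eth0
```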

View file

@ -1143,10 +1143,10 @@ def format_call(fun,
continue
extra[key] = copy.deepcopy(value)
# We'll be showing errors to the users until Salt Oxygen comes out, after
# We'll be showing errors to the users until Salt Fluorine comes out, after
# which, errors will be raised instead.
warn_until(
'Oxygen',
'Fluorine',
'It\'s time to start raising `SaltInvocationError` instead of '
'returning warnings',
# Let's not show the deprecation warning on the console, there's no
@ -1183,7 +1183,7 @@ def format_call(fun,
'{0}. If you were trying to pass additional data to be used '
'in a template context, please populate \'context\' with '
'\'key: value\' pairs. Your approach will work until Salt '
'Oxygen is out.{1}'.format(
'Fluorine is out.{1}'.format(
msg,
'' if 'full' not in ret else ' Please update your state files.'
)

View file

@ -334,6 +334,7 @@ import errno
import random
import yaml
import copy
import weakref
# Import Salt libs
import salt.config
@ -845,6 +846,47 @@ class Schedule(object):
if key is not 'kwargs':
kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
# Only include these when running runner modules
if self.opts['__role'] == 'master':
jid = salt.utils.jid.gen_jid()
tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
event = salt.utils.event.get_event(
self.opts['__role'],
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=None
)
func_globals = {
'__jid__': jid,
'__user__': salt.utils.get_user(),
'__tag__': tag,
'__jid_event__': weakref.proxy(namespaced_event),
}
self_functions = copy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, func)
# Inject some useful globals into *all* of the functions' global
# namespace, only once per module, not per function
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
ret['return'] = self.functions[func](*args, **kwargs)
# runners do not provide retcode
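
The loop above writes `__jid__`, `__user__`, `__tag__`, and `__jid_event__` into each loaded module's function globals so scheduled runners can fire events. The underlying mechanism in miniature, with illustrative names:

```python
# A function's __globals__ is its module's namespace; writing into it makes
# the injected name resolvable inside the function body.
def job():
    return __jid__  # noqa: F821 - resolved via the injected global

job.__globals__['__jid__'] = '20171212010759'
print(job())  # 20171212010759
```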

View file

@ -197,7 +197,7 @@ def vb_get_network_adapters(machine_name=None, machine=None):
return network_adapters
def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=None):
def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=None, wait_for_pattern=None):
'''
Wait until a machine has a network address to return or quit after the timeout
@ -209,12 +209,16 @@ def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=N
@type machine_name: str
@param machine:
@type machine: IMachine
@param wait_for_pattern:
@type wait_for_pattern: str
@return:
@rtype: list
'''
kwargs = {
'machine_name': machine_name,
'machine': machine
'machine': machine,
'wait_for_pattern': wait_for_pattern
}
return wait_for(vb_get_network_addresses, timeout=timeout, step=step, default=[], func_kwargs=kwargs)
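For reference, wait_for here follows the usual poll-until-truthy contract; a self-contained sketch of that helper (assumed behaviour, not the actual salt.utils.virtualbox source):

import time

def wait_for(func, timeout=60, step=None, default=None,
             func_args=(), func_kwargs=None):
    # Call func repeatedly until it returns a truthy value or the
    # timeout (in seconds) elapses, then fall back to `default`.
    step = step or 1
    func_kwargs = func_kwargs or {}
    deadline = time.time() + timeout
    while time.time() <= deadline:
        ret = func(*func_args, **func_kwargs)
        if ret:
            return ret
        time.sleep(step)
    return default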
@ -251,7 +255,7 @@ def vb_wait_for_session_state(xp_session, state='Unlocked', timeout=10, step=None):
wait_for(_check_session_state, timeout=timeout, step=step, default=False, func_args=args)
def vb_get_network_addresses(machine_name=None, machine=None):
def vb_get_network_addresses(machine_name=None, machine=None, wait_for_pattern=None):
'''
TODO distinguish between private and public addresses
@ -276,21 +280,38 @@ def vb_get_network_addresses(machine_name=None, machine=None):
machine = vb_get_box().findMachine(machine_name)
ip_addresses = []
# We can't trust VirtualBox to give us up-to-date guest properties if the machine isn't running
# For some reason it may give us outdated (cached?) values
log.debug("checking for power on:")
if machine.state == _virtualboxManager.constants.MachineState_Running:
try:
total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count'))
except ValueError:
total_slots = 0
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(e.message)
log.debug("got power on:")
# Wait on an arbitrary named guest property
# e.g. have a DHCP client script run: VBoxControl guestproperty set dhcp_done 1
if wait_for_pattern and not machine.getGuestPropertyValue(wait_for_pattern):
log.debug("waiting for pattern:{}:".format(wait_for_pattern))
return None
_total_slots = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')
# Upon DHCP the net count drops to 0 and it takes some seconds for it to be set again
if not _total_slots:
log.debug("waiting for net count to be set")
return None
try:
total_slots = int(_total_slots)
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(e.message)
except ValueError as e:
log.debug(e.message)
return None
log.debug("returning ip_addresses:{}:".format(ip_addresses))
return ip_addresses
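Putting the pieces together: the guest flags readiness by setting a property, and the host polls for it by name. The VM name and property below are illustrative only:

# Inside the guest, e.g. from a DHCP client hook script:
#   VBoxControl guestproperty set dhcp_done 1
#
# On the host, block until the property is set and addresses are reported:
addresses = vb_wait_for_network_address(
    timeout=120,
    step=5,
    machine_name='my-cloned-vm',   # hypothetical VM name
    wait_for_pattern='dhcp_done',  # matches the guest property above
)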
@ -339,6 +360,7 @@ def vb_create_machine(name=None):
def vb_clone_vm(
name=None,
clone_from=None,
clone_mode=0,
timeout=10000,
**kwargs
):
@ -370,7 +392,7 @@ def vb_clone_vm(
progress = source_machine.cloneTo(
new_machine,
0, # CloneMode
clone_mode, # CloneMode
None # CloneOptions : None = Full?
)

View file

@ -7,12 +7,14 @@
from __future__ import absolute_import
import random
import string
import os
# Import Salt Testing Libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root
# Import Salt Libs
import salt.utils
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
@ -148,6 +150,86 @@ class MacUserModuleTest(ModuleCase):
self.run_function('user.delete', [CHANGE_USER])
raise
def test_mac_user_enable_auto_login(self):
'''
Tests mac_user functions that enable auto login
'''
# Make sure auto login is disabled before we start
if self.run_function('user.get_auto_login'):
self.skipTest('Auto login already enabled')
try:
# Does enable return True
self.assertTrue(
self.run_function('user.enable_auto_login',
['Spongebob', 'Squarepants']))
# Did it set the user entry in the plist file
self.assertEqual(
self.run_function('user.get_auto_login'),
'Spongebob')
# Did it generate the `/etc/kcpassword` file
self.assertTrue(os.path.exists('/etc/kcpassword'))
# Are the contents of the file correct
test_data = b'.\xf8\'B\xa0\xd9\xad\x8b\xcd\xcdl'
with salt.utils.fopen('/etc/kcpassword', 'rb') as f:
file_data = f.read()
self.assertEqual(test_data, file_data)
# Does disable return True
self.assertTrue(self.run_function('user.disable_auto_login'))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function('user.get_auto_login'))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists('/etc/kcpassword'))
finally:
# Make sure auto_login is disabled
self.assertTrue(self.run_function('user.disable_auto_login'))
# Verify auto login is actually disabled
if self.run_function('user.get_auto_login'):
raise Exception('Failed to disable auto login')
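The expected /etc/kcpassword contents above are just the password XORed with Apple's fixed 11-byte kcpassword key; a quick sketch reproduces them (the helper name is ours, not part of the module under test):

# Apple's well-known kcpassword XOR key, applied cyclically.
KC_KEY = (0x7D, 0x89, 0x52, 0x23, 0xD2, 0xBC, 0xDD, 0xEA, 0xA3, 0xB9, 0x1F)

def kcpassword_bytes(password):
    return bytes(bytearray(
        ord(char) ^ KC_KEY[idx % len(KC_KEY)]
        for idx, char in enumerate(password)
    ))

assert kcpassword_bytes('Squarepants') == b'.\xf8\'B\xa0\xd9\xad\x8b\xcd\xcdl'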
def test_mac_user_disable_auto_login(self):
'''
Tests mac_user functions that disable auto login
'''
# Make sure auto login is disabled before we start
# Is there an existing setting?
if self.run_function('user.get_auto_login'):
self.skipTest('Auto login already enabled')
try:
# Enable auto login for the test
self.run_function('user.enable_auto_login',
['Spongebob', 'Squarepants'])
# Make sure auto login got set up
if self.run_function('user.get_auto_login') != 'Spongebob':
raise Exception('Failed to enable auto login')
# Does disable return True
self.assertTrue(self.run_function('user.disable_auto_login'))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function('user.get_auto_login'))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists('/etc/kcpassword'))
finally:
# Make sure auto login is disabled
self.assertTrue(self.run_function('user.disable_auto_login'))
# Verify auto login is actually disabled
if self.run_function('user.get_auto_login'):
raise Exception('Failed to disable auto login')
def tearDown(self):
'''
Clean up after tests

View file

@ -2,12 +2,13 @@
# Import python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.case import ShellCase, SPMCase
class SPMTest(ShellCase):
class SPMTest(ShellCase, SPMCase):
'''
Test spm script
'''
@ -29,3 +30,47 @@ class SPMTest(ShellCase):
output = self.run_spm('doesnotexist')
for arg in expected_args:
self.assertIn(arg, ''.join(output))
def test_spm_assume_yes(self):
'''
test spm install with -y arg
'''
config = self._spm_config(assume_yes=False)
self._spm_build_files(config)
spm_file = os.path.join(config['spm_build_dir'],
'apache-201506-2.spm')
build = self.run_spm('build {0} -c {1}'.format(self.formula_dir,
self._tmp_spm))
install = self.run_spm('install {0} -c {1} -y'.format(spm_file,
self._tmp_spm))
self.assertTrue(os.path.exists(os.path.join(config['formula_path'],
'apache', 'apache.sls')))
def test_spm_force(self):
'''
test spm install with -f arg
'''
config = self._spm_config(assume_yes=False)
self._spm_build_files(config)
spm_file = os.path.join(config['spm_build_dir'],
'apache-201506-2.spm')
build = self.run_spm('build {0} -c {1}'.format(self.formula_dir,
self._tmp_spm))
install = self.run_spm('install {0} -c {1} -y'.format(spm_file,
self._tmp_spm))
self.assertTrue(os.path.exists(os.path.join(config['formula_path'],
'apache', 'apache.sls')))
# Check that it forces the install after it has already been installed
install = self.run_spm('install {0} -c {1} -y -f'.format(spm_file,
self._tmp_spm))
self.assertEqual(['... installing apache'], install)

View file

@ -67,17 +67,16 @@ def _test_managed_file_mode_keep_helper(testcase, local=False):
'''
DRY helper function to run the same test with a local or remote path
'''
rel_path = 'grail/scene33'
name = os.path.join(TMP, os.path.basename(rel_path))
grail_fs_path = os.path.join(FILES, 'file', 'base', rel_path)
grail = 'salt://' + rel_path if not local else grail_fs_path
name = os.path.join(TMP, 'scene33')
grail_fs_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene33')
grail = 'salt://grail/scene33' if not local else grail_fs_path
# Get the current mode so that we can put the file back the way we
# found it when we're done.
grail_fs_mode = os.stat(grail_fs_path).st_mode
initial_mode = 504 # 0770 octal
new_mode_1 = 384 # 0600 octal
new_mode_2 = 420 # 0644 octal
grail_fs_mode = int(testcase.run_function('file.get_mode', [grail_fs_path]), 8)
initial_mode = 0o770
new_mode_1 = 0o600
new_mode_2 = 0o644
# Set the initial mode, so we can be assured that when we set the mode
# to "keep", we're actually changing the permissions of the file to the
@ -568,6 +567,84 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
if os.path.exists('/tmp/sudoers'):
os.remove('/tmp/sudoers')
def test_managed_local_source_with_source_hash(self):
'''
Make sure that we enforce the source_hash even with local files
'''
name = os.path.join(TMP, 'local_source_with_source_hash')
local_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene33')
actual_hash = '567fd840bf1548edc35c48eb66cdd78bfdfcccff'
# Reverse the actual hash
bad_hash = actual_hash[::-1]
def remove_file():
try:
os.remove(name)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def do_test(clean=False):
for proto in ('file://', ''):
source = proto + local_path
log.debug('Trying source %s', source)
try:
ret = self.run_state(
'file.managed',
name=name,
source=source,
source_hash='sha1={0}'.format(bad_hash))
self.assertSaltFalseReturn(ret)
ret = ret[next(iter(ret))]
# Shouldn't be any changes
self.assertFalse(ret['changes'])
# Check that we identified a hash mismatch
self.assertIn(
'does not match actual checksum', ret['comment'])
ret = self.run_state(
'file.managed',
name=name,
source=source,
source_hash='sha1={0}'.format(actual_hash))
self.assertSaltTrueReturn(ret)
finally:
if clean:
remove_file()
remove_file()
log.debug('Trying with nonexistent destination file')
do_test()
log.debug('Trying with destination file already present')
with salt.utils.fopen(name, 'w'):
pass
try:
do_test(clean=False)
finally:
remove_file()
def test_managed_local_source_does_not_exist(self):
'''
Make sure that we exit gracefully when a local source doesn't exist
'''
name = os.path.join(TMP, 'local_source_does_not_exist')
local_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene99')
for proto in ('file://', ''):
source = proto + local_path
log.debug('Trying source %s', source)
ret = self.run_state(
'file.managed',
name=name,
source=source)
self.assertSaltFalseReturn(ret)
ret = ret[next(iter(ret))]
# Shouldn't be any changes
self.assertFalse(ret['changes'])
# Check that we identified the missing source file
self.assertIn(
'does not exist', ret['comment'])
def test_directory(self):
'''
file.directory
@ -585,19 +662,29 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
try:
tmp_dir = os.path.join(TMP, 'pgdata')
sym_dir = os.path.join(TMP, 'pg_data')
os.mkdir(tmp_dir, 0o700)
os.symlink(tmp_dir, sym_dir)
ret = self.run_state(
'file.directory', test=True, name=sym_dir, follow_symlinks=True,
mode=700
)
if IS_WINDOWS:
self.run_function('file.mkdir', [tmp_dir, 'Administrators'])
else:
os.mkdir(tmp_dir, 0o700)
self.run_function('file.symlink', [tmp_dir, sym_dir])
if IS_WINDOWS:
ret = self.run_state(
'file.directory', test=True, name=sym_dir,
follow_symlinks=True, win_owner='Administrators')
else:
ret = self.run_state(
'file.directory', test=True, name=sym_dir,
follow_symlinks=True, mode=700)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
self.run_function('file.remove', [tmp_dir])
if os.path.islink(sym_dir):
os.unlink(sym_dir)
self.run_function('file.remove', [sym_dir])
@skip_if_not_root
@skipIf(IS_WINDOWS, 'Mode not available in Windows')
@ -1592,25 +1679,24 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
'''
fname = 'append_issue_1864_makedirs'
name = os.path.join(TMP, fname)
try:
self.assertFalse(os.path.exists(name))
except AssertionError:
os.remove(name)
# Make sure the file is not there to begin with
if os.path.isfile(name):
self.run_function('file.remove', [name])
try:
# Non-existing file gets touched
if os.path.isfile(name):
# leftover from a previous run
os.remove(name)
ret = self.run_state(
'file.append', name=name, text='cheese', makedirs=True
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isfile(name):
os.remove(name)
self.run_function('file.remove', [name])
# Nested directory and file get touched
name = os.path.join(TMP, 'issue_1864', fname)
try:
ret = self.run_state(
'file.append', name=name, text='cheese', makedirs=True
@ -1618,20 +1704,17 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
finally:
if os.path.isfile(name):
os.remove(name)
self.run_function('file.remove', [name])
# Parent directory exists but file does not and makedirs is False
try:
# Parent directory exists but file does not and makedirs is False
ret = self.run_state(
'file.append', name=name, text='cheese'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(name))
finally:
shutil.rmtree(
os.path.join(TMP, 'issue_1864'),
ignore_errors=True
)
self.run_function('file.remove', [os.path.join(TMP, 'issue_1864')])
def test_prepend_issue_27401_makedirs(self):
'''
@ -1966,19 +2049,21 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_function('state.sls', mods='issue-8343')
for name, step in six.iteritems(ret):
self.assertSaltTrueReturn({name: step})
with salt.utils.fopen(testcase_filedest) as fp_:
contents = fp_.read().split(os.linesep)
self.assertEqual(
['#-- start salt managed zonestart -- PLEASE, DO NOT EDIT',
'foo',
'#-- end salt managed zonestart --',
'#',
'#-- start salt managed zoneend -- PLEASE, DO NOT EDIT',
'bar',
'#-- end salt managed zoneend --',
''],
contents
)
expected = [
'#-- start salt managed zonestart -- PLEASE, DO NOT EDIT',
'foo',
'#-- end salt managed zonestart --',
'#',
'#-- start salt managed zoneend -- PLEASE, DO NOT EDIT',
'bar',
'#-- end salt managed zoneend --',
'']
self.assertEqual(expected, contents)
finally:
if os.path.isdir(testcase_filedest):
os.unlink(testcase_filedest)

View file

@ -628,7 +628,7 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
description: Formula for installing Apache
'''))
def _spm_config(self):
def _spm_config(self, assume_yes=True):
self._tmp_spm = tempfile.mkdtemp()
config = self.get_temp_config('minion', **{
'spm_logfile': os.path.join(self._tmp_spm, 'log'),
@ -641,10 +641,10 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
'spm_db': os.path.join(self._tmp_spm, 'packages.db'),
'extension_modules': os.path.join(self._tmp_spm, 'modules'),
'file_roots': {'base': [self._tmp_spm, ]},
'formula_path': os.path.join(self._tmp_spm, 'spm'),
'formula_path': os.path.join(self._tmp_spm, 'salt'),
'pillar_path': os.path.join(self._tmp_spm, 'pillar'),
'reactor_path': os.path.join(self._tmp_spm, 'reactor'),
'assume_yes': True,
'assume_yes': bool(assume_yes),
'force': False,
'verbose': False,
'cache': 'localfs',
@ -652,6 +652,16 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
'spm_repo_dups': 'ignore',
'spm_share_dir': os.path.join(self._tmp_spm, 'share'),
})
import salt.utils
import yaml
if not os.path.isdir(config['formula_path']):
os.makedirs(config['formula_path'])
with salt.utils.fopen(os.path.join(self._tmp_spm, 'spm'), 'w') as fp:
fp.write(yaml.dump(config))
return config
def _spm_create_update_repo(self, config):

View file

@ -698,9 +698,9 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.top("reverse_top.sls"), "A")
mock = MagicMock(side_effect=[False, True, True])
with patch.object(state, '_check_pillar', mock):
with patch.dict(state.__pillar__, {"_errors": "E"}):
mock = MagicMock(side_effect=[['E'], None, None])
with patch.object(state, '_get_pillar_errors', mock):
with patch.dict(state.__pillar__, {"_errors": ['E']}):
self.assertListEqual(state.top("reverse_top.sls"), ret)
with patch.dict(state.__opts__, {"test": "A"}):
@ -857,14 +857,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
True),
["A"])
mock = MagicMock(side_effect=[False,
True,
True,
True,
True])
with patch.object(state, '_check_pillar', mock):
mock = MagicMock(side_effect=[['E', '1'], None, None, None, None])
with patch.object(state, '_get_pillar_errors', mock):
with patch.dict(state.__context__, {"retcode": 5}):
with patch.dict(state.__pillar__, {"_errors": "E1"}):
with patch.dict(state.__pillar__, {"_errors": ['E', '1']}):
self.assertListEqual(state.sls("core,edit.vim dev",
None,
None,
@ -979,3 +975,62 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
MockJson.flag = False
with patch('salt.utils.fopen', mock_open()):
self.assertTrue(state.pkg(tar_file, 0, "md5"))
def test_get_pillar_errors_CC(self):
'''
Test _get_pillar_errors function.
CC: External clean, Internal clean
:return:
'''
for int_pillar, ext_pillar in [({'foo': 'bar'}, {'fred': 'baz'}),
({'foo': 'bar'}, None),
({}, {'fred': 'baz'})]:
with patch('salt.modules.state.__pillar__', int_pillar):
for opts, res in [({'force': True}, None),
({'force': False}, None),
({}, None)]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
def test_get_pillar_errors_EC(self):
'''
Test _get_pillar_errors function.
EC: External erroneous, Internal clean
:return:
'''
errors = ['failure', 'everywhere']
for int_pillar, ext_pillar in [({'foo': 'bar'}, {'fred': 'baz', '_errors': errors}),
({}, {'fred': 'baz', '_errors': errors})]:
with patch('salt.modules.state.__pillar__', int_pillar):
for opts, res in [({'force': True}, None),
({'force': False}, errors),
({}, errors)]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
def test_get_pillar_errors_EE(self):
'''
Test _get_pillar_errors function.
EE: External erroneous, Internal erroneous
:return:
'''
errors = ['failure', 'everywhere']
for int_pillar, ext_pillar in [({'foo': 'bar', '_errors': errors}, {'fred': 'baz', '_errors': errors})]:
with patch('salt.modules.state.__pillar__', int_pillar):
for opts, res in [({'force': True}, None),
({'force': False}, errors),
({}, errors)]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
def test_get_pillar_errors_CE(self):
'''
Test _get_pillar_errors function.
CE: External clean, Internal erroneous
:return:
'''
errors = ['failure', 'everywhere']
for int_pillar, ext_pillar in [({'foo': 'bar', '_errors': errors}, {'fred': 'baz'}),
({'foo': 'bar', '_errors': errors}, None)]:
with patch('salt.modules.state.__pillar__', int_pillar):
for opts, res in [({'force': True}, None),
({'force': False}, errors),
({}, errors)]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
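Taken together, the four cases pin down a simple contract; a sketch of a compatible helper (not necessarily the shipped implementation; __pillar__ is the module dunder injected by the loader):

def _get_pillar_errors(kwargs, pillar=None):
    # force=True suppresses pillar errors entirely; otherwise report
    # '_errors' from the freshly rendered (external) pillar if present,
    # falling back to the in-memory __pillar__.
    if kwargs.get('force', False):
        return None
    ext_errors = (pillar or {}).get('_errors')
    return ext_errors or __pillar__.get('_errors') or None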

View file

@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
import salt.utils
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
if salt.utils.is_windows():
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
else:
FAKE_PATH = os.sep.join(['path', 'does', 'not', 'exist'])
def test_issue_43328_stats(self):
'''
Make sure that a CommandExecutionError is raised if the file does NOT
exist
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(CommandExecutionError,
win_file.stats,
self.FAKE_PATH)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file does NOT
exist
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
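Both tests exercise the same early guard; a simplified sketch follows (not the verbatim win_file source):

import os
from salt.exceptions import CommandExecutionError

def stats(path, hash_type=None, follow_symlinks=True):
    # Fail loudly instead of returning partial data for a missing path.
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    return {}  # the real module gathers and returns the stat data here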

View file

@ -577,7 +577,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
'file.copy': mock_cp,
'file.manage_file': mock_ex,
'cmd.run_all': mock_cmd_fail}):
comt = ('Must provide name to file.managed')
comt = ('Destination file name is required')
ret.update({'comment': comt, 'name': '', 'pchanges': {}})
self.assertDictEqual(filestate.managed(''), ret)
@ -743,7 +743,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
mock_check = MagicMock(return_value=(
None,
'The directory "{0}" will be changed'.format(name),
{'directory': 'new'}))
{name: {'directory': 'new'}}))
mock_error = CommandExecutionError
with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t,
'file.user_to_uid': mock_uid,
@ -801,16 +801,15 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
group=group),
ret)
with patch.object(os.path, 'isfile', mock_f):
with patch.object(os.path, 'isdir', mock_f):
with patch.dict(filestate.__opts__, {'test': True}):
if salt.utils.is_windows():
comt = 'The directory "{0}" will be changed' \
''.format(name)
p_chg = {'directory': 'new'}
else:
comt = ('The following files will be changed:\n{0}:'
' directory - new\n'.format(name))
p_chg = {'/etc/grub.conf': {'directory': 'new'}}
p_chg = {'/etc/grub.conf': {'directory': 'new'}}
ret.update({
'comment': comt,
'result': None,

View file

@ -131,3 +131,51 @@ class MinionTestCase(TestCase):
self.assertEqual(minion.jid_queue, [456, 789])
finally:
minion.destroy()
def test_beacons_before_connect(self):
'''
Tests that the 'beacons_before_connect' option causes the beacons to be initialized before connect.
'''
with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts['beacons_before_connect'] = True
try:
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
try:
minion.tune_in(start=True)
except RuntimeError:
pass
# Make sure beacons are initialized but the scheduler is not
self.assertTrue('beacons' in minion.periodic_callbacks)
self.assertTrue('schedule' not in minion.periodic_callbacks)
finally:
minion.destroy()
def test_scheduler_before_connect(self):
'''
Tests that the 'scheduler_before_connect' option causes the scheduler to be initialized before connect.
'''
with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts['scheduler_before_connect'] = True
try:
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
try:
minion.tune_in(start=True)
except RuntimeError:
pass
# Make sure the scheduler is initialized but the beacons are not
self.assertTrue('schedule' in minion.periodic_callbacks)
self.assertTrue('beacons' not in minion.periodic_callbacks)
finally:
minion.destroy()