Merge branch 'develop' into storage_policies-gh

Nicole Thomas 2017-09-26 13:21:56 -04:00 committed by GitHub
commit c51891c939
31 changed files with 2522 additions and 157 deletions


@ -689,6 +689,12 @@
# for a full explanation.
#multiprocessing: True
# Limit the maximum number of processes or threads created by salt-minion.
# This is useful to avoid resource exhaustion in case the minion receives more
# publications than it is able to handle, as it limits the number of spawned
# processes or threads. -1 is the default and disables the limit.
#process_count_max: -1
##### Logging settings #####
##########################################


@ -2419,6 +2419,23 @@ executed in a thread.
multiprocessing: True
.. conf_minion:: process_count_max
``process_count_max``
---------------------
.. versionadded:: Oxygen
Default: ``-1``
Limit the maximum number of processes or threads created by ``salt-minion``.
This is useful to avoid resource exhaustion in case the minion receives more
publications than it is able to handle, as it limits the number of spawned
processes or threads. ``-1`` is the default and disables the limit.
.. code-block:: yaml
process_count_max: -1
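For example, to allow at most 20 concurrent job processes on a minion (the
value ``20`` is purely illustrative; pick a limit that suits the host's
resources):
.. code-block:: yaml
    process_count_max: 20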
.. _minion-logging-settings:


@ -25,6 +25,9 @@ configuration:
- web*:
- test.*
- pkg.*
# Allow managers to use saltutil module functions
manager_.*:
- saltutil.*
Permission Issues
-----------------


@ -1,5 +1,5 @@
salt.runners.auth module
========================
salt.runners.auth
=================
.. automodule:: salt.runners.auth
:members:


@ -1,5 +1,5 @@
salt.runners.event module
=========================
salt.runners.event
==================
.. automodule:: salt.runners.event
:members:


@ -1,5 +1,5 @@
salt.runners.smartos_vmadm module
=================================
salt.runners.smartos_vmadm
==========================
.. automodule:: salt.runners.smartos_vmadm
:members:


@ -1,5 +1,5 @@
salt.runners.vistara module
===========================
salt.runners.vistara
====================
.. automodule:: salt.runners.vistara
:members:

File diff suppressed because it is too large.


@ -369,46 +369,13 @@ class LoadAuth(object):
eauth_config = self.opts['external_auth'][eauth]
if not groups:
groups = []
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
if group_config.rstrip('%') in groups:
group_auth_match = True
break
# If a group_auth_match is set it means only that we have a
# user which matches at least one or more of the groups defined
# in the configuration file.
external_auth_in_db = False
for entry in eauth_config:
if entry.startswith('^'):
external_auth_in_db = True
break
# If neither a catchall, a named membership or a group
# membership is found, there is no need to continue. Simply
# deny the user access.
if not ((name in eauth_config) |
('*' in eauth_config) |
group_auth_match | external_auth_in_db):
# Auth successful, but no matching user found in config
log.warning('Authorization failure occurred.')
return None
# We now have an authenticated session and it is time to determine
# what the user has access to.
auth_list = []
if name in eauth_config:
auth_list = eauth_config[name]
elif '*' in eauth_config:
auth_list = eauth_config['*']
if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(
eauth_config,
groups,
auth_list)
auth_list = self.ckminions.fill_auth_list(
eauth_config,
name,
groups)
auth_list = self.__process_acl(load, auth_list)


@ -481,18 +481,17 @@ def list_(bank):
Lists entries stored in the specified bank.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
banks = redis_server.smembers(bank_redis_key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
if not banks:
return []
return list(bank_keys)
return list(banks)
def contains(bank, key):
@ -500,15 +499,11 @@ def contains(bank, key):
Checks if the specified bank contains the specified key.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
return redis_server.sismember(bank_redis_key, key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
return False
return key in bank_keys


@ -3543,16 +3543,15 @@ def list_nodes_min(location=None, call=None):
for instance in instances:
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
items = instance['instancesSet']['item']
else:
item = instance['instancesSet']['item']
items = [instance['instancesSet']['item']]
for item in items:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
ret[name] = {'state': state, 'id': id}
ret[name] = {'state': state, 'id': id}
return ret


@ -101,7 +101,7 @@ __virtualname__ = 'libvirt'
log = logging.getLogger(__name__)
def libvirt_error_handler(ctx, error):
def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument
'''
Redirect stderr prints from libvirt to salt logging.
'''


@ -337,6 +337,9 @@ VALID_OPTS = {
# Whether or not processes should be forked when needed. The alternative is to use threading.
'multiprocessing': bool,
# Maximum number of concurrently active processes at any given point in time
'process_count_max': int,
# Whether or not the salt minion should run scheduled mine updates
'mine_enabled': bool,
@ -746,6 +749,10 @@ VALID_OPTS = {
'fileserver_limit_traversal': bool,
'fileserver_verify_config': bool,
# Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
# applied only if the user isn't matched by any other matcher.
'permissive_acl': bool,
# Optionally enables keeping the calculated user's auth list in the token file.
'keep_acl_in_token': bool,
@ -1258,6 +1265,7 @@ DEFAULT_MINION_OPTS = {
'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': True,
'process_count_max': -1,
'mine_enabled': True,
'mine_return_job': False,
'mine_interval': 60,
@ -1526,6 +1534,7 @@ DEFAULT_MASTER_OPTS = {
'external_auth': {},
'token_expire': 43200,
'token_expire_user_override': False,
'permissive_acl': False,
'keep_acl_in_token': False,
'eauth_acl_module': '',
'eauth_tokens': 'localfs',


@ -170,6 +170,14 @@ def clean_old_jobs(opts):
def mk_key(opts, user):
if HAS_PWD:
uid = None
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
# User doesn't exist in the system
if opts['client_acl_verify']:
return None
if salt.utils.platform.is_windows():
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
@ -197,9 +205,9 @@ def mk_key(opts, user):
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
if HAS_PWD and uid is not None:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
os.chown(keyfile, uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
@ -214,27 +222,26 @@ def access_keys(opts):
'''
# TODO: Need a way to get all available users for systems not supported by pwd module.
# For now users pattern matching will not work for publisher_acl.
users = []
keys = {}
publisher_acl = opts['publisher_acl']
acl_users = set(publisher_acl.keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
key = mk_key(opts, user)
if key is not None:
keys[user] = key
# Check other users matching ACL patterns
if opts['client_acl_verify'] and HAS_PWD:
log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function')
for user in pwd.getpwall():
users.append(user.pw_name)
log.profile('End pwd.getpwall() call in masterarpi access_keys function')
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
keys[user] = mk_key(opts, user)
# Check other users matching ACL patterns
if HAS_PWD:
for user in users:
user = user.pw_name
if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users):
keys[user] = mk_key(opts, user)
log.profile('End pwd.getpwall() call in masterarpi access_keys function')
return keys


@ -16,6 +16,7 @@ import os
import json
import socket
import sys
import glob
import re
import platform
import logging
@ -65,6 +66,7 @@ __salt__ = {
'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
'cmd.run_ps': salt.modules.cmdmod.powershell,
}
log = logging.getLogger(__name__)
@ -2472,3 +2474,119 @@ def default_gateway():
except Exception as exc:
pass
return grains
def fc_wwn():
'''
Return list of fiber channel HBA WWNs
'''
grains = {}
grains['fc_wwn'] = False
if salt.utils.platform.is_linux():
grains['fc_wwn'] = _linux_wwns()
elif salt.utils.platform.is_windows():
grains['fc_wwn'] = _windows_wwns()
return grains
def iscsi_iqn():
'''
Return iSCSI IQN
'''
grains = {}
grains['iscsi_iqn'] = False
if salt.utils.platform.is_linux():
grains['iscsi_iqn'] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains['iscsi_iqn'] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains['iscsi_iqn'] = _aix_iqn()
return grains
def _linux_iqn():
'''
Return iSCSI IQN from a Linux host.
'''
ret = []
initiator = '/etc/iscsi/initiatorname.iscsi'
if os.path.isfile(initiator):
with salt.utils.files.fopen(initiator, 'r') as _iscsi:
for line in _iscsi:
if line.find('InitiatorName') != -1:
iqn = line.split('=')
ret.extend([iqn[1]])
return ret
def _aix_iqn():
'''
Return iSCSI IQN from an AIX host.
'''
ret = []
aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name'
aixret = __salt__['cmd.run'](aixcmd)
if aixret[0].isalpha():
iqn = aixret.split()
ret.extend([iqn[1]])
return ret
def _linux_wwns():
'''
Return Fibre Channel port WWNs from a Linux host.
'''
ret = []
for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
with salt.utils.files.fopen(fcfile, 'r') as _wwn:
for line in _wwn:
ret.extend([line[2:]])
return ret
def _windows_iqn():
'''
Return iSCSI IQN from a Windows host.
'''
ret = []
wmic = salt.utils.path.which('wmic')
if not wmic:
return ret
namespace = r'\\root\WMI'
mspath = 'MSiSCSIInitiator_MethodClass'
get = 'iSCSINodeName'
cmdret = __salt__['cmd.run_all'](
'{0} /namespace:{1} path {2} get {3} /format:table'.format(
wmic, namespace, mspath, get))
for line in cmdret['stdout'].splitlines():
if line[0].isalpha():
continue
ret.extend([line])
return ret
def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []
cmdret = __salt__['cmd.run_ps'](ps_cmd)
for line in cmdret:
ret.append(line)
return ret


@ -1333,6 +1333,7 @@ class Minion(MinionBase):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
@ -1365,6 +1366,15 @@ class Minion(MinionBase):
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
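# If process_count_max is enabled (> 0), throttle job execution: once the
# number of currently running jobs reaches the limit, wait (rechecking every
# 10 seconds) until a slot frees up before spawning another process or thread.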
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other


@ -599,9 +599,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
try:
vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(vpc_ids)}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return False.
return {'exists': False}
return {'error': boto_err}
return {'exists': bool(vpc_ids)}
def create(cidr_block, instance_tenancy=None, vpc_name=None,
@ -723,27 +728,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return None.
return {'vpc': None}
return {'error': boto_err}
filter_parameters = {'vpc_ids': vpc_id}
if not vpc_id:
return {'vpc': None}
filter_parameters = {'vpc_ids': vpc_id}
try:
vpcs = conn.get_all_vpcs(**filter_parameters)
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: {0}'.format(vpc.id))
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: {0}'.format(vpc.id))
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r}
else:
return {'vpc': None}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r}
else:
return {'vpc': None}
def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
@ -809,7 +821,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
Given subnet properties, find and return matching subnet ids
'''
if not any(subnet_name, tags, cidr):
if not any([subnet_name, tags, cidr]):
raise SaltInvocationError('At least one of the following must be '
'specified: subnet_name, cidr or tags.')
@ -927,34 +939,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
filter_parameters = {'filters': {}}
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
if subnet_id:
filter_parameters['subnet_ids'] = [subnet_id]
if subnet_name:
filter_parameters['filters']['tag:Name'] = subnet_name
if cidr:
filter_parameters['filters']['cidr'] = cidr
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
if zones:
filter_parameters['filters']['availability_zone'] = zones
filter_parameters = {'filters': {}}
if subnet_id:
filter_parameters['subnet_ids'] = [subnet_id]
if subnet_name:
filter_parameters['filters']['tag:Name'] = subnet_name
if cidr:
filter_parameters['filters']['cidr'] = cidr
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
if zones:
filter_parameters['filters']['availability_zone'] = zones
try:
subnets = conn.get_all_subnets(**filter_parameters)
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
if subnets:
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
return {'exists': True}
else:
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
# Subnet was not found: handle the error and return False.
return {'exists': False}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': boto_err}
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
if subnets:
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
return {'exists': True}
else:
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
return {'exists': False}
def get_subnet_association(subnets, region=None, key=None, keyid=None,


@ -4281,7 +4281,8 @@ def extract_hash(hash_fn,
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
'''
Check the permissions on files, modify attributes and chown if needed
Check the permissions on files, modify attributes and chown if needed. File
attributes are only verified if lsattr(1) is installed.
CLI Example:
@ -4293,6 +4294,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
lsattr_cmd = salt.utils.path.which('lsattr')
if not ret:
ret = {'name': name,
@ -4318,7 +4320,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
is_dir = os.path.isdir(name)
if not salt.utils.platform.is_windows() and not is_dir:
if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
# List attributes on file
perms['lattrs'] = ''.join(lsattr(name)[name])
# Remove attributes on file so changes can be enforced.
@ -4429,7 +4431,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
if __opts__['test'] is True and ret['changes']:
ret['result'] = None
if not salt.utils.platform.is_windows() and not is_dir:
if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
# Replace attributes on file if it had been removed
if perms['lattrs']:
chattr(name, operator='add', attributes=perms['lattrs'])


@ -132,7 +132,7 @@ def procs():
uind = 0
pind = 0
cind = 0
plines = __salt__['cmd.run'](__grains__['ps']).splitlines()
plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
guide = plines.pop(0).split()
if 'USER' in guide:
uind = guide.index('USER')
@ -1417,7 +1417,7 @@ def pid(sig):
'''
cmd = __grains__['ps']
output = __salt__['cmd.run_stdout'](cmd)
output = __salt__['cmd.run_stdout'](cmd, python_shell=True)
pids = ''
for line in output.splitlines():


@ -110,7 +110,7 @@ def available(software=True,
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
Include driver updates in the results (default is True)
summary (bool):
- True: Return a summary of updates available for each category.


@ -1347,6 +1347,7 @@ def install(name=None,
to_install = []
to_downgrade = []
to_reinstall = []
_available = {}
# The above three lists will be populated with tuples containing the
# package name and the string being used for this particular package
# modification. The reason for this method is that the string we use for


@ -77,6 +77,9 @@ def __virtual__():
) == 0:
return 'zfs'
if __grains__['kernel'] == 'OpenBSD':
return False
_zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse')
if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')):
return 'zfs'


@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
'''
Provide external pillar data from RethinkDB
.. versionadded:: Oxygen
:depends: rethinkdb (on the salt-master)
salt master rethinkdb configuration
===================================
These variables must be configured in your master configuration file.
* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
Defaults to ``'28015'``
* ``rethinkdb.database`` - The database to connect to.
Defaults to ``'salt'``
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
Defaults to ``''``
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
Defaults to ``''``
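For example, a matching master configuration might look like this (the host
name and credentials below are placeholders, not defaults):
.. code-block:: yaml
    rethinkdb.host: rethink01.example.com
    rethinkdb.port: 28015
    rethinkdb.database: salt
    rethinkdb.username: pillar_ro
    rethinkdb.password: secret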
salt-master ext_pillar configuration
====================================
The ext_pillar function arguments are given in single line dictionary notation.
.. code-block:: yaml
ext_pillar:
- rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}
In the example above, the following happens:
* The salt-master will look for external pillars in the 'ext_pillar' table
on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'
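The same ``ext_pillar`` entry can equivalently be written in block notation:
.. code-block:: yaml
    ext_pillar:
      - rethinkdb:
          table: ext_pillar
          id_field: minion_id
          field: pillar_root
          pillar_key: external_pillar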
Module Documentation
====================
'''
from __future__ import absolute_import
# Import python libraries
import logging
# Import 3rd party libraries
try:
import rethinkdb
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
__virtualname__ = 'rethinkdb'
__opts__ = {
'rethinkdb.host': 'salt',
'rethinkdb.port': '28015',
'rethinkdb.database': 'salt',
'rethinkdb.username': None,
'rethinkdb.password': None
}
def __virtual__():
if not HAS_RETHINKDB:
return False
return True
# Configure logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id,
pillar,
table='pillar',
id_field=None,
field=None,
pillar_key=None):
'''
Collect minion external pillars from a RethinkDB database
Arguments:
* `table`: The RethinkDB table containing external pillar information.
Defaults to ``'pillar'``
* `id_field`: Field in document containing the minion id.
If blank then we assume the table index matches minion ids
* `field`: Specific field in the document used for pillar data, if blank
then the entire document will be used
* `pillar_key`: The salt-master will nest found external pillars under
this key before merging into the minion pillars. If blank, external
pillars will be merged at top level
'''
host = __opts__['rethinkdb.host']
port = __opts__['rethinkdb.port']
database = __opts__['rethinkdb.database']
username = __opts__['rethinkdb.username']
password = __opts__['rethinkdb.password']
log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar'
.format(host, port, username))
# Connect to the database
conn = rethinkdb.connect(host=host,
port=port,
db=database,
user=username,
password=password)
data = None
try:
if id_field:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: {0}, field: {1}, minion: {2}'.format(
table, id_field, minion_id))
if field:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).pluck(field).run(conn)
else:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).run(conn)
else:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: {0}, field: id, minion: {1}'.format(
table, minion_id))
if field:
data = rethinkdb.table(table).get(minion_id).pluck(field).run(
conn)
else:
data = rethinkdb.table(table).get(minion_id).run(conn)
finally:
if conn.is_open():
conn.close()
if data.items:
# Return nothing if multiple documents are found for a minion
if len(data.items) > 1:
log.error('ext_pillar.rethinkdb: ambiguous documents found for '
'minion {0}'.format(minion_id))
return {}
else:
result = data.items.pop()
if pillar_key:
return {pillar_key: result}
return result
else:
# No document found in the database
log.debug('ext_pillar.rethinkdb: no document found')
return {}


@ -414,7 +414,7 @@ def extracted(name,
.. versionadded:: 2017.7.3
keep : True
Same as ``keep_source``.
Same as ``keep_source``, kept for backward-compatibility.
.. note::
If both ``keep_source`` and ``keep`` are used, ``keep`` will be
@ -648,6 +648,21 @@ def extracted(name,
# Remove pub kwargs as they're irrelevant here.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '
'do the same thing, \'keep\' was ignored.'
)
keep_source = bool(kwargs.pop('keep_source'))
kwargs.pop('keep')
elif 'keep_source' in kwargs:
keep_source = bool(kwargs.pop('keep_source'))
elif 'keep' in kwargs:
keep_source = bool(kwargs.pop('keep'))
else:
# Neither was passed, default is True
keep_source = True
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '


@ -6637,6 +6637,28 @@ def cached(name,
else:
pre_hash = None
def _try_cache(path, checksum):
'''
This helper is not needed anymore in develop as the fileclient in the
develop branch now has means of skipping a download if the existing
hash matches one passed to cp.cache_file. Remove this helper and the
code that invokes it, once we have merged forward into develop.
'''
if not path or not checksum:
return True
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
if form is None:
# Shouldn't happen, an invalid checksum length should be caught
# before we get here. But in the event this gets through, don't let
# it cause any trouble, and just return True.
return True
try:
return salt.utils.get_hash(path, form=form) != checksum
except (IOError, OSError, ValueError):
# Again, shouldn't happen, but don't let invalid input/permissions
# in the call to get_hash blow this up.
return True
# Cache the file. Note that this will not actually download the file if
# either of the following is true:
# 1. source is a salt:// URL and the fileserver determines that the hash
@ -6645,14 +6667,18 @@ def cached(name,
# matches the cached copy.
# Remote, non salt:// sources _will_ download if a copy of the file was
# not already present in the minion cache.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = exc.__str__()
return ret
if _try_cache(local_copy, source_sum.get('hsum')):
# The _try_cache helper is obsolete in the develop branch. Once merged
# forward, remove the helper as well as this if statement, and dedent
# the below block.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = exc.__str__()
return ret
if not local_copy:
ret['comment'] = (


@ -84,10 +84,12 @@ def installed(name, updates=None):
Args:
name (str): The identifier of a single update to install.
name (str):
The identifier of a single update to install.
updates (list): A list of identifiers for updates to be installed.
Overrides ``name``. Default is None.
updates (list):
A list of identifiers for updates to be installed. Overrides
``name``. Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
@ -121,7 +123,7 @@ def installed(name, updates=None):
# Install multiple updates
install_updates:
wua.installed:
- name:
- updates:
- KB3194343
- 28cf1b09-2b1a-458c-9bd1-971d1b26b211
'''
@ -215,10 +217,12 @@ def removed(name, updates=None):
Args:
name (str): The identifier of a single update to uninstall.
name (str):
The identifier of a single update to uninstall.
updates (list): A list of identifiers for updates to be removed.
Overrides ``name``. Default is None.
updates (list):
A list of identifiers for updates to be removed. Overrides ``name``.
Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
@ -329,3 +333,172 @@ def removed(name, updates=None):
ret['comment'] = 'Updates removed successfully'
return ret
def uptodate(name,
software=True,
drivers=False,
skip_hidden=False,
skip_mandatory=False,
skip_reboot=True,
categories=None,
severities=None,):
'''
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
wua = salt.utils.win_update.WindowsUpdateAgent()
available_updates = wua.available(
skip_hidden=skip_hidden, skip_installed=True,
skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
software=software, drivers=drivers, categories=categories,
severities=severities)
# No updates found
if available_updates.count() == 0:
ret['comment'] = 'No updates found'
return ret
updates = list(available_updates.list().keys())
# Search for updates
install_list = wua.search(updates)
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.is_true(item.IsInstalled):
install.updates.Add(item)
# Return comment of changes if test.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Updates will be installed:'
for update in install.updates:
ret['comment'] += '\n'
ret['comment'] += ': '.join(
[update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# Verify the installation
for item in install.list():
if not salt.utils.is_true(post_info[item]['Installed']):
ret['changes']['failed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'KBs': post_info[item]['KBs']}
}
ret['result'] = False
else:
ret['changes']['installed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'NeedsReboot': post_info[item]['NeedsReboot'],
'KBs': post_info[item]['KBs']}
}
if ret['changes'].get('failed', False):
ret['comment'] = 'Updates failed'
else:
ret['comment'] = 'Updates installed successfully'
return ret


@ -966,6 +966,31 @@ class CkMinions(object):
auth_list.append(matcher)
return auth_list
def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
'''
Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in.
'''
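# Illustrative sketch with a hypothetical eauth configuration, assuming the
# default (non-permissive) settings:
#   provider = {'fred': ['test.ping'], 'admins%': ['pkg.*'], '*': ['grains.items']}
#   fill_auth_list(provider, 'fred', ['admins'])
#       -> contains 'test.ping' and 'pkg.*' (the name matched, so '*' is skipped)
#   fill_auth_list(provider, 'alice', [])
#       -> ['grains.items'] (no name or group match, so the '*' fallback applies)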
if auth_list is None:
auth_list = []
if permissive is None:
permissive = self.opts.get('permissive_acl')
name_matched = False
for match in auth_provider:
if match == '*' and not permissive:
continue
if match.endswith('%'):
if match.rstrip('%') in groups:
auth_list.extend(auth_provider[match])
else:
if salt.utils.expr_match(match, name):
name_matched = True
auth_list.extend(auth_provider[match])
if not permissive and not name_matched and '*' in auth_provider:
auth_list.extend(auth_provider['*'])
return auth_list
def wheel_check(self, auth_list, fun, args):
'''
Check special API permissions
@ -982,6 +1007,8 @@ class CkMinions(object):
'''
Check special API permissions
'''
if not auth_list:
return False
if form != 'cloud':
comps = fun.split('.')
if len(comps) != 2:


@ -63,7 +63,7 @@ class LocalFuncsTestCase(TestCase):
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.local_funcs.runner(load)
self.assertDictEqual(mock_ret, ret)
@ -93,7 +93,7 @@ class LocalFuncsTestCase(TestCase):
self.assertDictEqual(mock_ret, ret)
def test_runner_eauth_salt_invocation_errpr(self):
def test_runner_eauth_salt_invocation_error(self):
'''
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
@ -102,7 +102,7 @@ class LocalFuncsTestCase(TestCase):
mock_ret = {u'error': {u'name': u'SaltInvocationError',
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.local_funcs.runner(load)
self.assertDictEqual(mock_ret, ret)
@ -146,7 +146,7 @@ class LocalFuncsTestCase(TestCase):
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.local_funcs.wheel(load)
self.assertDictEqual(mock_ret, ret)
@ -176,7 +176,7 @@ class LocalFuncsTestCase(TestCase):
self.assertDictEqual(mock_ret, ret)
def test_wheel_eauth_salt_invocation_errpr(self):
def test_wheel_eauth_salt_invocation_error(self):
'''
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
@ -185,7 +185,7 @@ class LocalFuncsTestCase(TestCase):
mock_ret = {u'error': {u'name': u'SaltInvocationError',
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.local_funcs.wheel(load)
self.assertDictEqual(mock_ret, ret)


@ -63,7 +63,7 @@ class ClearFuncsTestCase(TestCase):
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@ -93,7 +93,7 @@ class ClearFuncsTestCase(TestCase):
self.assertDictEqual(mock_ret, ret)
def test_runner_eauth_salt_invocation_errpr(self):
def test_runner_eauth_salt_invocation_error(self):
'''
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
@ -102,7 +102,7 @@ class ClearFuncsTestCase(TestCase):
mock_ret = {u'error': {u'name': u'SaltInvocationError',
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.clear_funcs.runner(clear_load)
self.assertDictEqual(mock_ret, ret)
@ -155,7 +155,7 @@ class ClearFuncsTestCase(TestCase):
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)
@ -185,7 +185,7 @@ class ClearFuncsTestCase(TestCase):
self.assertDictEqual(mock_ret, ret)
def test_wheel_eauth_salt_invocation_errpr(self):
def test_wheel_eauth_salt_invocation_error(self):
'''
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
command is malformed.
@ -194,7 +194,7 @@ class ClearFuncsTestCase(TestCase):
mock_ret = {u'error': {u'name': u'SaltInvocationError',
u'message': u'A command invocation error occurred: Check syntax.'}}
with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
ret = self.clear_funcs.wheel(clear_load)
self.assertDictEqual(mock_ret, ret)


@ -18,6 +18,7 @@ import salt.utils.event as event
from salt.exceptions import SaltSystemExit
import salt.syspaths
import tornado
from salt.ext.six.moves import range
__opts__ = {}
@ -69,7 +70,7 @@ class MinionTestCase(TestCase):
mock_jid_queue = [123]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
ret = minion._handle_decoded_payload(mock_data)
ret = minion._handle_decoded_payload(mock_data).result()
self.assertEqual(minion.jid_queue, mock_jid_queue)
self.assertIsNone(ret)
finally:
@ -98,7 +99,7 @@ class MinionTestCase(TestCase):
# Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
# mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
# previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
minion._handle_decoded_payload(mock_data)
minion._handle_decoded_payload(mock_data).result()
mock_jid_queue.append(mock_jid)
self.assertEqual(minion.jid_queue, mock_jid_queue)
finally:
@ -126,8 +127,54 @@ class MinionTestCase(TestCase):
# Call the _handle_decoded_payload function and check that the queue is smaller by one item
# and contains the new jid
minion._handle_decoded_payload(mock_data)
minion._handle_decoded_payload(mock_data).result()
self.assertEqual(len(minion.jid_queue), 2)
self.assertEqual(minion.jid_queue, [456, 789])
finally:
minion.destroy()
def test_process_count_max(self):
'''
Tests that the _handle_decoded_payload function does not spawn more than the configured number of processes,
as per process_count_max.
'''
with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)), \
patch('salt.utils.minion.running', MagicMock(return_value=[])), \
patch('tornado.gen.sleep', MagicMock(return_value=tornado.concurrent.Future())):
process_count_max = 10
mock_opts = salt.config.DEFAULT_MINION_OPTS
mock_opts['minion_jid_queue_hwm'] = 100
mock_opts["process_count_max"] = process_count_max
try:
io_loop = tornado.ioloop.IOLoop()
minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop)
# mock gen.sleep to throw a special Exception when called, so that we detect it
class SleepCalledException(Exception):
"""Thrown when sleep is called"""
pass
tornado.gen.sleep.return_value.set_exception(SleepCalledException())
# up until process_count_max: gen.sleep does not get called, processes are started normally
for i in range(process_count_max):
mock_data = {'fun': 'foo.bar',
'jid': i}
io_loop.run_sync(lambda data=mock_data: minion._handle_decoded_payload(data))
self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count, i + 1)
self.assertEqual(len(minion.jid_queue), i + 1)
salt.utils.minion.running.return_value += [i]
# above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started
mock_data = {'fun': 'foo.bar',
'jid': process_count_max + 1}
self.assertRaises(SleepCalledException,
lambda: io_loop.run_sync(lambda: minion._handle_decoded_payload(mock_data)))
self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count,
process_count_max)
self.assertEqual(len(minion.jid_queue), process_count_max + 1)
finally:
minion.destroy()


@ -958,5 +958,47 @@ class SaltAPIParserTestCase(LogSettingsParserTests):
self.addCleanup(delattr, self, 'parser')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DaemonMixInTestCase(TestCase):
'''
Tests the PIDfile deletion in the DaemonMixIn.
'''
def setUp(self):
'''
Setting up
'''
# Set PID
self.pid = '/some/fake.pid'
# Setup mixin
self.mixin = salt.utils.parsers.DaemonMixIn()
self.mixin.info = None
self.mixin.config = {}
self.mixin.config['pidfile'] = self.pid
def test_pid_file_deletion(self):
'''
PIDfile deletion without exception.
'''
with patch('os.unlink', MagicMock()) as os_unlink:
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch.object(self.mixin, 'info', MagicMock()):
self.mixin._mixin_before_exit()
assert self.mixin.info.call_count == 0
assert os_unlink.call_count == 1
def test_pid_file_deletion_with_oserror(self):
'''
PIDfile deletion with exception
'''
with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink:
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch.object(self.mixin, 'info', MagicMock()):
self.mixin._mixin_before_exit()
assert os_unlink.call_count == 1
self.mixin.info.assert_called_with(
'PIDfile could not be deleted: {0}'.format(self.pid))
# Hide the class from unittest framework when it searches for TestCase classes in the module
del LogSettingsParserTests