mirror of
https://github.com/saltstack/salt.git
synced 2025-04-16 09:40:20 +00:00
Merge remote-tracking branch 'upstream/2015.2' into merge-forward-develop
Conflicts: salt/modules/mongodb.py salt/utils/http.py salt/utils/process.py tests/integration/__init__.py
This commit is contained in:
commit
befb666623
36 changed files with 928 additions and 190 deletions
|
@ -97,9 +97,10 @@ the started execution and complete.
|
|||
New in version 0.17.
|
||||
|
||||
.sp
|
||||
Choose the format of the state output. The options are \fIfull\fP,
|
||||
\fIterse\fP, \fImixed\fP, \fIchanges\fP, and \fIfilter\fP\&. Default: full
|
||||
.UNINDENT
|
||||
Override the configured \fBstate_output\fP value for minion output. One of
|
||||
\fBfull\fP, \fBterse\fP, \fBmixed\fP, \fBchanges\fP or \fBfilter\fP\&.
|
||||
Default: \fBfull\fB\&.
|
||||
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-subset=SUBSET
|
||||
|
|
|
@ -46,8 +46,9 @@ Options
|
|||
|
||||
.. versionadded:: 0.17
|
||||
|
||||
Choose the format of the state output. The options are `full`,
|
||||
`terse`, `mixed`, `changes`, and `filter`. Default: full
|
||||
Override the configured ``state_output`` value for minion output. One of
|
||||
``full``, ``terse``, ``mixed``, ``changes`` or ``filter``. Default:
|
||||
``full``.
|
||||
|
||||
.. option:: --subset=SUBSET
|
||||
|
||||
|
|
|
@ -122,6 +122,21 @@ Python's :func:`random.shuffle <python2:random.shuffle>` method.
|
|||
|
||||
master_shuffle: True
|
||||
|
||||
.. conf_minion:: retry_dns
|
||||
|
||||
``retry_dns``
|
||||
---------------
|
||||
|
||||
Default: ``30``
|
||||
|
||||
Set the number of seconds to wait before attempting to resolve
|
||||
the master hostname if name resolution fails. Defaults to 30 seconds.
|
||||
Set to zero if the minion should shutdown and not retry.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
retry_dns: 30
|
||||
|
||||
.. conf_minion:: master_port
|
||||
|
||||
``master_port``
|
||||
|
|
|
@ -19,6 +19,15 @@ Authentication events
|
|||
``reject``.
|
||||
:var pub: The minion public key.
|
||||
|
||||
|
||||
.. note:: Minions fire auth events on fairly regular basis for a number
|
||||
of reasons. Writing reactors to respond to events through
|
||||
the auth cycle can lead to infinite reactor event loops
|
||||
(minion tries to auth, reactor responds by doing something
|
||||
that generates another auth event, minion sends auth event,
|
||||
etc.). Consider reacting to ``salt/key`` or ``salt/minion/<MID>/start``
|
||||
or firing a custom event tag instead.
|
||||
|
||||
Start events
|
||||
============
|
||||
|
||||
|
@ -37,7 +46,14 @@ Key events
|
|||
|
||||
:var id: The minion ID.
|
||||
:var act: The new status of the minion key: ``accept``, ``pend``,
|
||||
``reject``.
|
||||
``reject``.
|
||||
|
||||
.. warning:: If a master is in :conf_master:`auto_accept mode`, ``salt/key`` events
|
||||
will not be fired when the keys are accepted. In addition, pre-seeding
|
||||
keys (like happens through :ref:`Salt-Cloud<salt-cloud>`) will not cause
|
||||
firing of these events.
|
||||
|
||||
|
||||
|
||||
Job events
|
||||
==========
|
||||
|
|
|
@ -6,6 +6,7 @@ After=syslog.target network.target
|
|||
Type=simple
|
||||
LimitNOFILE=8192
|
||||
ExecStart=/usr/bin/salt-minion
|
||||
KillMode=process
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
|
|
@ -159,6 +159,10 @@ class LoadAuth(object):
|
|||
'name': fcall['args'][0],
|
||||
'eauth': load['eauth'],
|
||||
'token': tok}
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
|
||||
with salt.utils.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
return tdata
|
||||
|
|
|
@ -1337,8 +1337,13 @@ class Cloud(object):
|
|||
if not vm_overrides:
|
||||
vm_overrides = {}
|
||||
|
||||
with salt.utils.fopen(os.path.join(salt.syspaths.CONFIG_DIR, 'cloud'), 'r') as mcc:
|
||||
main_cloud_config = yaml.safe_load(mcc)
|
||||
try:
|
||||
with salt.utils.fopen(self.opts['conf_file'], 'r') as mcc:
|
||||
main_cloud_config = yaml.safe_load(mcc)
|
||||
except KeyError:
|
||||
main_cloud_config = {}
|
||||
except IOError:
|
||||
main_cloud_config = {}
|
||||
|
||||
profile_details = self.opts['profiles'][profile]
|
||||
alias, driver = profile_details['provider'].split(':')
|
||||
|
|
|
@ -289,7 +289,7 @@ def create(vm_):
|
|||
'Error creating volume {0} on CLOUDSTACK\n\n'
|
||||
'The following exception was thrown by libcloud when trying to '
|
||||
'requesting a volume: \n{1}'.format(
|
||||
ex_blockdevicemapping['VirtualName'], exc.message
|
||||
ex_blockdevicemapping['VirtualName'], exc
|
||||
),
|
||||
# Show the traceback if the debug logging level is enabled
|
||||
exc_info_on_loglevel=logging.DEBUG
|
||||
|
@ -319,7 +319,7 @@ def create(vm_):
|
|||
'Error attaching volume {0} on CLOUDSTACK\n\n'
|
||||
'The following exception was thrown by libcloud when trying to '
|
||||
'attach a volume: \n{1}'.format(
|
||||
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc.message
|
||||
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc
|
||||
),
|
||||
# Show the traceback if the debug logging level is enabled
|
||||
exc_info=log.isEnabledFor(logging.DEBUG)
|
||||
|
|
|
@ -755,6 +755,13 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|||
Allow for "direct" attribute access-- this allows jinja templates to
|
||||
access things like `salt.test.ping()`
|
||||
'''
|
||||
# if we have an attribute named that, lets return it.
|
||||
try:
|
||||
return object.__getattr__(self, mod_name)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
# otherwise we assume its jinja template access
|
||||
if mod_name not in self.loaded_modules and not self.loaded:
|
||||
for name in self._iter_files(mod_name):
|
||||
if name in self.loaded_files:
|
||||
|
|
|
@ -1468,6 +1468,7 @@ class ClearFuncs(object):
|
|||
|
||||
Any return other than None is an eauth failure
|
||||
'''
|
||||
|
||||
if 'eauth' not in clear_load:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
|
||||
|
@ -1656,6 +1657,7 @@ class ClearFuncs(object):
|
|||
Create and return an authentication token, the clear load needs to
|
||||
contain the eauth key and the needed authentication creds.
|
||||
'''
|
||||
|
||||
if 'eauth' not in clear_load:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
|
@ -1665,10 +1667,19 @@ class ClearFuncs(object):
|
|||
return ''
|
||||
try:
|
||||
name = self.loadauth.load_name(clear_load)
|
||||
groups = self.loadauth.get_groups(clear_load)
|
||||
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
|
||||
('*' in self.opts['external_auth'][clear_load['eauth']])):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
found = False
|
||||
for group in groups:
|
||||
if "{0}%".format(group) in self.opts['external_auth'][clear_load['eauth']]:
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
else:
|
||||
clear_load['groups'] = groups
|
||||
if not self.loadauth.time_auth(clear_load):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
|
@ -1725,12 +1736,38 @@ class ClearFuncs(object):
|
|||
return ''
|
||||
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
|
||||
('*' in self.opts['external_auth'][token['eauth']])):
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return ''
|
||||
found = False
|
||||
for group in token['groups']:
|
||||
if "{0}%".format(group) in self.opts['external_auth'][token['eauth']]:
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return ''
|
||||
|
||||
group_perm_keys = filter(lambda(item): item.endswith('%'), self.opts['external_auth'][token['eauth']]) # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in token['groups']:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
|
||||
auth_list = []
|
||||
|
||||
if '*' in self.opts['external_auth'][token['eauth']]:
|
||||
auth_list.extend(self.opts['external_auth'][token['eauth']]['*'])
|
||||
if token['name'] in self.opts['external_auth'][token['eauth']]:
|
||||
auth_list.extend(self.opts['external_auth'][token['eauth']][token['name']])
|
||||
if group_auth_match:
|
||||
auth_list = self.ckminions.fill_auth_list_from_groups(self.opts['external_auth'][token['eauth']], token['groups'], auth_list)
|
||||
|
||||
log.trace("compiled auth_list: {0}".format(auth_list))
|
||||
|
||||
good = self.ckminions.auth_check(
|
||||
self.opts['external_auth'][token['eauth']][token['name']]
|
||||
if token['name'] in self.opts['external_auth'][token['eauth']]
|
||||
else self.opts['external_auth'][token['eauth']]['*'],
|
||||
auth_list,
|
||||
clear_load['fun'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'))
|
||||
|
|
|
@ -220,8 +220,9 @@ def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_i
|
|||
artifact_metadata_url = _get_artifact_metadata_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id)
|
||||
try:
|
||||
artifact_metadata_xml = urllib2.urlopen(artifact_metadata_url).read()
|
||||
except HTTPError as e:
|
||||
raise Exception("Could not fetch data from url: {url}, HTTPError: {message}".format(url=artifact_metadata_url, message=e.message))
|
||||
except HTTPError as http_error:
|
||||
message = 'Could not fetch data from url: {url}, HTTPError: {error}'
|
||||
raise Exception(message.format(url=artifact_metadata_url, error=http_error))
|
||||
|
||||
log.debug('artifact_metadata_xml=%s', artifact_metadata_xml)
|
||||
return artifact_metadata_xml
|
||||
|
@ -257,8 +258,9 @@ def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, ar
|
|||
snapshot_version_metadata_url = _get_snapshot_version_metadata_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version)
|
||||
try:
|
||||
snapshot_version_metadata_xml = urllib2.urlopen(snapshot_version_metadata_url).read()
|
||||
except HTTPError as e:
|
||||
raise Exception("Could not fetch data from url: {url}, HTTPError: {message}".format(url=snapshot_version_metadata_url, message=e.message))
|
||||
except HTTPError as http_error:
|
||||
message = 'Could not fetch data from url: {url}, HTTPError: {error}'
|
||||
raise Exception(message.format(url=snapshot_version_metadata_url, error=http_error))
|
||||
log.debug('snapshot_version_metadata_xml=%s', snapshot_version_metadata_xml)
|
||||
return snapshot_version_metadata_xml
|
||||
|
||||
|
|
|
@ -100,8 +100,8 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
|
|||
msg = 'The load balancer does not exist in region {0}'.format(region)
|
||||
log.debug(msg)
|
||||
return False
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
return False
|
||||
|
||||
|
||||
|
@ -137,8 +137,8 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
|
|||
ret['scheme'] = lb.scheme
|
||||
ret['dns_name'] = lb.dns_name
|
||||
return ret
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
return []
|
||||
|
||||
|
||||
|
@ -185,9 +185,9 @@ def create(name, availability_zones, listeners=None, subnets=None,
|
|||
msg = 'Failed to create ELB {0}'.format(name)
|
||||
log.error(msg)
|
||||
return False
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to create ELB {0}: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to create ELB {0}: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -209,8 +209,8 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
|
|||
msg = 'Deleted ELB {0}.'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to delete ELB {0}'.format(name)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
@ -248,9 +248,9 @@ def create_listeners(name, listeners=None, region=None, key=None, keyid=None,
|
|||
msg = 'Created ELB listeners on {0}'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to create ELB listeners on {0}: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to create ELB listeners on {0}: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -273,9 +273,9 @@ def delete_listeners(name, ports, region=None, key=None, keyid=None,
|
|||
msg = 'Deleted ELB listeners on {0}'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to delete ELB listeners on {0}: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to delete ELB listeners on {0}: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -324,9 +324,9 @@ def enable_availability_zones(name, availability_zones, region=None, key=None,
|
|||
msg = 'Enabled availability_zones on ELB {0}'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to enable availability_zones on ELB {0}: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to enable availability_zones on ELB {0}: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -349,9 +349,9 @@ def disable_availability_zones(name, availability_zones, region=None, key=None,
|
|||
msg = 'Disabled availability_zones on ELB {0}'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to disable availability_zones on ELB {0}: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to disable availability_zones on ELB {0}: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -374,9 +374,9 @@ def attach_subnets(name, subnets, region=None, key=None, keyid=None,
|
|||
msg = 'Attached ELB {0} on subnets.'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to attach ELB {0} on subnets: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to attach ELB {0} on subnets: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -399,9 +399,9 @@ def detach_subnets(name, subnets, region=None, key=None, keyid=None,
|
|||
msg = 'Detached ELB {0} from subnets.'.format(name)
|
||||
log.info(msg)
|
||||
return True
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to detach ELB {0} from subnets: {1}'.format(name, e.message)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
msg = 'Failed to detach ELB {0} from subnets: {1}'.format(name, error)
|
||||
log.error(msg)
|
||||
return False
|
||||
|
||||
|
@ -436,9 +436,9 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
|
|||
ret['connection_draining']['timeout'] = cd.timeout
|
||||
ret['connecting_settings']['idle_timeout'] = cs.idle_timeout
|
||||
return ret
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
log.error('ELB {0} does not exist: {1}'.format(name, e.message))
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
log.error('ELB {0} does not exist: {1}'.format(name, error))
|
||||
return {}
|
||||
|
||||
|
||||
|
@ -533,9 +533,9 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
|
|||
ret['timeout'] = hc.timeout
|
||||
ret['unhealthy_threshold'] = hc.unhealthy_threshold
|
||||
return ret
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
log.error('ELB {0} does not exist: {1}'.format(name, e.message))
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
log.error('ELB {0} does not exist: {1}'.format(name, error))
|
||||
return {}
|
||||
|
||||
|
||||
|
@ -554,9 +554,9 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
|
|||
try:
|
||||
conn.configure_health_check(name, hc)
|
||||
log.info('Configured health check on ELB {0}'.format(name))
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
log.info('Failed to configure health check on ELB {0}: {1}'.format(name, e.message))
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
log.info('Failed to configure health check on ELB {0}: {1}'.format(name, error))
|
||||
return False
|
||||
return True
|
||||
|
||||
|
@ -585,8 +585,8 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
|
|||
|
||||
try:
|
||||
registered_instances = conn.register_instances(name, instances)
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.warn(e)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.warn(error)
|
||||
return False
|
||||
registered_instance_ids = [instance.id for instance in
|
||||
registered_instances]
|
||||
|
@ -627,18 +627,18 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
|
|||
|
||||
try:
|
||||
registered_instances = conn.deregister_instances(name, instances)
|
||||
except boto.exception.BotoServerError as e:
|
||||
except boto.exception.BotoServerError as error:
|
||||
# if the instance(s) given as an argument are not members of the ELB
|
||||
# boto returns e.error_code == 'InvalidInstance'
|
||||
# boto returns error.error_code == 'InvalidInstance'
|
||||
# deregister_instances returns "None" because the instances are
|
||||
# effectively deregistered from ELB
|
||||
if e.error_code == 'InvalidInstance':
|
||||
if error.error_code == 'InvalidInstance':
|
||||
log.warn('One or more of instance(s) {0} are not part of ELB {1}.'
|
||||
' deregister_instances not performed.'
|
||||
.format(instances, name))
|
||||
return None
|
||||
else:
|
||||
log.warn(e)
|
||||
log.warn(error)
|
||||
return False
|
||||
registered_instance_ids = [instance.id for instance in
|
||||
registered_instances]
|
||||
|
@ -675,6 +675,6 @@ def get_instance_health(name, region=None, key=None, keyid=None, profile=None, i
|
|||
'reason_code': _instance.reason_code
|
||||
})
|
||||
return ret
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
return []
|
||||
|
|
|
@ -1159,7 +1159,7 @@ def init(name,
|
|||
try:
|
||||
clone_from = _get_base(vgname=vgname, profile=profile, **kwargs)
|
||||
except (SaltInvocationError, CommandExecutionError) as exc:
|
||||
ret['comment'] = exc.message
|
||||
ret['comment'] = exc.strerror
|
||||
if changes:
|
||||
ret['changes'] = changes_dict
|
||||
return ret
|
||||
|
|
|
@ -413,7 +413,7 @@ def insert(objects, collection, user=None, password=None,
|
|||
try:
|
||||
objects = _to_dict(objects)
|
||||
except Exception as err:
|
||||
return err.message
|
||||
return err
|
||||
|
||||
try:
|
||||
log.info("Inserting %r into %s.%s", objects, database, collection)
|
||||
|
@ -422,8 +422,8 @@ def insert(objects, collection, user=None, password=None,
|
|||
ids = col.insert(objects)
|
||||
return ids
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error("Inserting objects %r failed with error %s", objects, err.message)
|
||||
return err.message
|
||||
log.error("Inserting objects %r failed with error %s", objects, err)
|
||||
return err
|
||||
|
||||
|
||||
def find(collection, query=None, user=None, password=None,
|
||||
|
@ -435,7 +435,7 @@ def find(collection, query=None, user=None, password=None,
|
|||
try:
|
||||
query = _to_dict(query)
|
||||
except Exception as err:
|
||||
return err.message
|
||||
return err
|
||||
|
||||
try:
|
||||
log.info("Searching for %r in %s", query, collection)
|
||||
|
@ -444,8 +444,8 @@ def find(collection, query=None, user=None, password=None,
|
|||
ret = col.find(query)
|
||||
return list(ret)
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error("Removing objects failed with error: %s", err.message)
|
||||
return err.message
|
||||
log.error("Removing objects failed with error: %s", err)
|
||||
return err
|
||||
|
||||
|
||||
def remove(collection, query=None, user=None, password=None,
|
||||
|
|
|
@ -71,8 +71,8 @@ def _connect(**kwargs):
|
|||
nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], True)
|
||||
try:
|
||||
nitro.login()
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSNitro.login() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSNitro.login() failed: {0}'.format(error))
|
||||
return None
|
||||
return nitro
|
||||
|
||||
|
@ -80,8 +80,8 @@ def _connect(**kwargs):
|
|||
def _disconnect(nitro):
|
||||
try:
|
||||
nitro.logout()
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSNitro.logout() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSNitro.logout() failed: {0}'.format(error))
|
||||
return None
|
||||
return nitro
|
||||
|
||||
|
@ -97,8 +97,8 @@ def _servicegroup_get(sg_name, **connection_args):
|
|||
sg.set_servicegroupname(sg_name)
|
||||
try:
|
||||
sg = NSServiceGroup.get(nitro, sg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.get() failed: {0}'.format(error))
|
||||
sg = None
|
||||
_disconnect(nitro)
|
||||
return sg
|
||||
|
@ -115,8 +115,8 @@ def _servicegroup_get_servers(sg_name, **connection_args):
|
|||
sg.set_servicegroupname(sg_name)
|
||||
try:
|
||||
sg = NSServiceGroup.get_servers(nitro, sg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.get_servers failed(): {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.get_servers failed(): {0}'.format(error))
|
||||
sg = None
|
||||
_disconnect(nitro)
|
||||
return sg
|
||||
|
@ -180,8 +180,8 @@ def servicegroup_add(sg_name, sg_type='HTTP', **connection_args):
|
|||
sg.set_servicetype(sg_type.upper())
|
||||
try:
|
||||
NSServiceGroup.add(nitro, sg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.add() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.add() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -206,8 +206,8 @@ def servicegroup_delete(sg_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServiceGroup.delete(nitro, sg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.delete() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.delete() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -260,8 +260,8 @@ def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServiceGroup.enable_server(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.enable_server() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.enable_server() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -286,8 +286,8 @@ def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServiceGroup.disable_server(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroup.disable_server() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroup.disable_server() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -317,8 +317,8 @@ def servicegroup_server_add(sg_name, s_name, s_port, **connection_args):
|
|||
sgsb.set_port(s_port)
|
||||
try:
|
||||
NSServiceGroupServerBinding.add(nitro, sgsb)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroupServerBinding() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroupServerBinding() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -348,8 +348,8 @@ def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args):
|
|||
sgsb.set_port(s_port)
|
||||
try:
|
||||
NSServiceGroupServerBinding.delete(nitro, sgsb)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServiceGroupServerBinding() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServiceGroupServerBinding() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -366,8 +366,8 @@ def _service_get(s_name, **connection_args):
|
|||
service.set_name(s_name)
|
||||
try:
|
||||
service = NSService.get(nitro, service)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSService.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSService.get() failed: {0}'.format(error))
|
||||
service = None
|
||||
_disconnect(nitro)
|
||||
return service
|
||||
|
@ -420,8 +420,8 @@ def service_enable(s_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSService.enable(nitro, service)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSService.enable() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSService.enable() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -449,8 +449,8 @@ def service_disable(s_name, s_delay=None, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSService.disable(nitro, service)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSService.enable() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSService.enable() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -464,8 +464,8 @@ def _server_get(s_name, **connection_args):
|
|||
server.set_name(s_name)
|
||||
try:
|
||||
server = NSServer.get(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.get() failed: {0}'.format(error))
|
||||
server = None
|
||||
_disconnect(nitro)
|
||||
return server
|
||||
|
@ -516,8 +516,8 @@ def server_add(s_name, s_ip, s_state=None, **connection_args):
|
|||
server.set_state(s_state)
|
||||
try:
|
||||
NSServer.add(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.add() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.add() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -542,8 +542,8 @@ def server_delete(s_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServer.delete(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.delete() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.delete() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -578,8 +578,8 @@ def server_update(s_name, s_ip, **connection_args):
|
|||
ret = True
|
||||
try:
|
||||
NSServer.update(nitro, alt_server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.update() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.update() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -620,8 +620,8 @@ def server_enable(s_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServer.enable(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.enable() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.enable() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -648,8 +648,8 @@ def server_disable(s_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSServer.disable(nitro, server)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSServer.disable() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSServer.disable() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -663,8 +663,8 @@ def _vserver_get(v_name, **connection_args):
|
|||
return None
|
||||
try:
|
||||
vserver = NSLBVServer.get(nitro, vserver)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSLBVServer.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSLBVServer.get() failed: {0}'.format(error))
|
||||
vserver = None
|
||||
_disconnect(nitro)
|
||||
return vserver
|
||||
|
@ -716,8 +716,8 @@ def vserver_add(v_name, v_ip, v_port, v_type, **connection_args):
|
|||
vserver.set_servicetype(v_type.upper())
|
||||
try:
|
||||
NSLBVServer.add(nitro, vserver)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSLBVServer.add() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSLBVServer.add() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -742,8 +742,8 @@ def vserver_delete(v_name, **connection_args):
|
|||
return False
|
||||
try:
|
||||
NSLBVServer.delete(nitro, vserver)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSVServer.delete() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSVServer.delete() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -758,8 +758,8 @@ def _vserver_servicegroup_get(v_name, sg_name, **connection_args):
|
|||
vsg.set_name(v_name)
|
||||
try:
|
||||
vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: {0}'.format(error))
|
||||
return None
|
||||
for vsg in vsgs:
|
||||
if vsg.get_servicegroupname() == sg_name:
|
||||
|
@ -802,8 +802,8 @@ def vserver_servicegroup_add(v_name, sg_name, **connection_args):
|
|||
vsg.set_servicegroupname(sg_name)
|
||||
try:
|
||||
NSLBVServerServiceGroupBinding.add(nitro, vsg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -830,8 +830,8 @@ def vserver_servicegroup_delete(v_name, sg_name, **connection_args):
|
|||
vsg.set_servicegroupname(sg_name)
|
||||
try:
|
||||
NSLBVServerServiceGroupBinding.delete(nitro, vsg)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -846,8 +846,8 @@ def _vserver_sslcert_get(v_name, sc_name, **connection_args):
|
|||
sslcert.set_vservername(v_name)
|
||||
try:
|
||||
sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: {0}'.format(error))
|
||||
return None
|
||||
for sslcert in sslcerts:
|
||||
if sslcert.get_certkeyname() == sc_name:
|
||||
|
@ -889,8 +889,8 @@ def vserver_sslcert_add(v_name, sc_name, **connection_args):
|
|||
sslcert.set_certkeyname(sc_name)
|
||||
try:
|
||||
NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
@ -917,8 +917,8 @@ def vserver_sslcert_delete(v_name, sc_name, **connection_args):
|
|||
sslcert.set_certkeyname(sc_name)
|
||||
try:
|
||||
NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert)
|
||||
except NSNitroError as e:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: {0}'.format(e.message))
|
||||
except NSNitroError as error:
|
||||
log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: {0}'.format(error))
|
||||
ret = False
|
||||
_disconnect(nitro)
|
||||
return ret
|
||||
|
|
|
@ -411,8 +411,15 @@ def status(name, sig=None):
|
|||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = ['service', name, 'status']
|
||||
if _service_is_upstart(name):
|
||||
return 'start/running' in __salt__['cmd.run'](cmd, python_shell=False)
|
||||
return not bool(__salt__['cmd.retcode'](cmd, python_shell=False))
|
||||
# decide result base on cmd output, thus ignore retcode,
|
||||
# which makes cmd output not at error lvl even when cmd fail.
|
||||
return 'start/running' in __salt__['cmd.run'](cmd, python_shell=False,
|
||||
ignore_retcode=True)
|
||||
# decide result base on retcode, thus ignore output (set quite)
|
||||
# because there is no way to avoid logging at error lvl when
|
||||
# service is not running - retcode != 0 (which is totally relevant).
|
||||
return not bool(__salt__['cmd.retcode'](cmd, python_shell=False,
|
||||
quite=True))
|
||||
|
||||
|
||||
def _get_service_exec():
|
||||
|
|
|
@ -74,14 +74,17 @@ def list_installed():
|
|||
salt '*' win_servermanager.list_installed
|
||||
'''
|
||||
ret = {}
|
||||
for line in list_available().splitlines()[2:]:
|
||||
names = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select DisplayName,Name')
|
||||
for line in names.splitlines()[2:]:
|
||||
splt = line.split()
|
||||
if splt[0] == '[X]':
|
||||
name = splt.pop(-1)
|
||||
splt.pop(0)
|
||||
display_name = ' '.join(splt)
|
||||
ret[name] = display_name
|
||||
|
||||
name = splt.pop(-1)
|
||||
display_name = ' '.join(splt)
|
||||
ret[name] = display_name
|
||||
state = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select InstallState,Name')
|
||||
for line in state.splitlines()[2:]:
|
||||
splt = line.split()
|
||||
if splt[0] != 'Installed' and splt[1] in ret:
|
||||
del ret[splt[1]]
|
||||
return ret
|
||||
|
||||
|
||||
|
|
|
@ -1427,7 +1427,18 @@ class Login(LowDataAdapter):
|
|||
# Grab eauth config for the current backend for the current user
|
||||
try:
|
||||
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
|
||||
perms = eauth.get(token['name'], eauth.get('*'))
|
||||
|
||||
if 'groups' in token:
|
||||
user_groups = set(token['groups'])
|
||||
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
|
||||
|
||||
perms = []
|
||||
for group in user_groups & eauth_groups:
|
||||
perms.extend(eauth['{0}%'.format(group)])
|
||||
|
||||
perms = perms or None
|
||||
else:
|
||||
perms = eauth.get(token['name'], eauth.get('*'))
|
||||
|
||||
if perms is None:
|
||||
raise ValueError("Eauth permission list not found.")
|
||||
|
|
|
@ -16,20 +16,24 @@ state_verbose:
|
|||
instruct the highstate outputter to omit displaying anything in green, this
|
||||
means that nothing with a result of True and no changes will not be printed
|
||||
state_output:
|
||||
The highstate outputter has five output modes, `full`, `terse`, `mixed`,
|
||||
`changes` and `filter`. The default is set to full, which will display many
|
||||
lines of detailed information for each executed chunk. If the `state_output`
|
||||
option is set to `terse` then the output is greatly simplified and shown in
|
||||
only one line. If `mixed` is used, then terse output will be used unless a
|
||||
state failed, in which case full output will be used. If `changes` is used,
|
||||
then terse output will be used if there was no error and no changes,
|
||||
otherwise full output will be used. If `filter` is used, then either or both
|
||||
of two different filters can be used: `exclude` or `terse`. These can be set
|
||||
as such from the command line, or in the Salt config as
|
||||
`state_output_exclude` or `state_output_terse`, respectively. The values to
|
||||
exclude must be a comma-separated list of `True`, `False` and/or `None`.
|
||||
Because of parsing nuances, if only one of these is used, it must still
|
||||
contain a comma. For instance: `exclude=True,`.
|
||||
The highstate outputter has five output modes, ``full``, ``terse``,
|
||||
``mixed``, ``changes`` and ``filter``.
|
||||
|
||||
* The default is set to ``full``, which will display many lines of detailed
|
||||
information for each executed chunk.
|
||||
* If ``terse`` is used, then the output is greatly simplified and shown in
|
||||
only one line.
|
||||
* If ``mixed`` is used, then terse output will be used unless a state
|
||||
failed, in which case full output will be used.
|
||||
* If ``changes`` is used, then terse output will be used if there was no
|
||||
error and no changes, otherwise full output will be used.
|
||||
* If ``filter`` is used, then either or both of two different filters can be
|
||||
used: ``exclude`` or ``terse``.
|
||||
These can be set as such from the command line, or in the Salt config as
|
||||
`state_output_exclude` or `state_output_terse`, respectively. The values to
|
||||
exclude must be a comma-separated list of `True`, `False` and/or `None`.
|
||||
Because of parsing nuances, if only one of these is used, it must still
|
||||
contain a comma. For instance: `exclude=True,`.
|
||||
state_tabular:
|
||||
If `state_output` uses the terse output, set this to `True` for an aligned
|
||||
output format. If you wish to use a custom format, this can be set to a
|
||||
|
|
|
@ -556,9 +556,9 @@ def __get_artifact(artifact, salt_source):
|
|||
try:
|
||||
fetch_result = __fetch_from_artifactory(artifact)
|
||||
log.debug('fetch_result={0}'.format(fetch_result))
|
||||
except Exception as e:
|
||||
except Exception as exception:
|
||||
log.debug(traceback.format_exc())
|
||||
return None, e.message
|
||||
return None, exception
|
||||
|
||||
if fetch_result['status']:
|
||||
resolved_source = fetch_result['target_file']
|
||||
|
|
|
@ -30,20 +30,12 @@ try:
|
|||
# installation time.
|
||||
import salt._syspaths as __generated_syspaths # pylint: disable=no-name-in-module
|
||||
except ImportError:
|
||||
class __generated_syspaths(object):
|
||||
__slots__ = ('ROOT_DIR',
|
||||
'CONFIG_DIR',
|
||||
'CACHE_DIR',
|
||||
'SOCK_DIR',
|
||||
'SRV_ROOT_DIR',
|
||||
'BASE_FILE_ROOTS_DIR',
|
||||
'BASE_PILLAR_ROOTS_DIR',
|
||||
'BASE_MASTER_ROOTS_DIR',
|
||||
'LOGS_DIR',
|
||||
'PIDFILE_DIR')
|
||||
ROOT_DIR = CONFIG_DIR = CACHE_DIR = SOCK_DIR = None
|
||||
SRV_ROOT_DIR = BASE_FILE_ROOTS_DIR = BASE_PILLAR_ROOTS_DIR = None
|
||||
BASE_MASTER_ROOTS_DIR = LOGS_DIR = PIDFILE_DIR = None
|
||||
import imp
|
||||
__generated_syspaths = imp.new_module('salt._syspaths')
|
||||
for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR',
|
||||
'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'BASE_PILLAR_ROOTS_DIR',
|
||||
'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR'):
|
||||
setattr(__generated_syspaths, key, None)
|
||||
|
||||
|
||||
# Let's find out the path of this module
|
||||
|
|
|
@ -32,7 +32,6 @@ except ImportError:
|
|||
except ImportError:
|
||||
HAS_MATCHHOSTNAME = False
|
||||
import socket
|
||||
import urllib2
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
|
@ -42,12 +41,14 @@ import salt.config
|
|||
import salt.version
|
||||
from salt.template import compile_template
|
||||
from salt import syspaths
|
||||
import salt.ext.six.moves.http_client # pylint: disable=no-name-in-module
|
||||
|
||||
# Import 3rd party libs
|
||||
import salt.ext.six as six
|
||||
# pylint: disable=import-error,no-name-in-module
|
||||
import salt.ext.six.moves.http_client
|
||||
import salt.ext.six.moves.http_cookiejar
|
||||
import salt.ext.six.moves.urllib as urllib
|
||||
# pylint: enable=import-error,no-name-in-module
|
||||
try:
|
||||
import requests
|
||||
|
@ -267,10 +268,10 @@ def query(url,
|
|||
result_text = result.text
|
||||
result_cookies = result.cookies
|
||||
else:
|
||||
request = urllib2.Request(url, data)
|
||||
request = urllib.Request(url, data)
|
||||
handlers = [
|
||||
urllib2.HTTPHandler,
|
||||
urllib2.HTTPCookieProcessor(sess_cookies)
|
||||
urllib.HTTPHandler,
|
||||
urllib.HTTPCookieProcessor(sess_cookies)
|
||||
]
|
||||
|
||||
if url.startswith('https') or port == 443:
|
||||
|
@ -316,7 +317,7 @@ def query(url,
|
|||
if hasattr(ssl, 'SSLContext'):
|
||||
# Python >= 2.7.9
|
||||
context = ssl.SSLContext.load_cert_chain(*cert_chain)
|
||||
handlers.append(urllib2.HTTPSHandler(context=context)) # pylint: disable=E1123
|
||||
handlers.append(urllib.HTTPSHandler(context=context)) # pylint: disable=E1123
|
||||
else:
|
||||
# Python < 2.7.9
|
||||
cert_kwargs = {
|
||||
|
@ -328,7 +329,7 @@ def query(url,
|
|||
cert_kwargs['key_file'] = cert_chain[1]
|
||||
handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)
|
||||
|
||||
opener = urllib2.build_opener(*handlers)
|
||||
opener = urllib.build_opener(*handlers)
|
||||
for header in header_dict:
|
||||
request.add_header(header, header_dict[header])
|
||||
request.get_method = lambda: method
|
||||
|
|
|
@ -1030,8 +1030,9 @@ class OutputOptionsMixIn(six.with_metaclass(MixInMeta, object)):
|
|||
group.add_option(
|
||||
'--state-output', '--state_output',
|
||||
default='full',
|
||||
help=('Override the configured state_output value for minion output'
|
||||
'. Default: full')
|
||||
help=('Override the configured state_output value for minion '
|
||||
'output. One of full, terse, mixed, changes or filter. '
|
||||
'Default: full.')
|
||||
)
|
||||
|
||||
for option in self.output_options_group.option_list:
|
||||
|
|
|
@ -12,7 +12,6 @@ import sys
|
|||
import copy
|
||||
import json
|
||||
import time
|
||||
import errno
|
||||
import signal
|
||||
import shutil
|
||||
import pprint
|
||||
|
@ -283,7 +282,7 @@ class TestDaemon(object):
|
|||
# Wait for the daemons to all spin up
|
||||
time.sleep(5)
|
||||
|
||||
#self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
|
||||
# self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
|
||||
# self.syndic_master_opts,
|
||||
# 'start')
|
||||
|
||||
|
@ -512,7 +511,7 @@ class TestDaemon(object):
|
|||
minion_opts['raet_port'] = 64510
|
||||
sub_minion_opts['transport'] = 'raet'
|
||||
sub_minion_opts['raet_port'] = 64520
|
||||
#syndic_master_opts['transport'] = 'raet'
|
||||
# syndic_master_opts['transport'] = 'raet'
|
||||
|
||||
# Set up config options that require internal data
|
||||
master_opts['pillar_roots'] = {
|
||||
|
@ -703,7 +702,7 @@ class TestDaemon(object):
|
|||
sync_needed = self.parser.options.clean
|
||||
if self.parser.options.clean is False:
|
||||
def sumfile(fpath):
|
||||
# Since we will be do'in this for small files, it should be ok
|
||||
# Since we will be doing this for small files, it should be ok
|
||||
fobj = fopen(fpath)
|
||||
m = md5()
|
||||
while True:
|
||||
|
|
|
@ -1 +1 @@
|
|||
# encoding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
|
|
@ -1 +1 @@
|
|||
# encoding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
|
|
@ -191,11 +191,20 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
|
|||
os.environ['EXPENSIVE_TESTS'] = 'True'
|
||||
|
||||
if self.options.coverage and any((
|
||||
self.options.module, self.options.cli, self.options.client,
|
||||
self.options.shell, self.options.unit, self.options.state,
|
||||
self.options.runners, self.options.loader, self.options.name,
|
||||
self.options.outputter, self.options.fileserver,
|
||||
self.options.wheel, os.geteuid() != 0,
|
||||
self.options.module,
|
||||
self.options.cli,
|
||||
self.options.client,
|
||||
self.options.shell,
|
||||
self.options.unit,
|
||||
self.options.state,
|
||||
self.options.runners,
|
||||
self.options.loader,
|
||||
self.options.name,
|
||||
self.options.outputter,
|
||||
self.options.fileserver,
|
||||
self.options.wheel,
|
||||
self.options.api,
|
||||
os.geteuid() != 0,
|
||||
not self.options.run_destructive)):
|
||||
self.error(
|
||||
'No sense in generating the tests coverage report when '
|
||||
|
@ -342,6 +351,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
|
|||
self.options.fileserver or
|
||||
self.options.wheel or
|
||||
self.options.cloud_provider_tests or
|
||||
self.options.api or
|
||||
named_tests):
|
||||
# We're either not running any of runners, state, module and client
|
||||
# tests, or, we're only running unittests by passing --unit or by
|
||||
|
|
156
tests/unit/modules/rbenv_test.py
Normal file
156
tests/unit/modules/rbenv_test.py
Normal file
|
@ -0,0 +1,156 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.mock import (
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON,
|
||||
MagicMock,
|
||||
patch)
|
||||
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.modules import rbenv
|
||||
import os
|
||||
|
||||
|
||||
# Globals
|
||||
rbenv.__grains__ = {}
|
||||
rbenv.__salt__ = {}
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
class RbenvTestCase(TestCase):
|
||||
'''
|
||||
Test cases for salt.modules.rbenv
|
||||
'''
|
||||
def test_install(self):
|
||||
'''
|
||||
Test for install Rbenv systemwide
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_path', return_value=True):
|
||||
with patch.object(rbenv, '_install_rbenv', return_value=True):
|
||||
with patch.object(rbenv, '_install_ruby_build',
|
||||
return_value=True):
|
||||
with patch.object(os.path, 'expanduser', return_value='A'):
|
||||
self.assertTrue(rbenv.install())
|
||||
|
||||
def test_update(self):
|
||||
'''
|
||||
Test for updates the current versions of Rbenv and Ruby-Build
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_path', return_value=True):
|
||||
with patch.object(rbenv, '_update_rbenv', return_value=True):
|
||||
with patch.object(rbenv, '_update_ruby_build',
|
||||
return_value=True):
|
||||
with patch.object(os.path, 'expanduser', return_value='A'):
|
||||
self.assertTrue(rbenv.update())
|
||||
|
||||
def test_is_installed(self):
|
||||
'''
|
||||
Test for check if Rbenv is installed.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_bin', return_value='A'):
|
||||
with patch.dict(rbenv.__salt__,
|
||||
{'cmd.has_exec': MagicMock(return_value=True)}):
|
||||
self.assertTrue(rbenv.is_installed())
|
||||
|
||||
def test_install_ruby(self):
|
||||
'''
|
||||
Test for install a ruby implementation.
|
||||
'''
|
||||
with patch.dict(rbenv.__grains__, {'os': 'FreeBSD'}):
|
||||
with patch.dict(rbenv.__salt__,
|
||||
{'config.get': MagicMock(return_value='True')}):
|
||||
with patch.object(rbenv, '_rbenv_exec',
|
||||
return_value={'retcode': 0,
|
||||
'stderr': 'stderr'}):
|
||||
with patch.object(rbenv, 'rehash', return_value=None):
|
||||
self.assertEqual(rbenv.install_ruby('ruby'), 'stderr')
|
||||
|
||||
with patch.object(rbenv, '_rbenv_exec',
|
||||
return_value={'retcode': 1,
|
||||
'stderr': 'stderr'}):
|
||||
with patch.object(rbenv, 'uninstall_ruby',
|
||||
return_value=None):
|
||||
self.assertFalse(rbenv.install_ruby('ruby'))
|
||||
|
||||
def test_uninstall_ruby(self):
|
||||
'''
|
||||
Test for uninstall a ruby implementation.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_exec', return_value=None):
|
||||
self.assertTrue(rbenv.uninstall_ruby('ruby', 'runas'))
|
||||
|
||||
def test_versions(self):
|
||||
'''
|
||||
Test for list the installed versions of ruby.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_exec', return_value='A\nBC\nD'):
|
||||
self.assertListEqual(rbenv.versions(), ['A', 'BC', 'D'])
|
||||
|
||||
def test_default(self):
|
||||
'''
|
||||
Test for returns or sets the currently defined default ruby.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_exec',
|
||||
MagicMock(side_effect=[None, False])):
|
||||
self.assertTrue(rbenv.default('ruby', 'runas'))
|
||||
|
||||
self.assertEqual(rbenv.default(), '')
|
||||
|
||||
def test_list_(self):
|
||||
'''
|
||||
Test for list the installable versions of ruby.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_exec', return_value='A\nB\nCD\n'):
|
||||
self.assertListEqual(rbenv.list_(), ['A', 'B', 'CD'])
|
||||
|
||||
def test_rehash(self):
|
||||
'''
|
||||
Test for run rbenv rehash to update the installed shims.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_exec', return_value=None):
|
||||
self.assertTrue(rbenv.rehash())
|
||||
|
||||
def test_do(self):
|
||||
'''
|
||||
Test for execute a ruby command with rbenv's shims from
|
||||
the user or the system.
|
||||
'''
|
||||
with patch.object(rbenv, '_rbenv_path', return_value='A'):
|
||||
with patch.dict(rbenv.__salt__,
|
||||
{'cmd.run_all':
|
||||
MagicMock(return_value={'retcode': 0,
|
||||
'stdout': 'stdout'})}):
|
||||
with patch.object(rbenv, 'rehash', return_value=None):
|
||||
self.assertEqual(rbenv.do(), 'stdout')
|
||||
|
||||
with patch.dict(rbenv.__salt__,
|
||||
{'cmd.run_all':
|
||||
MagicMock(return_value={'retcode': 1,
|
||||
'stdout': 'stdout'})}):
|
||||
with patch.object(rbenv, 'rehash', return_value=None):
|
||||
self.assertFalse(rbenv.do(), 'stdout')
|
||||
|
||||
def test_do_with_ruby(self):
|
||||
'''
|
||||
Test for execute a ruby command with rbenv's shims using a
|
||||
specific ruby version.
|
||||
'''
|
||||
with patch.object(rbenv, 'do', return_value='A'):
|
||||
self.assertEqual(rbenv.do_with_ruby('ruby', 'cmdline'), 'A')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(RbenvTestCase, needs_daemon=False)
|
|
@ -1 +1 @@
|
|||
# encoding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
|
|
@ -1 +1 @@
|
|||
# encoding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
|
|
@ -1 +1 @@
|
|||
# encoding: utf-8
|
||||
# -*- coding: utf-8 -*-
|
||||
|
|
90
tests/unit/states/boto_lc_test.py
Normal file
90
tests/unit/states/boto_lc_test.py
Normal file
|
@ -0,0 +1,90 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
|
||||
'''
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.mock import (
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON,
|
||||
MagicMock,
|
||||
patch)
|
||||
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.states import boto_lc
|
||||
|
||||
boto_lc.__salt__ = {}
|
||||
boto_lc.__opts__ = {}
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
class BotoLcTestCase(TestCase):
|
||||
'''
|
||||
Test cases for salt.states.boto_lc
|
||||
'''
|
||||
# 'present' function tests: 1
|
||||
|
||||
def test_present(self):
|
||||
'''
|
||||
Test to ensure the launch configuration exists.
|
||||
'''
|
||||
name = 'mylc'
|
||||
image_id = 'ami-0b9c9f62'
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {},
|
||||
'comment': ''}
|
||||
|
||||
self.assertRaises(SaltInvocationError, boto_lc.present, name,
|
||||
image_id, user_data=True, cloud_init=True)
|
||||
|
||||
mock = MagicMock(side_effect=[True, False])
|
||||
with patch.dict(boto_lc.__salt__,
|
||||
{'boto_asg.launch_configuration_exists': mock}):
|
||||
comt = ('Launch configuration present.')
|
||||
ret.update({'comment': comt})
|
||||
self.assertDictEqual(boto_lc.present(name, image_id), ret)
|
||||
|
||||
with patch.dict(boto_lc.__opts__, {'test': True}):
|
||||
comt = ('Launch configuration set to be created.')
|
||||
ret.update({'comment': comt, 'result': None})
|
||||
self.assertDictEqual(boto_lc.present(name, image_id), ret)
|
||||
|
||||
# 'absent' function tests: 1
|
||||
|
||||
def test_absent(self):
|
||||
'''
|
||||
Test to ensure the named launch configuration is deleted.
|
||||
'''
|
||||
name = 'mylc'
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {},
|
||||
'comment': ''}
|
||||
|
||||
mock = MagicMock(side_effect=[False, True])
|
||||
with patch.dict(boto_lc.__salt__,
|
||||
{'boto_asg.launch_configuration_exists': mock}):
|
||||
comt = ('Launch configuration does not exist.')
|
||||
ret.update({'comment': comt})
|
||||
self.assertDictEqual(boto_lc.absent(name), ret)
|
||||
|
||||
with patch.dict(boto_lc.__opts__, {'test': True}):
|
||||
comt = ('Launch configuration set to be deleted.')
|
||||
ret.update({'comment': comt, 'result': None})
|
||||
self.assertDictEqual(boto_lc.absent(name), ret)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(BotoLcTestCase, needs_daemon=False)
|
106 tests/unit/states/boto_route53_test.py Normal file

@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import boto_route53

boto_route53.__salt__ = {}
boto_route53.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoRoute53TestCase(TestCase):
    '''
    Test cases for salt.states.boto_route53
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Test to ensure the Route53 record is present.
        '''
        name = 'test.example.com.'
        value = '1.1.1.1'
        zone = 'example.com.'
        record_type = 'A'

        ret = {'name': name,
               'result': False,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[{}, {}, {'value': ''}, False])
        mock_bool = MagicMock(return_value=False)
        with patch.dict(boto_route53.__salt__,
                        {'boto_route53.get_record': mock,
                         'boto_route53.add_record': mock_bool}):
            with patch.dict(boto_route53.__opts__, {'test': False}):
                comt = ('Failed to add {0} Route53 record.'.format(name))
                ret.update({'comment': comt})
                self.assertDictEqual(boto_route53.present(name, value, zone,
                                                          record_type), ret)

            with patch.dict(boto_route53.__opts__, {'test': True}):
                comt = ('Route53 record {0} set to be added.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_route53.present(name, value, zone,
                                                          record_type), ret)

                comt = ('Route53 record {0} set to be updated.'.format(name))
                ret.update({'comment': comt})
                self.assertDictEqual(boto_route53.present(name, value, zone,
                                                          record_type), ret)

                ret.update({'comment': '', 'result': True})
                self.assertDictEqual(boto_route53.present(name, value, zone,
                                                          record_type), ret)

    # 'absent' function tests: 1

    def test_absent(self):
        '''
        Test to ensure the Route53 record is deleted.
        '''
        name = 'test.example.com.'
        zone = 'example.com.'
        record_type = 'A'

        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[False, True])
        with patch.dict(boto_route53.__salt__,
                        {'boto_route53.get_record': mock}):
            comt = ('{0} does not exist.'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(boto_route53.absent(name, zone, record_type),
                                 ret)

            with patch.dict(boto_route53.__opts__, {'test': True}):
                comt = ('Route53 record {0} set to be deleted.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_route53.absent(name, zone,
                                                         record_type), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(BotoRoute53TestCase, needs_daemon=False)
92 tests/unit/states/boto_sns_test.py Normal file

@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import boto_sns

boto_sns.__salt__ = {}
boto_sns.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoSnsTestCase(TestCase):
    '''
    Test cases for salt.states.boto_sns
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Test to ensure the SNS topic exists.
        '''
        name = 'test.example.com.'

        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[True, False, False])
        mock_bool = MagicMock(return_value=False)
        with patch.dict(boto_sns.__salt__,
                        {'boto_sns.exists': mock,
                         'boto_sns.create': mock_bool}):
            comt = ('AWS SNS topic {0} present.'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(boto_sns.present(name), ret)

            with patch.dict(boto_sns.__opts__, {'test': True}):
                comt = ('AWS SNS topic {0} is set to be created.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_sns.present(name), ret)

            with patch.dict(boto_sns.__opts__, {'test': False}):
                comt = ('Failed to create {0} AWS SNS topic'.format(name))
                ret.update({'comment': comt, 'result': False})
                self.assertDictEqual(boto_sns.present(name), ret)

    # 'absent' function tests: 1

    def test_absent(self):
        '''
        Test to ensure the named sns topic is deleted.
        '''
        name = 'test.example.com.'

        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[False, True])
        with patch.dict(boto_sns.__salt__,
                        {'boto_sns.exists': mock}):
            comt = ('AWS SNS topic {0} does not exist.'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(boto_sns.absent(name), ret)

            with patch.dict(boto_sns.__opts__, {'test': True}):
                comt = ('AWS SNS topic {0} is set to be removed.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_sns.absent(name), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(BotoSnsTestCase, needs_daemon=False)
100 tests/unit/states/boto_sqs_test.py Normal file

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import boto_sqs

boto_sqs.__salt__ = {}
boto_sqs.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoSqsTestCase(TestCase):
    '''
    Test cases for salt.states.boto_sqs
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Test to ensure the SQS queue exists.
        '''
        name = 'mysqs'
        attributes = {'ReceiveMessageWaitTimeSeconds': 20}

        ret = {'name': name,
               'result': False,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[False, False, True, True])
        mock_bool = MagicMock(return_value=False)
        mock_attr = MagicMock(return_value={})
        with patch.dict(boto_sqs.__salt__,
                        {'boto_sqs.exists': mock,
                         'boto_sqs.create': mock_bool,
                         'boto_sqs.get_attributes': mock_attr}):
            with patch.dict(boto_sqs.__opts__, {'test': False}):
                comt = ('Failed to create {0} AWS queue'.format(name))
                ret.update({'comment': comt})
                self.assertDictEqual(boto_sqs.present(name), ret)

            with patch.dict(boto_sqs.__opts__, {'test': True}):
                comt = ('AWS SQS queue {0} is set to be created.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_sqs.present(name), ret)

                comt = ('Attribute(s) ReceiveMessageWaitTimeSeconds'
                        ' to be set on mysqs.')
                ret.update({'comment': comt})
                self.assertDictEqual(boto_sqs.present(name, attributes), ret)

                comt = ('mysqs present. Attributes set.')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(boto_sqs.present(name), ret)

    # 'absent' function tests: 1

    def test_absent(self):
        '''
        Test to ensure the named sqs queue is deleted.
        '''
        name = 'test.example.com.'

        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': ''}

        mock = MagicMock(side_effect=[False, True])
        with patch.dict(boto_sqs.__salt__,
                        {'boto_sqs.exists': mock}):
            comt = ('{0} does not exist in None.'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(boto_sqs.absent(name), ret)

            with patch.dict(boto_sqs.__opts__, {'test': True}):
                comt = ('AWS SQS queue {0} is set to be removed.'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(boto_sqs.absent(name), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(BotoSqsTestCase, needs_daemon=False)
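The boto_sqs test above pins boto_sqs.get_attributes to return an empty dict and then expects present() to report 'Attribute(s) ReceiveMessageWaitTimeSeconds to be set on mysqs.'. As a rough illustration only, and not the actual salt.states.boto_sqs implementation, the kind of desired-versus-current comparison that produces such a message could be sketched like this:

# Illustrative sketch only -- not the salt.states.boto_sqs code.
# It mirrors the comparison the test exercises when get_attributes
# is mocked to return {}.
def attributes_to_set(desired, current):
    '''Return the attribute names whose desired value differs from the
    value currently reported for the queue.'''
    return sorted(key for key, value in desired.items()
                  if current.get(key) != value)


desired = {'ReceiveMessageWaitTimeSeconds': 20}
current = {}          # what the mocked boto_sqs.get_attributes returns
pending = attributes_to_set(desired, current)
if pending:
    # Prints: Attribute(s) ReceiveMessageWaitTimeSeconds to be set on mysqs.
    print('Attribute(s) {0} to be set on mysqs.'.format(', '.join(pending)))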
77 tests/unit/states/chef_test.py Normal file

@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import chef

chef.__salt__ = {}
chef.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class ChefTestCase(TestCase):
    '''
    Test cases for salt.states.chef
    '''
    # 'client' function tests: 1

    def test_client(self):
        '''
        Test to run chef-client
        '''
        name = 'my-chef-run'

        ret = {'name': name,
               'result': False,
               'changes': {},
               'comment': ''}

        mock = MagicMock(return_value={'retcode': 1, 'stdout': '',
                                       'stderr': 'error'})
        with patch.dict(chef.__salt__, {'chef.client': mock}):
            with patch.dict(chef.__opts__, {'test': True}):
                comt = ('\nerror')
                ret.update({'comment': comt})
                self.assertDictEqual(chef.client(name), ret)

    # 'solo' function tests: 1

    def test_solo(self):
        '''
        Test to run chef-solo
        '''
        name = 'my-chef-run'

        ret = {'name': name,
               'result': False,
               'changes': {},
               'comment': ''}

        mock = MagicMock(return_value={'retcode': 1, 'stdout': '',
                                       'stderr': 'error'})
        with patch.dict(chef.__salt__, {'chef.solo': mock}):
            with patch.dict(chef.__opts__, {'test': True}):
                comt = ('\nerror')
                ret.update({'comment': comt})
                self.assertDictEqual(chef.solo(name), ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(ChefTestCase, needs_daemon=False)