Merge branch '2017.7' into improve-net-load

Nicole Thomas 2017-11-20 10:59:54 -05:00 committed by GitHub
commit bce50154e5
63 changed files with 1338 additions and 453 deletions


@ -1,11 +1,11 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% version = '2016.11.8' %>
<% version = '2017.7.1' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
<% if File.exists?(driverfile) %>
<%= File.read(driverfile) %>
<%= ERB.new(File.read(driverfile)).result %>
<% else %>
driver:
name: docker
@ -19,6 +19,8 @@ driver:
disable_upstart: false
provision_command:
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
transport:
name: sftp
<% end %>
sudo: false
@ -31,8 +33,8 @@ provisioner:
log_level: info
require_chef: false
remote_states:
name: git://github.com/gtmanfred/salt-jenkins.git
branch: 2016.11
name: git://github.com/saltstack/salt-jenkins.git
branch: 2017.7
repo: git
testingdir: /testing
salt_copy_filter:
@ -51,7 +53,7 @@ provisioner:
- git.salt
- kitchen
<% if File.exists?(platformsfile) %>
<%= File.read(platformsfile) %>
<%= ERB.new(File.read(platformsfile)).result %>
<% else %>
platforms:
- name: fedora
@ -163,6 +165,21 @@ suites:
testing_dir: /tmp/kitchen/testing
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
- name: py3
excludes:
- centos-6
- ubuntu-14.04
provisioner:
pillars:
top.sls:
base:
"*":
- jenkins
jenkins.sls:
testing_dir: /tmp/kitchen/testing
clone_repo: false
py3: true
salttesting_namespec: salttesting==2017.6.1
verifier:
name: shell
remote_exec: true


@ -1,9 +1,10 @@
# This file is only used for running the test suite with kitchen-salt.
source "https://rubygems.org"
source 'https://rubygems.org'
gem "test-kitchen"
gem "kitchen-salt", :git => 'https://github.com/gtmanfred/kitchen-salt.git'
gem 'test-kitchen'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'kitchen-sync'
gem 'git'
group :docker do


@ -19,14 +19,18 @@ Salt SSH allows for salt routines to be executed using only SSH for transport
Options
=======
.. program:: salt-ssh
.. include:: _includes/common-options.rst
.. option:: --hard-crash
Raise any original exception rather than exiting gracefully. Default: False.
.. option:: -r, --raw, --raw-shell
Execute a raw shell command.
.. option:: --priv
Specify the SSH private key file to be used for authentication.
.. option:: --roster
Define which roster system to use, this defines if a database backend,
@ -53,38 +57,117 @@ Options
the more running process the faster communication should be, default
is 25.
.. option:: --extra-filerefs=EXTRA_FILEREFS
Pass in extra files to include in the state tarball.
.. option:: --min-extra-modules=MIN_EXTRA_MODS
One or a comma-separated list of extra Python modules to be included
into Minimal Salt.
.. option:: --thin-extra-modules=THIN_EXTRA_MODS
One or a comma-separated list of extra Python modules to be included
into Thin Salt.
.. option:: -v, --verbose
Turn on command verbosity, display jid.
.. option:: -s, --static
Return the data from minions as a group after they all return.
.. option:: -w, --wipe
Remove the deployment of the salt files when done executing.
.. option:: -W, --rand-thin-dir
Select a random temp dir to deploy on the remote system. The dir
will be cleaned after the execution.
.. option:: -t, --regen-thin, --thin
Trigger a thin tarball regeneration. This is needed if custom
grains/modules/states have been added or updated.
.. option:: --python2-bin=PYTHON2_BIN
Path to a python2 binary which has salt installed.
.. option:: --python3-bin=PYTHON3_BIN
Path to a python3 binary which has salt installed.
.. option:: --jid=JID
Pass a JID to be used instead of generating one.
Authentication Options
----------------------
.. option:: --priv=SSH_PRIV
Specify the SSH private key file to be used for authentication.
.. option:: -i, --ignore-host-keys
Disables StrictHostKeyChecking to relax acceptance of new and unknown
host keys.
By default ssh host keys are honored and connections will ask for
approval. Use this option to disable StrictHostKeyChecking.
.. option:: --no-host-keys
Fully ignores ssh host keys which by default are honored and connections
would ask for approval. Useful if the host key of a remote server has
would ask for approval. Useful if the host key of a remote server has
changed and would still error with --ignore-host-keys.
.. option:: --user=SSH_USER
Set the default user to attempt to use when authenticating.
.. option:: --passwd
Set the default password to attempt to use when authenticating.
.. option:: --askpass
Interactively ask for the SSH password with no echo - avoids password
in process args and stored in history.
.. option:: --key-deploy
Set this flag to attempt to deploy the authorized ssh key with all
minions. This combined with --passwd can make initial deployment of keys
very fast and easy.
.. program:: salt
.. option:: --identities-only
.. include:: _includes/common-options.rst
Use only the authentication identity files configured in the ssh_config
files. See IdentitiesOnly flag in man ssh_config.
.. include:: _includes/target-selection-ssh.rst
.. option:: --sudo
Run command via sudo.
Scan Roster Options
-------------------
.. option:: --scan-ports=SSH_SCAN_PORTS
Comma-separated list of ports to scan in the scan roster.
.. option:: --scan-timeout=SSH_SCAN_TIMEOUT
Scanning socket timeout for the scan roster.
.. include:: _includes/logging-options.rst
.. |logfile| replace:: /var/log/salt/ssh
.. |loglevel| replace:: ``warning``
.. include:: _includes/target-selection-ssh.rst
.. include:: _includes/output-options.rst


@ -202,7 +202,7 @@ this.
# /srv/salt/orch/deploy.sls
{% set servers = salt['pillar.get']('servers', 'test') %}
{% set master = salt['pillat.get']('master', 'salt') %}
{% set master = salt['pillar.get']('master', 'salt') %}
create_instance:
salt.runner:
- name: cloud.profile


@ -160,6 +160,7 @@ class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
# Clear out syndics from cachedir
for syndic_file in os.listdir(self.config['syndic_dir']):
@ -280,6 +281,7 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)
@ -467,6 +469,7 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: dis
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)
@ -575,6 +578,7 @@ class Syndic(parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)


@ -32,7 +32,10 @@ class SPM(parsers.SPMParser):
v_dirs = [
self.config['cachedir'],
]
verify_env(v_dirs, self.config['user'],)
verify_env(v_dirs,
self.config['user'],
root_dir=self.config['root_dir'],
)
verify_log(self.config)
client = salt.spm.SPMClient(ui, self.config)
client.run(self.args)


@ -903,6 +903,8 @@ class Single(object):
ret = json.dumps({'local': opts_pkg})
return ret, retcode
if 'known_hosts_file' in self.opts:
opts_pkg['known_hosts_file'] = self.opts['known_hosts_file']
opts_pkg['file_roots'] = self.opts['file_roots']
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']


@ -289,6 +289,143 @@ def apply_(mods=None,
return highstate(**kwargs)
def request(mods=None,
**kwargs):
'''
.. versionadded:: 2017.7.3
Request that the local admin execute a state run via
`salt-call state.run_request`
All arguments match state.apply
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
req = check_request()
req.update({kwargs.get('name', 'default'): {
'test_run': ret,
'mods': mods,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return ret
def check_request(name=None):
'''
.. versionadded:: 2017.7.3
Return the state request information, if any
CLI Example:
.. code-block:: bash
salt '*' state.check_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if os.path.isfile(notify_path):
with salt.utils.fopen(notify_path, 'rb') as fp_:
req = serial.load(fp_)
if name:
return req[name]
return req
return {}
def clear_request(name=None):
'''
.. versionadded:: 2017.7.3
Clear out the state execution request without executing it
CLI Example:
.. code-block:: bash
salt '*' state.clear_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if not os.path.isfile(notify_path):
return True
if not name:
try:
os.remove(notify_path)
except (IOError, OSError):
pass
else:
req = check_request()
if name in req:
req.pop(name)
else:
return False
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return True
def run_request(name='default', **kwargs):
'''
.. versionadded:: 2017.7.3
Execute the pending state request
CLI Example:
.. code-block:: bash
salt '*' state.run_request
'''
req = check_request()
if name not in req:
return {}
n_req = req[name]
if 'mods' not in n_req or 'kwargs' not in n_req:
return {}
req[name]['kwargs'].update(kwargs)
if 'test' in n_req['kwargs']:
n_req['kwargs'].pop('test')
if req:
ret = apply_(n_req['mods'], **n_req['kwargs'])
try:
os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
except (IOError, OSError):
pass
return ret
return {}
def highstate(test=None, **kwargs):
'''
Retrieve the state data from the salt master for this minion and execute it
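A hedged sketch of the request-file round trip that state.request() and check_request() above perform: the payload layout comes from the diff, while using msgpack directly is an assumption standing in for salt.payload.Serial (which defaults to msgpack).

```python
import os
import msgpack  # stand-in for salt.payload.Serial; an assumption, not Salt's import

def queue_request(cachedir, mods, name='default', **kwargs):
    '''Append a named state request to <cachedir>/req_state.p, as state.request() does.'''
    req_file = os.path.join(cachedir, 'req_state.p')
    req = {}
    if os.path.isfile(req_file):
        with open(req_file, 'rb') as fp_:
            req = msgpack.unpackb(fp_.read(), raw=False)
    req[name] = {'mods': mods, 'kwargs': kwargs}
    cumask = os.umask(0o077)  # keep the queued request readable only by the owner
    try:
        with open(req_file, 'w+b') as fp_:
            fp_.write(msgpack.packb(req))
    finally:
        os.umask(cumask)
    return req
```

An operator would later execute the queued run with `salt-call state.run_request`, as the docstrings above describe.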


@ -66,7 +66,8 @@ class SaltCloud(parsers.SaltCloudParser):
if self.config['verify_env']:
verify_env(
[os.path.dirname(self.config['conf_file'])],
salt_master_user
salt_master_user,
root_dir=self.config['root_dir'],
)
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith('tcp://') \


@ -498,16 +498,16 @@ def tar(options, tarfile, sources=None, dest=None,
.. code-block:: bash
salt '*' archive.tar -cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
salt '*' archive.tar cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
CLI Examples:
.. code-block:: bash
# Create a tarfile
salt '*' archive.tar -cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
# Create a tarfile using globbing (2017.7.0 and later)
salt '*' archive.tar -cjvf /tmp/tarfile.tar.bz2 '/tmp/file_*'
salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 '/tmp/file_*'
# Unpack a tarfile
salt '*' archive.tar xf foo.tar dest=/target/directory
'''


@ -51,6 +51,7 @@ import datetime
import logging
import json
import sys
import time
import email.mime.multipart
log = logging.getLogger(__name__)
@ -675,11 +676,23 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policies = conn.get_all_policies(as_group=as_group)
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
log.error('Could not convert: {0}'.format(as_group))
retries = 30
while retries > 0:
retries -= 1
try:
policies = conn.get_all_policies(as_group=as_group)
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
log.error('Could not convert: {0}'.format(as_group))
return None
except boto.exception.BotoServerError as e:
if e.error_code != 'Throttling':
raise
log.debug('Throttled by API, will retry in 5 seconds')
time.sleep(5)
log.error('Maximum number of retries exceeded')
return None
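The loop above is the usual answer to AWS request throttling in boto; a generalized, standalone sketch of the same pattern (the helper name and defaults are illustrative, not part of Salt):

```python
import time

import boto.exception

def call_with_throttle_retry(func, args=None, kwargs=None, retries=30, delay=5):
    '''Call a boto function, retrying while AWS answers with a Throttling error.'''
    args = args or ()
    kwargs = kwargs or {}
    while retries > 0:
        retries -= 1
        try:
            return func(*args, **kwargs)
        except boto.exception.BotoServerError as exc:
            if exc.error_code != 'Throttling':
                raise
            time.sleep(delay)
    raise RuntimeError('Maximum number of retries exceeded')
```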
@ -761,11 +774,18 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
# get full instance info, so that we can return the attribute
instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
if attributes:
return [[getattr(instance, attr).encode("ascii") for attr in attributes] for instance in instances]
return [[_convert_attribute(instance, attr) for attr in attributes] for instance in instances]
else:
# properly handle case when not all instances have the requested attribute
return [getattr(instance, attribute).encode("ascii") for instance in instances if getattr(instance, attribute)]
return [getattr(instance, attribute).encode("ascii") for instance in instances]
return [_convert_attribute(instance, attribute) for instance in instances if getattr(instance, attribute)]
def _convert_attribute(instance, attribute):
if attribute == "tags":
tags = dict(getattr(instance, attribute))
return {key.encode("utf-8"): value.encode("utf-8") for key, value in six.iteritems(tags)}
return getattr(instance, attribute).encode("ascii")
def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,


@ -910,8 +910,8 @@ def compare_container(first, second, ignore=None):
ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else:
if item == 'Links':
val1 = _scrub_links(val1, first)
val2 = _scrub_links(val2, second)
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container
@ -933,8 +933,8 @@ def compare_container(first, second, ignore=None):
ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else:
if item == 'Links':
val1 = _scrub_links(val1, first)
val2 = _scrub_links(val2, second)
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret
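A small illustration (not Salt code) of why the sort matters: the Docker API returns Links in arbitrary order, so an order-insensitive comparison prevents spurious 'changed' results.

```python
val1 = ['/web1:/alias1', '/web2:/alias2']
val2 = ['/web2:/alias2', '/web1:/alias1']

assert val1 != val2                  # naive comparison reports a difference
assert sorted(val1) == sorted(val2)  # sorted comparison sees the same links
```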


@ -164,7 +164,7 @@ def _setup_conn(**kwargs):
if client_key_file:
kubernetes.client.configuration.key_file = client_key_file
if client_key:
elif client_key:
with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k:
k.write(base64.b64decode(client_key))
kubernetes.client.configuration.key_file = k.name


@ -6,9 +6,10 @@ Module for sending messages to Mattermost
:configuration: This module can be used by either passing an api_url and hook
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
master/minion config. For example:
.. code-block:: yaml
mattermost:
hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_url: https://example.com
@ -35,6 +36,7 @@ __virtualname__ = 'mattermost'
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
@ -43,6 +45,7 @@ def __virtual__():
def _get_hook():
'''
Retrieves and return the Mattermost's configured hook
:return: String: the hook string
'''
hook = __salt__['config.get']('mattermost.hook') or \
@ -56,6 +59,7 @@ def _get_hook():
def _get_api_url():
'''
Retrieves and return the Mattermost's configured api url
:return: String: the api url string
'''
api_url = __salt__['config.get']('mattermost.api_url') or \
@ -69,6 +73,7 @@ def _get_api_url():
def _get_channel():
'''
Retrieves the Mattermost's configured channel
:return: String: the channel string
'''
channel = __salt__['config.get']('mattermost.channel') or \
@ -80,6 +85,7 @@ def _get_channel():
def _get_username():
'''
Retrieves the Mattermost's configured username
:return: String: the username string
'''
username = __salt__['config.get']('mattermost.username') or \
@ -95,14 +101,18 @@ def post_message(message,
hook=None):
'''
Send a message to a Mattermost channel.
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt '*' mattermost.post_message message='Build is done'
'''
if not api_url:


@ -689,9 +689,24 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
if opt in opts:
result[opt] = opts[opt]
for opt in ['ipaddrs', 'ipv6addrs']:
if opt in opts:
result[opt] = opts[opt]
if 'ipaddrs' in opts:
result['ipaddrs'] = []
for opt in opts['ipaddrs']:
if salt.utils.validate.net.ipv4_addr(opt):
ip, prefix = [i.strip() for i in opt.split('/')]
result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix})
else:
msg = 'ipv4 CIDR is invalid'
log.error(msg)
raise AttributeError(msg)
if 'ipv6addrs' in opts:
for opt in opts['ipv6addrs']:
if not salt.utils.validate.net.ipv6_addr(opt):
msg = 'ipv6 CIDR is invalid'
log.error(msg)
raise AttributeError(msg)
result['ipv6addrs'] = opts['ipv6addrs']
if 'enable_ipv6' in opts:
result['enable_ipv6'] = opts['enable_ipv6']
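In isolation, the new ipaddrs handling splits each validated 'address/prefix' entry into the dict the ifcfg template expects; a standalone sketch without the salt.utils.validate call:

```python
def split_cidrs(cidrs):
    '''Turn validated IPv4 CIDR strings into {'ipaddr': ..., 'prefix': ...} dicts.'''
    result = []
    for opt in cidrs:
        ip, prefix = [i.strip() for i in opt.split('/')]
        result.append({'ipaddr': ip, 'prefix': prefix})
    return result

assert split_cidrs(['192.168.10.5/24', '10.0.0.2/8']) == [
    {'ipaddr': '192.168.10.5', 'prefix': '24'},
    {'ipaddr': '10.0.0.2', 'prefix': '8'},
]
```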


@ -710,7 +710,9 @@ class _policy_info(object):
'lgpo_section': self.password_policy_gpedit_path,
'Settings': {
'Function': '_in_range_inclusive',
'Args': {'min': 0, 'max': 86313600}
'Args': {'min': 1,
'max': 86313600,
'zero_value': 0xffffffff}
},
'NetUserModal': {
'Modal': 0,
@ -718,7 +720,9 @@ class _policy_info(object):
},
'Transform': {
'Get': '_seconds_to_days',
'Put': '_days_to_seconds'
'Put': '_days_to_seconds',
'GetArgs': {'zero_value': 0xffffffff},
'PutArgs': {'zero_value': 0xffffffff}
},
},
'MinPasswordAge': {
@ -750,7 +754,7 @@ class _policy_info(object):
},
},
'PasswordComplexity': {
'Policy': 'Passwords must meet complexity requirements',
'Policy': 'Password must meet complexity requirements',
'lgpo_section': self.password_policy_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Secedit': {
@ -2369,7 +2373,10 @@ class _policy_info(object):
'''
converts a number of seconds to days
'''
zero_value = kwargs.get('zero_value', 0)
if val is not None:
if val == zero_value:
return 0
return val / 86400
else:
return 'Not Defined'
@ -2379,7 +2386,10 @@ class _policy_info(object):
'''
converts a number of days to seconds
'''
zero_value = kwargs.get('zero_value', 0)
if val is not None:
if val == 0:
return zero_value
return val * 86400
else:
return 'Not Defined'
@ -2491,9 +2501,11 @@ class _policy_info(object):
def _in_range_inclusive(cls, val, **kwargs):
'''
checks that a value is in an inclusive range
The value for 0 used by Max Password Age is actually 0xffffffff
'''
minimum = 0
maximum = 1
minimum = kwargs.get('min', 0)
maximum = kwargs.get('max', 1)
zero_value = kwargs.get('zero_value', 0)
if isinstance(val, six.string_types):
if val.lower() == 'not defined':
@ -2503,12 +2515,8 @@ class _policy_info(object):
val = int(val)
except ValueError:
return False
if 'min' in kwargs:
minimum = kwargs['min']
if 'max' in kwargs:
maximum = kwargs['max']
if val is not None:
if val >= minimum and val <= maximum:
if minimum <= val <= maximum or val == zero_value:
return True
else:
return False
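The zero_value plumbing above amounts to treating 0xffffffff as the Windows sentinel for 'password never expires'; a condensed sketch of the three pieces (constants taken from the diff, helper names illustrative):

```python
NEVER = 0xffffffff  # registry sentinel for "password never expires"

def seconds_to_days(val, zero_value=0):
    return 0 if val == zero_value else val / 86400

def days_to_seconds(val, zero_value=0):
    return zero_value if val == 0 else val * 86400

def in_range_inclusive(val, minimum=1, maximum=86313600, zero_value=NEVER):
    return minimum <= val <= maximum or val == zero_value

assert seconds_to_days(NEVER, zero_value=NEVER) == 0       # shown to the user as 0 days
assert days_to_seconds(999, zero_value=NEVER) == 86313600  # the documented maximum
assert in_range_inclusive(NEVER)                           # the sentinel passes validation
```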
@ -2640,11 +2648,7 @@ class _policy_info(object):
or values
'''
log.debug('item == {0}'.format(item))
value_lookup = False
if 'value_lookup' in kwargs:
value_lookup = kwargs['value_lookup']
else:
value_lookup = False
value_lookup = kwargs.get('value_lookup', False)
if 'lookup' in kwargs:
for k, v in six.iteritems(kwargs['lookup']):
if value_lookup:


@ -52,6 +52,7 @@ from salt.exceptions import (CommandExecutionError,
SaltRenderError)
import salt.utils
import salt.utils.pkg
import salt.utils.path
import salt.syspaths
import salt.payload
from salt.exceptions import MinionError
@ -641,33 +642,10 @@ def _get_repo_details(saltenv):
# Do some safety checks on the repo_path as its contents can be removed,
# this includes check for bad coding
system_root = os.environ.get('SystemRoot', r'C:\Windows')
deny_paths = (
r'[a-z]\:\\$', # C:\, D:\, etc
r'\\$', # \
re.escape(system_root) # C:\Windows
)
if not salt.utils.path.safe_path(
path=local_dest,
allow_path='\\'.join([system_root, 'TEMP'])):
# Since the above checks anything in C:\Windows, there are some
# directories we may want to make exceptions for
allow_paths = (
re.escape('\\'.join([system_root, 'TEMP'])), # C:\Windows\TEMP
)
# Check the local_dest to make sure it's not one of the bad paths
good_path = True
for d_path in deny_paths:
if re.match(d_path, local_dest, flags=re.IGNORECASE) is not None:
# Found deny path
good_path = False
# If local_dest is one of the bad paths, check for exceptions
if not good_path:
for a_path in allow_paths:
if re.match(a_path, local_dest, flags=re.IGNORECASE) is not None:
# Found exception
good_path = True
if not good_path:
raise CommandExecutionError(
'Attempting to delete files from a possibly unsafe location: '
'{0}'.format(local_dest)


@ -444,8 +444,9 @@ def stop(name):
try:
win32serviceutil.StopService(name)
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed To Stop {0}: {1}'.format(name, exc[2]))
if exc[0] != 1062:
raise CommandExecutionError(
'Failed To Stop {0}: {1}'.format(name, exc[2]))
attempts = 0
while info(name)['Status'] in ['Running', 'Stop Pending'] \
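For context on the magic number above: Windows error 1062 is ERROR_SERVICE_NOT_ACTIVE ('The service has not been started'), so a stop request against an already-stopped service is no longer fatal. A standalone sketch of the same tolerance, assuming pywin32 is available:

```python
import pywintypes
import win32serviceutil

ERROR_SERVICE_NOT_ACTIVE = 1062  # "The service has not been started"

def stop_if_running(name):
    try:
        win32serviceutil.StopService(name)
    except pywintypes.error as exc:
        if exc.winerror != ERROR_SERVICE_NOT_ACTIVE:
            raise
```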


@ -6,8 +6,7 @@ or for problem solving if your minion is having problems.
.. versionadded:: 0.12.0
:depends: - pythoncom
- wmi
:depends: - wmi
'''
# Import Python Libs


@ -92,28 +92,31 @@ def halt(timeout=5, in_seconds=False):
Halt a running system.
Args:
timeout (int): Number of seconds before halting the system. Default is
5 seconds.
in_seconds (bool): Whether to treat timeout as seconds or minutes.
timeout (int):
Number of seconds before halting the system. Default is 5 seconds.
in_seconds (bool):
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.halt 5
salt '*' system.halt 5 True
'''
return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel): # pylint: disable=unused-argument
'''
Change the system runlevel on sysV compatible systems
Change the system runlevel on sysV compatible systems. Not applicable to
Windows
CLI Example:
@ -136,14 +139,18 @@ def poweroff(timeout=5, in_seconds=False):
Power off a running system.
Args:
timeout (int): Number of seconds before powering off the system. Default
is 5 seconds.
in_seconds (bool): Whether to treat timeout as seconds or minutes.
timeout (int):
Number of seconds before powering off the system. Default is 5
seconds.
in_seconds (bool):
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -160,29 +167,35 @@ def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disabl
Reboot a running system.
Args:
timeout (int): Number of minutes/seconds before rebooting the system.
Minutes vs seconds depends on the value of ``in_seconds``. Default
timeout (int):
The number of minutes/seconds before rebooting the system. Use of
minutes or seconds depends on the value of ``in_seconds``. Default
is 5 minutes.
in_seconds (bool): Whether to treat timeout as seconds or minutes.
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
wait_for_reboot (bool): Sleeps for timeout + 30 seconds after reboot has
been initiated. This may be useful for use in a highstate if a
reboot should be performed and the return data of the highstate is
not required. If return data is required, consider using the reboot
state instead of this module.
wait_for_reboot (bool):
``True`` will sleep for timeout + 30 seconds after reboot has been
initiated. This is useful for use in a highstate. For example, you
may have states that you want to apply only after the reboot.
Default is ``False``.
.. versionadded:: 2015.8.0
only_on_pending_reboot (bool): If this is set to True, then the reboot
will only proceed if the system reports a pending reboot. To
optionally reboot in a highstate, consider using the reboot state
instead of this module.
only_on_pending_reboot (bool):
If this is set to ``True``, then the reboot will only proceed
if the system reports a pending reboot. Setting this parameter to
``True`` could be useful when calling this function from a final
housekeeping state intended to be executed at the end of a state run
(using *order: last*). Default is ``False``.
Returns:
bool: True if successful (a reboot will occur), otherwise False
bool: ``True`` if successful (a reboot will occur), otherwise ``False``
CLI Example:
@ -191,20 +204,16 @@ def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disabl
salt '*' system.reboot 5
salt '*' system.reboot 5 True
As example of invoking this function from within a final housekeeping state
is as follows:
Example:
Invoking this function from a final housekeeping state:
.. code-block:: yaml
final housekeeping:
final_housekeeping:
module.run:
- name: system.reboot
- only_on_pending_reboot: True
- order: last
'''
ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
only_on_pending_reboot=only_on_pending_reboot)
@ -221,50 +230,63 @@ def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint:
Shutdown a running system.
Args:
message (str): A message to display to the user before shutting down.
timeout (int): The length of time that the shutdown dialog box should be
displayed, in seconds. While this dialog box is displayed, the
shutdown can be stopped by the shutdown_abort function.
message (str):
The message to display to the user before shutting down.
timeout (int):
The length of time (in seconds) that the shutdown dialog box should
be displayed. While this dialog box is displayed, the shutdown can
be aborted using the ``system.shutdown_abort`` function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box
on the specified computer. The dialog box displays the name of the
user who called the function, displays the message specified by the
lpMessage parameter, and prompts the user to log off. The dialog box
beeps when it is created and remains on top of other windows in the
system. The dialog box can be moved but not closed. A timer counts
down the remaining time before a forced shutdown.
user who called the function, the message specified by the lpMessage
parameter, and prompts the user to log off. The dialog box beeps
when it is created and remains on top of other windows (system
modal). The dialog box can be moved but not closed. A timer counts
down the remaining time before the shutdown occurs.
If timeout is zero, the computer shuts down without displaying the
dialog box, and the shutdown cannot be stopped by shutdown_abort.
If timeout is zero, the computer shuts down immediately without
displaying the dialog box and cannot be stopped by
``system.shutdown_abort``.
Default is 5 minutes
in_seconds (bool): Whether to treat timeout as seconds or minutes.
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
force_close (bool): True to force close all open applications. False
displays a dialog box instructing the user to close the
applications.
force_close (bool):
``True`` will force close all open applications. ``False`` will
display a dialog box instructing the user to close open
applications. Default is ``True``.
reboot (bool): True restarts the computer immediately after shutdown.
False caches to disk and safely powers down the system.
reboot (bool):
``True`` restarts the computer immediately after shutdown. ``False``
powers down the system. Default is ``False``.
only_on_pending_reboot (bool): If this is set to True, then the shutdown
will only proceed if the system reports a pending reboot. To
optionally shutdown in a highstate, consider using the shutdown
state instead of this module.
only_on_pending_reboot (bool):
If ``True`` the shutdown will only proceed if there is a reboot
pending. ``False`` will shutdown the system. Default is ``False``.
Returns:
bool: True if successful (a shutdown or reboot will occur), otherwise
False
bool:
``True`` if successful (a shutdown or reboot will occur), otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.shutdown 5
salt '*' system.shutdown "System will shutdown in 5 minutes"
'''
if six.PY2:
message = _to_unicode(message)
@ -294,7 +316,7 @@ def shutdown_hard():
Shutdown a running system with no timeout or warning.
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -312,7 +334,7 @@ def shutdown_abort():
aborted.
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -337,7 +359,7 @@ def lock():
Lock the workstation.
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -353,12 +375,14 @@ def set_computer_name(name):
Set the Windows computer name
Args:
name (str): The new name to give the computer. Requires a reboot to take
effect.
name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns:
dict: Returns a dictionary containing the old and new names if
successful. False if not.
dict:
Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example:
@ -389,7 +413,9 @@ def get_pending_computer_name():
error message will be logged to the minion log.
Returns:
str: The pending name if restart is pending, otherwise returns None.
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
@ -412,7 +438,7 @@ def get_computer_name():
Get the Windows computer name
Returns:
str: Returns the computer name if found. Otherwise returns False
str: Returns the computer name if found. Otherwise returns ``False``.
CLI Example:
@ -429,10 +455,12 @@ def set_computer_desc(desc=None):
Set the Windows computer description
Args:
desc (str): The computer description
desc (str):
The computer description
Returns:
bool: True if successful, otherwise False
str: Description if successful, otherwise ``False``
CLI Example:
@ -475,8 +503,8 @@ def get_system_info():
Get system information.
Returns:
dict: Returns a Dictionary containing information about the system to
include name, description, version, etc...
dict: Dictionary containing information about the system to include
name, description, version, etc...
CLI Example:
@ -529,7 +557,8 @@ def get_computer_desc():
Get the Windows computer description
Returns:
str: The computer description if found, otherwise False
str: Returns the computer description if found. Otherwise returns
``False``.
CLI Example:
@ -546,12 +575,12 @@ get_computer_description = salt.utils.alias_function(get_computer_desc, 'get_com
def get_hostname():
'''
.. versionadded:: 2016.3.0
Get the hostname of the windows minion
.. versionadded:: 2016.3.0
Returns:
str: The hostname of the windows minion
str: Returns the hostname of the windows minion
CLI Example:
@ -566,16 +595,16 @@ def get_hostname():
def set_hostname(hostname):
'''
.. versionadded:: 2016.3.0
Set the hostname of the windows minion, requires a restart before this will
be updated.
Set the hostname of the windows minion, requires a restart before this
will be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -597,37 +626,41 @@ def join_domain(domain,
account_exists=False,
restart=False):
'''
Join a computer to an Active Directory domain. Requires reboot.
Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str): The domain to which the computer should be joined, e.g.
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str): Username of an account which is authorized to join
computers to the specified domain. Need to be either fully qualified
like ``user@domain.tld`` or simply ``user``
username (str):
Username of an account which is authorized to join computers to the
specified domain. Needs to be either fully qualified like
``user@domain.tld`` or simply ``user``
password (str): Password of the specified user
password (str):
Password of the specified user
account_ou (str): The DN of the OU below which the account for this
computer should be created when joining the domain, e.g.
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool): If set to ``True`` the computer will only join
the domain if the account already exists. If set to ``False`` the
computer account will be created if it does not exist, otherwise it
will use the existing account. Default is False.
account_exists (bool):
If set to ``True`` the computer will only join the domain if the
account already exists. If set to ``False`` the computer account
will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool): Restarts the computer after a successful join
restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Dictionary if successful
Raises:
CommandExecutionError: Raises an error if _join_domain returns anything
other than 0
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
@ -741,33 +774,41 @@ def unjoin_domain(username=None,
disable=False,
restart=False):
r'''
Unjoin a computer from an Active Directory Domain. Requires restart.
Unjoin a computer from an Active Directory Domain. Requires a restart.
Args:
username (str): Username of an account which is authorized to manage
computer accounts on the domain. Need to be fully qualified like
``user@domain.tld`` or ``domain.tld\user``. If domain not specified,
the passed domain will be used. If computer account doesn't need to
be disabled, can be None.
password (str): Password of the specified user
username (str):
Username of an account which is authorized to manage computer
accounts on the domain. Needs to be a fully qualified name like
``user@domain.tld`` or ``domain.tld\user``. If the domain is not
specified, the passed domain will be used. If the computer account
doesn't need to be disabled after the computer is unjoined, this can
be ``None``.
domain (str): The domain from which to unjoin the computer. Can be None.
password (str):
The password of the specified user
workgroup (str): The workgroup to join the computer to. Default is
``WORKGROUP``
domain (str):
The domain from which to unjoin the computer. Can be ``None``
workgroup (str):
The workgroup to join the computer to. Default is ``WORKGROUP``
.. versionadded:: 2015.8.2/2015.5.7
disable (bool): Disable the computer account in Active Directory. True
to disable. Default is False
disable (bool):
``True`` to disable the computer account in Active Directory.
Default is ``False``
restart (bool): Restart the computer after successful unjoin
restart (bool):
``True`` will restart the computer after successful unjoin. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7
Returns:
dict: Dictionary if successful, otherwise False
dict: Returns a dictionary if successful, otherwise ``False``
CLI Example:
@ -859,15 +900,16 @@ def get_domain_workgroup():
def _try_parse_datetime(time_str, fmts):
'''
Attempts to parse the input time_str as a date.
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: A datetime object if parsed properly, otherwise None
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
@ -910,7 +952,9 @@ def set_system_time(newtime):
Set the system time.
Args:
newtime (str): The time to set. Can be any of the following formats.
newtime (str):
The time to set. Can be any of the following formats:
- HH:MM:SS AM/PM
- HH:MM AM/PM
@ -918,7 +962,7 @@ def set_system_time(newtime):
- HH:MM (24 hour)
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -951,24 +995,16 @@ def set_system_date_time(years=None,
system year will be used. (Used by set_system_date and set_system_time)
Args:
years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59
Returns:
bool: True if successful
Raises:
CommandExecutionError: Raises an error if ``SetLocalTime`` function
fails
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -1037,7 +1073,7 @@ def get_system_date():
Get the Windows system date
Returns:
str: The system date
str: Returns the system date
CLI Example:
@ -1054,7 +1090,8 @@ def set_system_date(newdate):
Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str): The date to set. Can be any of the following formats:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
@ -1063,6 +1100,9 @@ def set_system_date(newdate):
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
@ -1087,7 +1127,7 @@ def start_time_service():
Start the Windows time service
Returns:
bool: True if successful, otherwise False.
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -1103,7 +1143,7 @@ def stop_time_service():
Stop the Windows time service
Returns:
bool: True if successful, otherwise False
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -1122,7 +1162,8 @@ def get_pending_component_servicing():
.. versionadded:: 2016.11.0
Returns:
bool: True if a reboot is pending, otherwise False.
bool: ``True`` if there are pending Component Based Servicing tasks,
otherwise ``False``
CLI Example:
@ -1146,12 +1187,14 @@ def get_pending_component_servicing():
def get_pending_domain_join():
'''
Determine whether there is a pending domain join action that requires a reboot.
Determine whether there is a pending domain join action that requires a
reboot.
.. versionadded:: 2016.11.0
Returns:
bool: True if a reboot is pending, otherwise False.
bool: ``True`` if there is a pending domain join action, otherwise
``False``
CLI Example:
@ -1193,7 +1236,8 @@ def get_pending_file_rename():
.. versionadded:: 2016.11.0
Returns:
bool: True if a reboot is pending, otherwise False.
bool: ``True`` if there are pending file rename operations, otherwise
``False``
CLI Example:
@ -1228,7 +1272,8 @@ def get_pending_servermanager():
.. versionadded:: 2016.11.0
Returns:
bool: True if a reboot is pending, otherwise False.
bool: ``True`` if there are pending Server Manager tasks, otherwise
``False``
CLI Example:
@ -1265,7 +1310,7 @@ def get_pending_update():
.. versionadded:: 2016.11.0
Returns:
bool: True if a reboot is pending, otherwise False.
bool: ``True`` if there are pending updates, otherwise ``False``
CLI Example:
@ -1305,14 +1350,14 @@ def set_reboot_required_witnessed():
current boot session. Also, in the scope of this key, the name *'Reboot
required'* will be assigned the value of *1*.
(For the time being, this this function is being used whenever an install
completes with exit code 3010 and this usage can be extended where
appropriate in the future.)
For the time being, this function is being used whenever an install
completes with exit code 3010 and can be extended where appropriate in the
future.
.. versionadded:: 2016.11.0
Returns:
bool: True if registry entry set successfuly, otherwise False.
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -1330,16 +1375,18 @@ def set_reboot_required_witnessed():
def get_reboot_required_witnessed():
'''
This tells us if, at any time during the current boot session the salt
minion witnessed an event indicating that a reboot is required. (For the
time being, this function will return True if an install completed with exit
code 3010 during the current boot session and this usage can be extended
where appropriate in the future)
Determine if at any time during the current boot session the salt minion
witnessed an event indicating that a reboot is required.
This function will return ``True`` if an install completed with exit
code 3010 during the current boot session and can be extended where
appropriate in the future.
.. versionadded:: 2016.11.0
Returns:
bool: True if reboot required, otherwise False.
bool: ``True`` if the ``Requires reboot`` registry flag is set to ``1``,
otherwise ``False``
CLI Example:
@ -1361,7 +1408,7 @@ def get_pending_reboot():
.. versionadded:: 2016.11.0
Returns:
bool: True if pending reboot, otherwise False.
bool: ``True`` if the system is pending reboot, otherwise ``False``
CLI Example:


@ -23,7 +23,14 @@ def output(ret, bar, **kwargs): # pylint: disable=unused-argument
Update the progress bar
'''
if 'return_count' in ret:
bar.update(ret['return_count'])
val = ret['return_count']
# Avoid failing if targets are behind a syndic. In this case the actual return count will be
# higher than the count targeted by the MoM itself.
# TODO: implement a way to get the proper target minions count and remove this workaround.
# Details are in #44239.
if val > bar.maxval:
bar.maxval = val
bar.update(val)
return ''
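A toy version of the guard added above, assuming `bar` is a progressbar.ProgressBar whose maxval was sized from the originally targeted minion list:

```python
def update_progress(bar, return_count):
    # Syndic topologies can report more returns than the master of masters targeted,
    # and ProgressBar.update() rejects values above maxval, so grow maxval first.
    if return_count > bar.maxval:
        bar.maxval = return_count
    bar.update(return_count)
```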


@ -235,25 +235,25 @@ class PillarCache(object):
return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here
def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
log.debug('Scanning pillar cache for information about minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
log.debug('Scanning cache: {0}'.format(self.cache._dict))
# Check the cache!
if self.minion_id in self.cache: # Keyed by minion_id
# TODO Compare grains, etc?
if self.saltenv in self.cache[self.minion_id]:
if self.pillarenv in self.cache[self.minion_id]:
# We have a cache hit! Send it back.
log.debug('Pillar cache hit for minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
return self.cache[self.minion_id][self.saltenv]
log.debug('Pillar cache hit for minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
return self.cache[self.minion_id][self.pillarenv]
else:
# We found the minion but not the env. Store it.
fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id][self.saltenv] = fresh_pillar
log.debug('Pillar cache miss for saltenv {0} for minion {1}'.format(self.saltenv, self.minion_id))
self.cache[self.minion_id][self.pillarenv] = fresh_pillar
log.debug('Pillar cache miss for pillarenv {0} for minion {1}'.format(self.pillarenv, self.minion_id))
return fresh_pillar
else:
# We haven't seen this minion yet in the cache. Store it.
fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id] = {self.saltenv: fresh_pillar}
self.cache[self.minion_id] = {self.pillarenv: fresh_pillar}
log.debug('Pillar cache miss for minion {0}'.format(self.minion_id))
log.debug('Current pillar cache: {0}'.format(self.cache._dict)) # FIXME hack!
return fresh_pillar
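Reduced to its essentials, the in-memory cache is now keyed by minion_id and then pillarenv instead of saltenv; a toy sketch of that keying (illustrative only):

```python
cache = {}

def cached_pillar(minion_id, pillarenv, fetch_pillar):
    '''Return the cached compiled pillar for (minion_id, pillarenv), compiling on a miss.'''
    envs = cache.setdefault(minion_id, {})
    if pillarenv not in envs:
        envs[pillarenv] = fetch_pillar()
    return envs[pillarenv]
```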


@ -6,8 +6,11 @@ from __future__ import absolute_import
# Import python libs
import os
import logging
import pickle
import logging
# Import Salt modules
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
@ -19,7 +22,7 @@ DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = os.tmpnam()
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
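os.tmpnam() only produced a name (and was removed in Python 3); the replacement actually creates the file. A rough standard-library equivalent, assuming salt.utils.files.mkstemp wraps tempfile.mkstemp and closes the descriptor:

```python
import os
import tempfile

fd, filename = tempfile.mkstemp()
os.close(fd)  # keep only the path, as the proxy module does with FILENAME
```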


@ -196,9 +196,7 @@ def __virtual__():
Only return if all the modules are available
'''
if not salt.utils.which('racadm'):
log.critical('fx2 proxy minion needs "racadm" to be installed.')
return False
return False, 'fx2 proxy minion needs "racadm" to be installed.'
return True


@ -16,9 +16,21 @@ Dependencies
The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm``
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
.. _NAPALM: https://napalm-automation.net/
.. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html
.. note::
Beginning with Salt release 2017.7.3, it is recommended to use
``napalm`` >= ``2.0.0``. The library has been unified into a monolithic
package, as opposed to separate packages per driver. For more details
you can check `this document <https://napalm-automation.net/reunification/>`_.
While it will still work with the old packages, bear in mind that the NAPALM
core team will maintain only the main ``napalm`` package.
Moreover, for additional capabilities, the users can always define a
library that extends NAPALM's base capabilities and configure the
``provider`` option (see below).
Pillar
------
@ -59,7 +71,7 @@ always_alive: ``True``
.. versionadded:: 2017.7.0
provider: ``napalm_base``
The module that provides the ``get_network_device`` function.
The library that provides the ``get_network_device`` function.
This option is useful when the user has more specific needs and requires
to extend the NAPALM capabilities using a private library implementation.
The only constraint is that the alternative library needs to have the
@ -129,17 +141,7 @@ from __future__ import absolute_import
import logging
log = logging.getLogger(__file__)
# Import third party lib
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
import napalm_base
# pylint: enable=W0611
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
# Import Salt modules
from salt.ext import six
import salt.utils.napalm
@ -163,7 +165,7 @@ DETAILS = {}
def __virtual__():
return HAS_NAPALM or (False, 'Please install the NAPALM library: `pip install napalm`!')
return salt.utils.napalm.virtual(__opts__, 'napalm', __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported


@ -75,6 +75,7 @@ from __future__ import unicode_literals
# Import salt lib
import salt.output
import salt.utils.network
from salt.ext import six
from salt.ext.six.moves import map
@ -812,7 +813,25 @@ def find(addr, best=True, display=_DEFAULT_DISPLAY):
ip = '' # pylint: disable=invalid-name
ipnet = None
results = {}
results = {
'int_net': [],
'int_descr': [],
'int_name': [],
'int_ip': [],
'int_mac': [],
'int_device': [],
'lldp_descr': [],
'lldp_int': [],
'lldp_device': [],
'lldp_mac': [],
'lldp_device_int': [],
'mac_device': [],
'mac_int': [],
'arp_device': [],
'arp_int': [],
'arp_mac': [],
'arp_ip': []
}
if isinstance(addr, int):
results['mac'] = findmac(vlan=addr, display=display)
@ -826,6 +845,8 @@ def find(addr, best=True, display=_DEFAULT_DISPLAY):
except IndexError:
# no problem, let's keep searching
pass
if salt.utils.network.is_ipv6(addr):
mac = False
if not mac:
try:
ip = napalm_helpers.convert(napalm_helpers.ip, addr) # pylint: disable=invalid-name


@ -71,6 +71,12 @@ def orchestrate(mods,
)
__opts__['file_client'] = 'local'
minion = salt.minion.MasterMinion(__opts__)
if pillarenv is None and 'pillarenv' in __opts__:
pillarenv = __opts__['pillarenv']
if saltenv is None and 'saltenv' in __opts__:
saltenv = __opts__['saltenv']
running = minion.functions['state.sls'](
mods,
test,


@ -13,6 +13,7 @@ import os
import re
import shlex
import stat
import string
import tarfile
from contextlib import closing
@ -765,12 +766,24 @@ def extracted(name,
return ret
urlparsed_source = _urlparse(source_match)
source_hash_basename = urlparsed_source.path or urlparsed_source.netloc
urlparsed_scheme = urlparsed_source.scheme
urlparsed_path = os.path.join(
urlparsed_source.netloc,
urlparsed_source.path).rstrip(os.sep)
source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS
# urlparsed_scheme will be the drive letter if this is a Windows file path
# This checks for a drive letter as the scheme and changes it to file
if urlparsed_scheme and \
urlparsed_scheme.lower() in string.ascii_lowercase:
urlparsed_path = ':'.join([urlparsed_scheme, urlparsed_path])
urlparsed_scheme = 'file'
source_hash_basename = urlparsed_path or urlparsed_source.netloc
source_is_local = urlparsed_scheme in salt.utils.files.LOCAL_PROTOS
if source_is_local:
# Get rid of "file://" from start of source_match
source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path))
source_match = os.path.realpath(os.path.expanduser(urlparsed_path))
if not os.path.isfile(source_match):
ret['comment'] = 'Source file \'{0}\' does not exist'.format(
salt.utils.url.redact_http_basic_auth(source_match))
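The reason for the drive-letter special case above: urlparse treats 'c:' as a URL scheme rather than a Windows drive. A quick standard-library illustration (the path is made up):

```python
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2

parsed = urlparse('c:\\builds\\app.tar.gz')
assert parsed.scheme == 'c'                   # the drive letter masquerades as a scheme
assert parsed.path == '\\builds\\app.tar.gz'  # so scheme and path must be stitched back together
```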


@ -1496,13 +1496,8 @@ def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
'''
log.debug('Called state to accept VPC peering connection')
pending = __salt__['boto_vpc.is_peering_connection_pending'](
conn_id=conn_id,
conn_name=conn_name,
region=region,
key=key,
keyid=keyid,
profile=profile
)
conn_id=conn_id, conn_name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
ret = {
'name': name,
@ -1511,32 +1506,27 @@ def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
'comment': 'Boto VPC peering state'
}
if not pending['exists']:
if not pending:
ret['result'] = True
ret['changes'].update({
'old': 'No pending VPC peering connection found. '
'Nothing to be done.'
})
ret['changes'].update({'old':
'No pending VPC peering connection found. Nothing to be done.'})
return ret
if __opts__['test']:
ret['changes'].update({'old': 'Pending VPC peering connection found '
'and can be accepted'})
ret['changes'].update({'old':
'Pending VPC peering connection found and can be accepted'})
return ret
log.debug('Calling module to accept this VPC peering connection')
result = __salt__['boto_vpc.accept_vpc_peering_connection'](
conn_id=conn_id, name=conn_name, region=region, key=key,
fun = 'boto_vpc.accept_vpc_peering_connection'
log.debug('Calling `{0}()` to accept this VPC peering connection'.format(fun))
result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
if 'error' in result:
ret['comment'] = "Failed to request VPC peering: {0}".format(result['error'])
ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error'])
ret['result'] = False
return ret
ret['changes'].update({
'old': '',
'new': result['msg']
})
ret['changes'].update({'old': '', 'new': result['msg']})
return ret


@ -300,7 +300,7 @@ def present(name,
identifier
Custom-defined identifier for tracking the cron line for future crontab
edits. This defaults to the state id
edits. This defaults to the state name
special
A special keyword to specify periodicity (eg. @reboot, @hourly...).
@ -387,7 +387,7 @@ def absent(name,
identifier
Custom-defined identifier for tracking the cron line for future crontab
edits. This defaults to the state id
edits. This defaults to the state name
special
The special keyword used in the job (eg. @reboot, @hourly...).


@ -19,12 +19,17 @@ DEVICE="{{name}}"
{%endif%}{% if ipaddr_end %}IPADDR_END="{{ipaddr_end}}"
{%endif%}{% if netmask %}NETMASK="{{netmask}}"
{%endif%}{% if prefix %}PREFIX="{{prefix}}"
{%endif%}{% if ipaddrs %}{% for i in ipaddrs -%}
IPADDR{{loop.index}}="{{i['ipaddr']}}"
PREFIX{{loop.index}}="{{i['prefix']}}"
{% endfor -%}
{%endif%}{% if gateway %}GATEWAY="{{gateway}}"
{%endif%}{% if enable_ipv6 %}IPV6INIT="yes"
{% if ipv6_autoconf %}IPV6_AUTOCONF="{{ipv6_autoconf}}"
{%endif%}{% if dhcpv6c %}DHCPV6C="{{dhcpv6c}}"
{%endif%}{% if ipv6addr %}IPV6ADDR="{{ipv6addr}}"
{%endif%}{% if ipv6gateway %}IPV6_DEFAULTGW="{{ipv6gateway}}"
{%endif%}{% if ipv6addrs %}IPV6ADDR_SECONDARIES="{{ ipv6addrs|join(' ') }}"
{%endif%}{% if ipv6_peerdns %}IPV6_PEERDNS="{{ipv6_peerdns}}"
{%endif%}{% if ipv6_defroute %}IPV6_DEFROUTE="{{ipv6_defroute}}"
{%endif%}{% if ipv6_peerroutes %}IPV6_PEERROUTES="{{ipv6_peerroutes}}"


@ -44,7 +44,9 @@ def guess_archive_type(name):
Guess an archive type (tar, zip, or rar) by its file extension
'''
name = name.lower()
for ending in ('tar', 'tar.gz', 'tar.bz2', 'tar.xz', 'tgz', 'tbz2', 'txz',
for ending in ('tar', 'tar.gz', 'tgz',
'tar.bz2', 'tbz2', 'tbz',
'tar.xz', 'txz',
'tar.lzma', 'tlz'):
if name.endswith('.' + ending):
return 'tar'


@ -30,6 +30,7 @@ import salt.utils
import salt.utils.url
import salt.fileclient
from salt.utils.odict import OrderedDict
import salt.utils.yamldumper
log = logging.getLogger(__name__)
@ -40,18 +41,6 @@ __all__ = [
GLOBAL_UUID = uuid.UUID('91633EBF-1C86-5E33-935A-28061F4B480E')
# To dump OrderedDict objects as regular dicts. Used by the yaml
# template filter.
class OrderedDictDumper(yaml.Dumper): # pylint: disable=W0232
pass
yaml.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict,
Dumper=OrderedDictDumper)
class SaltCacheLoader(BaseLoader):
'''
@ -717,8 +706,8 @@ class SerializerExtension(Extension, object):
return Markup(json.dumps(value, sort_keys=sort_keys, indent=indent).strip())
def format_yaml(self, value, flow_style=True):
yaml_txt = yaml.dump(value, default_flow_style=flow_style,
Dumper=OrderedDictDumper).strip()
yaml_txt = salt.utils.yamldumper.safe_dump(
value, default_flow_style=flow_style).strip()
if yaml_txt.endswith('\n...'):
yaml_txt = yaml_txt[:len(yaml_txt)-4]
return Markup(yaml_txt)
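For context, the helper the template filter now calls registers a representer so OrderedDict serializes as a plain YAML mapping instead of raising or emitting a python/object tag; a standalone sketch of the idea (not Salt's exact implementation):

```python
import yaml
from collections import OrderedDict

class SafeOrderedDumper(yaml.SafeDumper):
    '''SafeDumper that knows how to emit OrderedDict as a plain mapping.'''

SafeOrderedDumper.add_representer(
    OrderedDict, yaml.representer.SafeRepresenter.represent_dict)

print(yaml.dump(OrderedDict([('b', 1), ('a', 2)]),
                Dumper=SafeOrderedDumper, default_flow_style=True))
# -> {a: 2, b: 1}
```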


@ -15,27 +15,39 @@ Utils for the NAPALM modules and proxy.
.. versionadded:: 2017.7.0
'''
# Import Python libs
from __future__ import absolute_import
import traceback
import logging
import importlib
from functools import wraps
log = logging.getLogger(__file__)
import salt.utils
# Import Salt libs
from salt.ext import six as six
import salt.output
import salt.utils
# Import third party lib
# Import third party libs
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
import napalm_base
import napalm
import napalm.base as napalm_base
# pylint: enable=W0611
HAS_NAPALM = True
HAS_NAPALM_BASE = False # doesn't matter anymore, but needed for the logic below
try:
NAPALM_MAJOR = int(napalm.__version__.split('.')[0])
except AttributeError:
NAPALM_MAJOR = 0
except ImportError:
HAS_NAPALM = False
try:
import napalm_base
HAS_NAPALM_BASE = True
except ImportError:
HAS_NAPALM_BASE = False
try:
# try importing ConnectionClosedException
@ -46,7 +58,7 @@ try:
except ImportError:
HAS_CONN_CLOSED_EXC_CLASS = False
from salt.ext import six as six
log = logging.getLogger(__file__)
def is_proxy(opts):
@ -81,7 +93,7 @@ def virtual(opts, virtualname, filename):
'''
Returns the __virtual__.
'''
if HAS_NAPALM and (is_proxy(opts) or is_minion(opts)):
if ((HAS_NAPALM and NAPALM_MAJOR >= 2) or HAS_NAPALM_BASE) and (is_proxy(opts) or is_minion(opts)):
return virtualname
else:
return (


@ -174,3 +174,60 @@ def _get_reparse_data(path):
win32file.CloseHandle(fileHandle)
return reparseData
def safe_path(path, allow_path=None):
r'''
.. versionadded:: 2017.7.3
Checks that the path is safe for modification by Salt. For example, you
wouldn't want to have salt delete the contents of ``C:\Windows``. The
following directories are considered unsafe:
- C:\, D:\, E:\, etc.
- \
- C:\Windows
Args:
path (str): The path to check
allow_path (str, list): A directory or list of directories inside the
path that may be safe. For example: ``C:\Windows\TEMP``
Returns:
bool: True if safe, otherwise False
'''
# Create regex definitions for directories that may be unsafe to modify
system_root = os.environ.get('SystemRoot', 'C:\\Windows')
deny_paths = (
r'[a-z]\:\\$', # C:\, D:\, etc
r'\\$', # \
re.escape(system_root) # C:\Windows
)
# Make allow_path a list
if allow_path and not isinstance(allow_path, list):
allow_path = [allow_path]
# Create regex definition for directories we may want to make exceptions for
allow_paths = list()
if allow_path:
for item in allow_path:
allow_paths.append(re.escape(item))
# Check the path to make sure it's not one of the bad paths
good_path = True
for d_path in deny_paths:
if re.match(d_path, path, flags=re.IGNORECASE) is not None:
# Found deny path
good_path = False
# If the path is one of the bad paths, check for allowed exceptions
if not good_path:
for a_path in allow_paths:
if re.match(a_path, path, flags=re.IGNORECASE) is not None:
# Found exception
good_path = True
return good_path
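
A hedged usage sketch for the new helper, assuming it is exposed as salt.utils.path.safe_path (the win_verify_env hunk further down calls it that way); the results in the comments follow from the deny/allow rules above on a default Windows layout:

# Usage sketch only; requires Salt on a Windows host, results are illustrative.
import salt.utils.path

salt.utils.path.safe_path(r'C:\Windows')        # False: matches the SystemRoot deny rule
salt.utils.path.safe_path('C:\\')               # False: bare drive roots are denied
salt.utils.path.safe_path(r'C:\salt\srv\salt')  # True: no deny pattern matches
salt.utils.path.safe_path(r'C:\Windows\TEMP',
                          allow_path=r'C:\Windows\TEMP')  # True: explicit exception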

View file

@ -31,6 +31,8 @@ import salt.utils
log = logging.getLogger(__name__)
ROOT_DIR = 'c:\\salt' if salt.utils.is_windows() else '/'
def zmq_version():
'''
@ -192,13 +194,13 @@ def verify_files(files, user):
return True
def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False):
def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False, root_dir=ROOT_DIR):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if salt.utils.is_windows():
return win_verify_env(dirs, permissive, pki_dir, skip_extra)
return win_verify_env(root_dir, dirs, permissive, pki_dir, skip_extra)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
@ -523,18 +525,21 @@ def verify_log(opts):
log.warning('Insecure logging configuration detected! Sensitive data may be logged.')
def win_verify_env(dirs, permissive=False, pki_dir='', skip_extra=False):
def win_verify_env(path, dirs, permissive=False, pki_dir='', skip_extra=False):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
import salt.utils.win_functions
import salt.utils.win_dacl
import salt.utils.path
# Get the root path directory where salt is installed
path = dirs[0]
while os.path.basename(path) not in ['salt', 'salt-tests-tmpdir']:
path, base = os.path.split(path)
# Make sure the file_roots is not set to something unsafe since permissions
# on that directory are reset
if not salt.utils.path.safe_path(path=path):
raise CommandExecutionError(
'`file_roots` set to a possibly unsafe location: {0}'.format(path)
)
# Create the root path directory if missing
if not os.path.isdir(path):

View file

@ -1133,9 +1133,14 @@ def get_name(principal):
try:
return win32security.LookupAccountSid(None, sid_obj)[0]
except TypeError:
raise CommandExecutionError(
'Could not find User for {0}'.format(principal))
except (pywintypes.error, TypeError) as exc:
if type(exc) == pywintypes.error:
win_error = win32api.FormatMessage(exc.winerror).rstrip('\n')
message = 'Error resolving {0} ({1})'.format(principal, win_error)
else:
message = 'Error resolving {0}'.format(principal)
raise CommandExecutionError(message)
def get_owner(obj_name):
@ -1173,7 +1178,7 @@ def get_owner(obj_name):
owner_sid = 'S-1-1-0'
else:
raise CommandExecutionError(
'Failed to set permissions: {0}'.format(exc.strerror))
'Failed to get owner: {0}'.format(exc.strerror))
return get_name(win32security.ConvertSidToStringSid(owner_sid))

View file

@ -983,7 +983,9 @@ class TestDaemon(object):
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER)
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts['root_dir'],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts

View file

@ -0,0 +1,17 @@
core:
salt.state:
- tgt: 'doesnotexist*'
- sls:
- core
test-state:
salt.state:
- tgt: '*'
- sls:
- include-test
cmd.run:
salt.function:
- tgt: '*'
- arg:
- echo test

View file

@ -0,0 +1,17 @@
core:
salt.state:
- tgt: 'minion*'
- sls:
- core
test-state:
salt.state:
- tgt: '*'
- sls:
- include-test
cmd.run:
salt.function:
- tgt: '*'
- arg:
- echo test

View file

@ -0,0 +1,4 @@
ssh-file-test:
file.managed:
- name: /tmp/test
- contents: 'test'

View file

@ -61,3 +61,19 @@ class PillarModuleTest(ModuleCase):
self.assertDictContainsSubset(
{'knights': ['Lancelot', 'Galahad', 'Bedevere', 'Robin']},
get_items)
def test_pillar_command_line(self):
'''
Test to ensure that pillar overrides passed
on the command line work
'''
# test when pillar is overwriting previous pillar
overwrite = self.run_function('pillar.items', pillar={"monty":
"overwrite"})
self.assertDictContainsSubset({'monty': 'overwrite'}, overwrite)
# test when using additional pillar
additional = self.run_function('pillar.items', pillar={"new":
"additional"})
self.assertDictContainsSubset({'new': 'additional'}, additional)

View file

@ -186,7 +186,7 @@ class SaltUtilSyncPillarTest(ModuleCase):
'''))
pillar_refresh = self.run_function('saltutil.refresh_pillar')
wait = self.run_function('test.sleep', [1])
wait = self.run_function('test.sleep', [5])
post_pillar = self.run_function('pillar.raw')
self.assertIn(pillar_key, post_pillar.get(pillar_key, 'didnotwork'))

View file

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest
# Import Salt libs
import salt.utils
@destructiveTest
class ServiceModuleTest(ModuleCase):
'''
Module testing the service module
'''
def setUp(self):
self.service_name = 'cron'
cmd_name = 'crontab'
os_family = self.run_function('grains.get', ['os_family'])
if os_family == 'RedHat':
self.service_name = 'crond'
elif os_family == 'Arch':
self.service_name = 'systemd-journald'
cmd_name = 'systemctl'
if salt.utils.which(cmd_name) is None:
self.skipTest('{0} is not installed'.format(cmd_name))
def test_service_status_running(self):
'''
test service.status execution module
when service is running
'''
start_service = self.run_function('service.start', [self.service_name])
check_service = self.run_function('service.status', [self.service_name])
self.assertTrue(check_service)
def test_service_status_dead(self):
'''
test service.status execution module
when service is dead
'''
stop_service = self.run_function('service.stop', [self.service_name])
check_service = self.run_function('service.status', [self.service_name])
self.assertFalse(check_service)

View file

@ -80,6 +80,57 @@ class StateRunnerTest(ShellCase):
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_orchestrate_target_exists(self):
'''
test orchestration when target exists
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-exists')
first = [' ID: core',
' Function: salt.state',
' Result: True']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_orchestrate_target_doesnt_exists(self):
'''
test orchestration when the target doesn't exist
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-doesnt-exists')
first = ['No minions matched the target. No command was sent, no jid was assigned.',
' ID: core',
' Function: salt.state',
' Result: False']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_state_event(self):
'''
test to ensure state.event

View file

@ -39,8 +39,14 @@ class SPMBuildTest(SPMCase, ModuleCase):
@skipIf(salt.utils.which('fallocate') is None, 'fallocate not installed')
def test_spm_build_big_file(self):
'''
test spm build
test spm build with a big file
'''
# check to make sure there is enough space to run this test
check_space = self.run_function('status.diskusage', ['/'])
space = check_space['/']['available']
if space < 2000000:
self.skipTest('Not enough space on host to run this test')
big_file = self.run_function('cmd.run',
['fallocate -l 1G {0}'.format(os.path.join(self.formula_sls_dir,
'bigfile.txt'))])
@ -56,7 +62,7 @@ class SPMBuildTest(SPMCase, ModuleCase):
def test_spm_build_exclude(self):
'''
test spm build
test spm build while excluding directory
'''
git_dir = os.path.join(self.formula_sls_dir, '.git')
os.makedirs(git_dir)

View file

@ -6,11 +6,9 @@ salt-ssh testing
from __future__ import absolute_import
# Import salt testing libs
from tests.support.unit import skipIf
from tests.support.case import SSHCase
@skipIf(True, 'Not ready for production')
class SSHTest(SSHCase):
'''
Test general salt-ssh functionality

View file

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.case import SSHCase
from tests.support.unit import skipIf
# Import Salt Libs
import salt.utils
@skipIf(salt.utils.is_windows(), 'salt-ssh not available on Windows')
class SSHGrainsTest(SSHCase):
'''
testing grains with salt-ssh
'''
def test_grains_items(self):
'''
test grains.items with salt-ssh
'''
ret = self.run_function('grains.items')
self.assertEqual(ret['kernel'], 'Linux')
self.assertTrue(isinstance(ret, dict))

View file

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.case import SSHCase
from tests.support.unit import skipIf
# Import Salt Libs
import salt.utils
@skipIf(salt.utils.is_windows(), 'salt-ssh not available on Windows')
class SSHRawTest(SSHCase):
'''
testing salt-ssh with raw calls
'''
def test_ssh_raw(self):
'''
test salt-ssh with -r argument
'''
msg = 'running raw msg'
ret = self.run_function('echo {0}'.format(msg), raw=True)
self.assertEqual(ret['stdout'], msg + '\n')

View file

@ -0,0 +1,161 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import shutil
# Import Salt Testing Libs
from tests.support.case import SSHCase
from tests.support.paths import TMP
# Import Salt Libs
from salt.ext import six
SSH_SLS = 'ssh_state_tests'
SSH_SLS_FILE = '/tmp/test'
class SSHStateTest(SSHCase):
'''
testing the state system with salt-ssh
'''
def _check_dict_ret(self, ret, val, exp_ret):
for key, value in ret.items():
self.assertEqual(value[val], exp_ret)
def _check_request(self, empty=False):
check = self.run_function('state.check_request', wipe=False)
if empty:
self.assertFalse(bool(check))
else:
self._check_dict_ret(ret=check['default']['test_run']['local']['return'],
val='__sls__', exp_ret=SSH_SLS)
def test_state_apply(self):
'''
test state.apply with salt-ssh
'''
ret = self.run_function('state.apply', [SSH_SLS])
self._check_dict_ret(ret=ret, val='__sls__', exp_ret=SSH_SLS)
check_file = self.run_function('file.file_exists', ['/tmp/test'])
self.assertTrue(check_file)
def test_state_show_sls(self):
'''
test state.show_sls with salt-ssh
'''
ret = self.run_function('state.show_sls', [SSH_SLS])
self._check_dict_ret(ret=ret, val='__sls__', exp_ret=SSH_SLS)
check_file = self.run_function('file.file_exists', [SSH_SLS_FILE], wipe=False)
self.assertFalse(check_file)
def test_state_show_top(self):
'''
test state.show_top with salt-ssh
'''
ret = self.run_function('state.show_top')
self.assertEqual(ret, {u'base': [u'master_tops_test', u'core']})
def test_state_single(self):
'''
state.single with salt-ssh
'''
ret_out = {'name': 'itworked',
'result': True,
'comment': 'Success!'}
single = self.run_function('state.single',
['test.succeed_with_changes name=itworked'])
for key, value in six.iteritems(single):
self.assertEqual(value['name'], ret_out['name'])
self.assertEqual(value['result'], ret_out['result'])
self.assertEqual(value['comment'], ret_out['comment'])
def test_show_highstate(self):
'''
state.show_highstate with salt-ssh
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_state_high(self):
'''
state.high with salt-ssh
'''
ret_out = {'name': 'itworked',
'result': True,
'comment': 'Success!'}
high = self.run_function('state.high', ['"{"itworked": {"test": ["succeed_with_changes"]}}"'])
for key, value in six.iteritems(high):
self.assertEqual(value['name'], ret_out['name'])
self.assertEqual(value['result'], ret_out['result'])
self.assertEqual(value['comment'], ret_out['comment'])
def test_show_lowstate(self):
'''
state.show_lowstate with salt-ssh
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_state_low(self):
'''
state.low with salt-ssh
'''
ret_out = {'name': 'itworked',
'result': True,
'comment': 'Success!'}
low = self.run_function('state.low', ['"{"state": "test", "fun": "succeed_with_changes", "name": "itworked"}"'])
for key, value in six.iteritems(low):
self.assertEqual(value['name'], ret_out['name'])
self.assertEqual(value['result'], ret_out['result'])
self.assertEqual(value['comment'], ret_out['comment'])
def test_state_request_check_clear(self):
'''
test state.request system with salt-ssh
while also checking and clearing request
'''
request = self.run_function('state.request', [SSH_SLS], wipe=False)
self._check_dict_ret(ret=request, val='__sls__', exp_ret=SSH_SLS)
self._check_request()
clear = self.run_function('state.clear_request', wipe=False)
self._check_request(empty=True)
def test_state_run_request(self):
'''
test state.request system with salt-ssh
while also running the request later
'''
request = self.run_function('state.request', [SSH_SLS], wipe=False)
self._check_dict_ret(ret=request, val='__sls__', exp_ret=SSH_SLS)
run = self.run_function('state.run_request', wipe=False)
check_file = self.run_function('file.file_exists', [SSH_SLS_FILE], wipe=False)
self.assertTrue(check_file)
def tearDown(self):
'''
make sure to clean up any old ssh directories
'''
salt_dir = self.run_function('config.get', ['thin_dir'], wipe=False)
if os.path.exists(salt_dir):
shutil.rmtree(salt_dir)
if os.path.exists(SSH_SLS_FILE):
os.remove(SSH_SLS_FILE)

View file

@ -7,7 +7,6 @@ from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
from tests.support.mixins import SaltReturnAssertsMixin
@ -15,32 +14,58 @@ from tests.support.mixins import SaltReturnAssertsMixin
import salt.utils
INIT_DELAY = 5
SERVICE_NAME = 'crond'
@destructiveTest
@skipIf(salt.utils.which('crond') is None, 'crond not installed')
class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the service state
'''
def setUp(self):
self.service_name = 'cron'
cmd_name = 'crontab'
os_family = self.run_function('grains.get', ['os_family'])
if os_family == 'RedHat':
self.service_name = 'crond'
elif os_family == 'Arch':
self.service_name = 'systemd-journald'
cmd_name = 'systemctl'
if salt.utils.which(cmd_name) is None:
self.skipTest('{0} is not installed'.format(cmd_name))
def check_service_status(self, exp_return):
'''
helper method to check status of service
'''
check_status = self.run_function('service.status', name=SERVICE_NAME)
check_status = self.run_function('service.status',
name=self.service_name)
if check_status is not exp_return:
self.fail('status of service is not returning correctly')
def test_service_running(self):
'''
test service.running state module
'''
stop_service = self.run_function('service.stop', self.service_name)
self.assertTrue(stop_service)
self.check_service_status(False)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertTrue(start_service)
self.check_service_status(True)
def test_service_dead(self):
'''
test service.dead state module
'''
start_service = self.run_state('service.running', name=SERVICE_NAME)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertSaltTrueReturn(start_service)
self.check_service_status(True)
ret = self.run_state('service.dead', name=SERVICE_NAME)
ret = self.run_state('service.dead', name=self.service_name)
self.assertSaltTrueReturn(ret)
self.check_service_status(False)
@ -48,11 +73,12 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
'''
test service.dead state module with init_delay arg
'''
start_service = self.run_state('service.running', name=SERVICE_NAME)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertSaltTrueReturn(start_service)
self.check_service_status(True)
ret = self.run_state('service.dead', name=SERVICE_NAME,
ret = self.run_state('service.dead', name=self.service_name,
init_delay=INIT_DELAY)
self.assertSaltTrueReturn(ret)
self.check_service_status(False)

View file

@ -130,6 +130,9 @@ TEST_SUITES = {
'returners':
{'display_name': 'Returners',
'path': 'integration/returners'},
'ssh-int':
{'display_name': 'SSH Integration',
'path': 'integration/ssh'},
'spm':
{'display_name': 'SPM',
'path': 'integration/spm'},
@ -412,6 +415,14 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
'SSH server on your machine. In certain environments, this '
'may be insecure! Default: False'
)
self.test_selection_group.add_option(
'--ssh-int',
dest='ssh-int',
action='store_true',
default=False,
help='Run salt-ssh integration tests. Must be run together with --ssh '
'to spin up the SSH server on your machine.'
)
self.test_selection_group.add_option(
'-A',
'--api',

View file

@ -124,11 +124,14 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
def run_ssh(self, arg_str, with_retcode=False, timeout=25, catch_stderr=False):
def run_ssh(self, arg_str, with_retcode=False, timeout=25,
catch_stderr=False, wipe=False, raw=False):
'''
Execute salt-ssh
'''
arg_str = '-c {0} -i --priv {1} --roster-file {2} localhost {3} --out=json'.format(
arg_str = '{0} {1} -c {2} -i --priv {3} --roster-file {4} localhost {5} --out=json'.format(
' -W' if wipe else '',
' -r' if raw else '',
self.get_config_dir(),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
@ -453,11 +456,14 @@ class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixi
catch_stderr=catch_stderr,
timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, timeout=60): # pylint: disable=W0221
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False,
timeout=60, wipe=True, raw=False): # pylint: disable=W0221
'''
Execute salt-ssh
'''
arg_str = '-ldebug -W -c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(
arg_str = '{0} -ldebug{1} -c {2} -i --priv {3} --roster-file {4} --out=json localhost {5}'.format(
' -W' if wipe else '',
' -r' if raw else '',
self.get_config_dir(),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
@ -797,11 +803,12 @@ class SSHCase(ShellCase):
def _arg_str(self, function, arg):
return '{0} {1}'.format(function, ' '.join(arg))
def run_function(self, function, arg=(), timeout=90, **kwargs):
def run_function(self, function, arg=(), timeout=90, wipe=True, raw=False, **kwargs):
'''
We use a 90s timeout here, which some slower systems do end up needing
'''
ret = self.run_ssh(self._arg_str(function, arg), timeout=timeout)
ret = self.run_ssh(self._arg_str(function, arg), timeout=timeout,
wipe=wipe, raw=raw)
try:
return json.loads(ret)['localhost']
except Exception:
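
A tiny standalone restatement of how the new wipe and raw keywords translate into command-line flags in the helpers above (not the real test helper, just the flag logic):

# Illustrative: '-W' wipes the deployed thin dir afterwards, '-r' runs a raw shell command.
def _ssh_flags(wipe=False, raw=False):
    return '{0}{1}'.format(' -W' if wipe else '', ' -r' if raw else '')

assert _ssh_flags() == ''
assert _ssh_flags(wipe=True) == ' -W'
assert _ssh_flags(wipe=True, raw=True) == ' -W -r'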

View file

@ -107,7 +107,9 @@ class AdaptedConfigurationTestCaseMixin(object):
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER)
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)

View file

@ -16,6 +16,7 @@ from tests.support.mock import (
patch)
# Import Salt Libs
from salt.ext.six.moves import range
import salt.modules.rh_ip as rh_ip
# Import 3rd-party libs
@ -60,7 +61,6 @@ class RhipTestCase(TestCase, LoaderModuleMockMixin):
'''
with patch.dict(rh_ip.__grains__, {'os': 'Fedora'}):
with patch.object(rh_ip, '_raise_error_iface', return_value=None):
self.assertRaises(AttributeError,
rh_ip.build_interface,
'iface', 'slave', True)
@ -70,34 +70,53 @@ class RhipTestCase(TestCase, LoaderModuleMockMixin):
rh_ip.build_interface,
'iface', 'eth', True, netmask='255.255.255.255', prefix=32,
test=True)
self.assertRaises(AttributeError,
rh_ip.build_interface,
'iface', 'eth', True, ipaddrs=['A'],
test=True)
self.assertRaises(AttributeError,
rh_ip.build_interface,
'iface', 'eth', True, ipv6addrs=['A'],
test=True)
with patch.object(rh_ip, '_parse_settings_bond', MagicMock()):
mock = jinja2.exceptions.TemplateNotFound('foo')
with patch.object(jinja2.Environment,
'get_template',
MagicMock(side_effect=mock)):
self.assertEqual(rh_ip.build_interface('iface',
'vlan',
True), '')
with patch.object(rh_ip, '_read_temp', return_value='A'):
for osrelease in range(5, 8):
with patch.dict(rh_ip.__grains__, {'os': 'RedHat', 'osrelease': str(osrelease)}):
with patch.object(rh_ip, '_raise_error_iface', return_value=None):
with patch.object(rh_ip, '_parse_settings_bond', MagicMock()):
mock = jinja2.exceptions.TemplateNotFound('foo')
with patch.object(jinja2.Environment,
'get_template', MagicMock()):
'get_template',
MagicMock(side_effect=mock)):
self.assertEqual(rh_ip.build_interface('iface',
'vlan',
True,
test='A'),
'A')
True), '')
with patch.object(rh_ip, '_write_file_iface',
return_value=None):
with patch.object(os.path, 'join',
return_value='A'):
with patch.object(rh_ip, '_read_file',
with patch.object(rh_ip, '_read_temp', return_value='A'):
with patch.object(jinja2.Environment,
'get_template', MagicMock()):
self.assertEqual(rh_ip.build_interface('iface',
'vlan',
True,
test='A'),
'A')
with patch.object(rh_ip, '_write_file_iface',
return_value=None):
with patch.object(os.path, 'join',
return_value='A'):
self.assertEqual(rh_ip.build_interface
('iface', 'vlan',
True), 'A')
with patch.object(rh_ip, '_read_file',
return_value='A'):
self.assertEqual(rh_ip.build_interface
('iface', 'vlan',
True), 'A')
if osrelease > 6:
with patch.dict(rh_ip.__salt__, {'network.interfaces': lambda: {'eth': True}}):
self.assertEqual(rh_ip.build_interface
('iface', 'eth', True,
ipaddrs=['127.0.0.1/8']), 'A')
self.assertEqual(rh_ip.build_interface
('iface', 'eth', True,
ipv6addrs=['fc00::1/128']), 'A')
def test_build_routes(self):
'''

View file

@ -21,6 +21,12 @@ from tests.support.mock import (
# Import Salt Libs
import salt.modules.win_dns_client as win_dns_client
try:
import wmi
HAS_WMI = True
except ImportError:
HAS_WMI = False
class Mockwmi(object):
'''
@ -59,6 +65,7 @@ class Mockwinapi(object):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_WMI, 'WMI only available on Windows')
class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_dns_client
@ -66,16 +73,13 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
# wmi and pythoncom modules are platform specific...
wmi = types.ModuleType('wmi')
pythoncom = types.ModuleType('pythoncom')
sys_modules_patcher = patch.dict('sys.modules', {'wmi': wmi, 'pythoncom': pythoncom})
mock_pythoncom = types.ModuleType('pythoncom')
sys_modules_patcher = patch.dict('sys.modules',
{'pythoncom': mock_pythoncom})
sys_modules_patcher.start()
self.addCleanup(sys_modules_patcher.stop)
self.WMI = Mock()
self.addCleanup(delattr, self, 'WMI')
wmi.WMI = Mock(return_value=self.WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
return {win_dns_client: {'wmi': wmi}}
# 'get_dns_servers' function tests: 1
@ -90,7 +94,8 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertListEqual(win_dns_client.get_dns_servers
('Local Area Connection'),
['10.1.1.10'])
@ -113,23 +118,22 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it add the DNS server to the network interface.
'''
with patch('salt.utils.winapi.Com', MagicMock()):
with patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]):
with patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
self.assertFalse(win_dns_client.add_dns('10.1.1.10',
'Ethernet'))
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertFalse(win_dns_client.add_dns('10.1.1.10', 'Ethernet'))
self.assertTrue(win_dns_client.add_dns
('10.1.1.10', 'Local Area Connection'))
self.assertTrue(win_dns_client.add_dns('10.1.1.10', 'Local Area Connection'))
with patch.object(win_dns_client, 'get_dns_servers',
MagicMock(return_value=['10.1.1.10'])):
with patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}):
self.assertTrue(win_dns_client.add_dns('10.1.1.0',
'Local Area Connection'))
MagicMock(return_value=['10.1.1.10'])), \
patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.add_dns('10.1.1.0', 'Local Area Connection'))
# 'dns_dhcp' function tests: 1
@ -148,9 +152,10 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it get the type of DNS configuration (dhcp / static)
'''
with patch('salt.utils.winapi.Com', MagicMock()):
with patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]):
with patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
self.assertTrue(win_dns_client.get_dns_config())
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.get_dns_config())

View file

@ -5,7 +5,6 @@
# Import Python Libs
from __future__ import absolute_import
import types
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
@ -22,6 +21,12 @@ from tests.support.mock import (
import salt.utils
import salt.modules.win_network as win_network
try:
import wmi
HAS_WMI = True
except ImportError:
HAS_WMI = False
class Mockwmi(object):
'''
@ -64,12 +69,9 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
Test cases for salt.modules.win_network
'''
def setup_loader_modules(self):
# wmi modules are platform specific...
wmi = types.ModuleType('wmi')
self.WMI = Mock()
self.addCleanup(delattr, self, 'WMI')
wmi.WMI = Mock(return_value=self.WMI)
return {win_network: {'wmi': wmi}}
return {win_network: {}}
# 'ping' function tests: 1
@ -156,6 +158,7 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
# 'interfaces_names' function tests: 1
@skipIf(not HAS_WMI, "WMI only available on Windows")
def test_interfaces_names(self):
'''
Test if it return a list of all the interfaces names
@ -164,7 +167,8 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch('salt.utils', Mockwinapi):
patch('salt.utils', Mockwinapi), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertListEqual(win_network.interfaces_names(),
['Ethernet'])

View file

@ -3,7 +3,6 @@
# Import python libs
from __future__ import absolute_import
import sys
import types
# Import Salt libs
import salt.ext.six as six
@ -12,25 +11,16 @@ import salt.ext.six as six
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch, ANY
# wmi and pythoncom modules are platform specific...
wmi = types.ModuleType('wmi')
sys.modules['wmi'] = wmi
pythoncom = types.ModuleType('pythoncom')
sys.modules['pythoncom'] = pythoncom
if NO_MOCK is False:
WMI = Mock()
wmi.WMI = Mock(return_value=WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
try:
import wmi
except ImportError:
pass
# This is imported late so mock can do its job
import salt.modules.win_status as status
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(sys.stdin.encoding != 'UTF-8', 'UTF-8 encoding required for this test is not supported')
@skipIf(status.HAS_WMI is False, 'This test requires Windows')
class TestProcsBase(TestCase):
def __init__(self, *args, **kwargs):
@ -55,8 +45,10 @@ class TestProcsBase(TestCase):
self.__processes.append(process)
def call_procs(self):
WMI = Mock()
WMI.win32_process = Mock(return_value=self.__processes)
self.result = status.procs()
with patch.object(wmi, 'WMI', Mock(return_value=WMI)):
self.result = status.procs()
class TestProcsCount(TestProcsBase):
@ -101,6 +93,7 @@ class TestProcsAttributes(TestProcsBase):
self.assertEqual(self.proc['user_domain'], self._expected_domain)
@skipIf(sys.stdin.encoding != 'UTF-8', 'UTF-8 encoding required for this test is not supported')
class TestProcsUnicodeAttributes(TestProcsBase):
def setUp(self):
unicode_str = u'\xc1'

View file

@ -20,6 +20,7 @@ from tests.support.mock import (
# Import Salt Libs
import salt.states.archive as archive
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
import salt.utils
def _isfile_side_effect(path):
@ -33,10 +34,13 @@ def _isfile_side_effect(path):
'''
return {
'/tmp/foo.tar.gz': True,
'c:\\tmp\\foo.tar.gz': True,
'/tmp/out': False,
'\\tmp\\out': False,
'/usr/bin/tar': True,
'/bin/tar': True,
'/tmp/test_extracted_tar': False,
'c:\\tmp\\test_extracted_tar': False,
}[path]
@ -59,8 +63,12 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
archive.extracted tar options
'''
source = '/tmp/foo.tar.gz'
tmp_dir = '/tmp/test_extracted_tar'
if salt.utils.is_windows():
source = 'c:\\tmp\\foo.tar.gz'
tmp_dir = 'c:\\tmp\\test_extracted_tar'
else:
source = '/tmp/foo.tar.gz'
tmp_dir = '/tmp/test_extracted_tar'
test_tar_opts = [
'--no-anchored foo',
'v -p --opt',
@ -94,25 +102,24 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(archive.__opts__, {'test': False,
'cachedir': tmp_dir,
'hash_type': 'sha256'}):
with patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'file.file_exists': mock_false,
'state.single': state_single_mock,
'file.makedirs': mock_true,
'cmd.run_all': mock_run,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts):
ret = archive.extracted(tmp_dir,
source,
options=test_opts,
enforce_toplevel=False)
ret_opts.append(source)
mock_run.assert_called_with(ret_opts,
cwd=tmp_dir + os.sep,
python_shell=False)
'hash_type': 'sha256'}),\
patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'file.file_exists': mock_false,
'state.single': state_single_mock,
'file.makedirs': mock_true,
'cmd.run_all': mock_run,
'archive.list': list_mock,
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.which', MagicMock(return_value=True)):
for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts):
archive.extracted(tmp_dir, source, options=test_opts,
enforce_toplevel=False)
ret_opts.append(source)
mock_run.assert_called_with(ret_opts, cwd=tmp_dir + os.sep,
python_shell=False)
def test_tar_gnutar(self):
'''
@ -142,15 +149,16 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
'file.makedirs': mock_true,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
ret = archive.extracted('/tmp/out',
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stdout')
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.which', MagicMock(return_value=True)):
ret = archive.extracted(os.path.join(os.sep + 'tmp', 'out'),
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stdout')
def test_tar_bsdtar(self):
'''
@ -180,12 +188,13 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
'file.makedirs': mock_true,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
ret = archive.extracted('/tmp/out',
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stderr')
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.which', MagicMock(return_value=True)):
ret = archive.extracted(os.path.join(os.sep + 'tmp', 'out'),
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stderr')

View file

@ -561,7 +561,6 @@ class TestCustomExtensions(TestCase):
# type of the rendered variable (should be unicode, which is the same as
# six.text_type). This should cover all use cases but also allow the test
# to pass on CentOS 6 running Python 2.7.
self.assertIn('!!python/unicode', rendered)
self.assertIn('str value', rendered)
self.assertIsInstance(rendered, six.text_type)

View file

@ -7,6 +7,7 @@
# Import Python libs
from __future__ import absolute_import
import os
import re
# Import Salt Testing libs
from tests.support.unit import TestCase
@ -44,7 +45,7 @@ class DocTestCase(TestCase):
salt_dir += '/'
cmd = 'grep -r :doc: ' + salt_dir
grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')
grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split(os.linesep)
test_ret = {}
for line in grep_call:
@ -52,12 +53,10 @@ class DocTestCase(TestCase):
if line.startswith('Binary'):
continue
if salt.utils.is_windows():
# Need the space after the colon so it doesn't split the drive
# letter
key, val = line.split(': ', 1)
else:
key, val = line.split(':', 1)
# Only split on colons not followed by a '\' as is the case with
# Windows Drives
regex = re.compile(r':(?!\\)')
key, val = regex.split(line, 1)
# Don't test man pages, this file,
# the page that documents to not use ":doc:", or
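
To make the lookahead concrete, here is a small sketch of how the new split behaves on POSIX versus Windows grep output (the sample lines are illustrative):

# Split on colons that are not followed by a backslash, so Windows drive
# letters stay attached to the path.
import re

regex = re.compile(r':(?!\\)')

posix_line = '/srv/salt/doc/ref/index.rst: see :doc:`something`'
win_line = r'C:\salt\doc\ref\index.rst: see :doc:`something`'

print(regex.split(posix_line, 1))  # ['/srv/salt/doc/ref/index.rst', ' see :doc:`something`']
print(regex.split(win_line, 1))    # ['C:\\salt\\doc\\ref\\index.rst', ' see :doc:`something`']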

View file

@ -19,6 +19,7 @@ from tests.support.unit import TestCase, skipIf
from tests.support.paths import TMP, CODE_DIR
# Import salt libs
import salt.utils
from salt.utils import cloud
GPG_KEYDIR = os.path.join(TMP, 'gpg-keydir')
@ -123,6 +124,7 @@ class CloudUtilsTestCase(TestCase):
# we successful pass the place with os.write(tmpfd, ...
self.assertNotEqual("a bytes-like object is required, not 'str'", str(context.exception))
@skipIf(salt.utils.is_windows(), 'Not applicable to Windows')
def test_check_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name

View file

@ -111,7 +111,7 @@ class TestVerify(TestCase):
def test_verify_env(self):
root_dir = tempfile.mkdtemp(dir=TMP)
var_dir = os.path.join(root_dir, 'var', 'log', 'salt')
verify_env([var_dir], getpass.getuser())
verify_env([var_dir], getpass.getuser(), root_dir=root_dir)
self.assertTrue(os.path.exists(var_dir))
dir_stat = os.stat(var_dir)
self.assertEqual(dir_stat.st_uid, os.getuid())