Merge branch '2016.11' into 'develop'

Conflicts:
  - doc/ref/runners/all/index.rst
  - salt/modules/win_lgpo.py
  - salt/states/dockerio.py
This commit is contained in:
rallytime 2017-02-01 16:36:45 -07:00
commit eb61788f87
29 changed files with 509 additions and 399 deletions

View file

@ -26,6 +26,7 @@ Full list of Salt Cloud modules
opennebula
openstack
parallels
profitbricks
proxmox
pyrax
qingcloud
@ -36,3 +37,4 @@ Full list of Salt Cloud modules
softlayer_hw
virtualbox
vmware
vultrpy

View file

@ -11,6 +11,7 @@ engine modules
:template: autosummary.rst.tmpl
docker_events
hipchat
http_logstash
ircbot
logentries

View file

@ -0,0 +1,15 @@
.. _all-salt_executors:
=================
executors modules
=================
.. currentmodule:: salt.executors
.. autosummary::
:toctree:
:template: autosummary.rst.tmpl
direct_call
splay
sudo

View file

@ -8,7 +8,9 @@ execution modules
.. toctree::
salt.modules.group
salt.modules.pkg
salt.modules.user
.. currentmodule:: salt.modules
@ -40,6 +42,7 @@ execution modules
boto_cfn
boto_cloudtrail
boto_cloudwatch
boto_cloudwatch_event
boto_cognitoidentity
boto_datapipeline
boto_dynamodb
@ -53,6 +56,7 @@ execution modules
boto_lambda
boto_rds
boto_route53
boto_s3_bucket
boto_secgroup
boto_sns
boto_sqs
@ -64,10 +68,13 @@ execution modules
cabal
cassandra
cassandra_cql
celery
ceph
chassis
chef
chocolatey
chronos
cisconso
cloud
cmdmod
composer
@ -132,7 +139,6 @@ execution modules
gnomedesktop
gpg
grains
group
groupadd
grub_legacy
guestfs
@ -149,13 +155,18 @@ execution modules
img
incron
influx
influx08
infoblox
ini_manage
inspectlib
inspectlib.collector
inspectlib.dbhandle
inspectlib.entities
inspectlib.exceptions
inspectlib.fsdb
inspectlib.kiwiproc
inspectlib.query
inspector
introspect
ipmi
ipset
@ -199,7 +210,6 @@ execution modules
mac_service
mac_shadow
mac_softwareupdate
mac_user
mac_sysctl
mac_system
mac_timezone
@ -251,6 +261,7 @@ execution modules
openbsdrcctl
openbsdservice
openstack_config
openstack_mng
openvswitch
opkg
oracle
@ -298,6 +309,7 @@ execution modules
redismod
reg
rest_package
rest_sample_utils
rest_service
restartcheck
ret
@ -323,6 +335,7 @@ execution modules
sensors
serverdensity_device
service
servicenow
shadow
slack_notify
slsutil
@ -367,6 +380,7 @@ execution modules
telemetry
temp
test
testinframod
test_virtual
timezone
tls
@ -378,7 +392,6 @@ execution modules
udev
upstart
uptime
user
useradd
uwsgi
varnish
@ -400,16 +413,21 @@ execution modules
win_groupadd
win_iis
win_ip
win_lgpo
win_license
win_network
win_ntp
win_path
win_pkg
win_pki
win_powercfg
win_psget
win_repo
win_servermanager
win_service
win_shadow
win_smtp_server
win_snmp
win_status
win_system
win_task
@ -419,7 +437,7 @@ execution modules
win_wua
x509
xapi
xbps-pkg
xbpspkg
xfs
xmpp
yumpkg

View file

@ -1,5 +0,0 @@
salt.modules.xbps-pkg module
============================
.. automodule:: salt.modules.xbps-pkg
:members:

View file

@ -20,6 +20,7 @@ Follow one of the below links for further information and examples
no_out
no_return
overstatestage
pony
pprint_out
progress
raw

View file

@ -16,6 +16,7 @@ pillar modules
cobbler
confidant
consul_pillar
csvpillar
django_orm
ec2_pillar
etcd_pillar
@ -24,8 +25,10 @@ pillar modules
git_pillar
hg_pillar
hiera
http_json
http_yaml
libvirt
makostack
mongo
mysql
neutron

View file

@ -11,6 +11,7 @@ proxy modules
:template: autosummary.rst.tmpl
chronos
cisconso
esxi
fx2
junos

View file

@ -44,3 +44,4 @@ returner modules
sqlite3_return
syslog_return
xmpp_return
zabbix_return

View file

@ -11,6 +11,7 @@ runner modules
:template: autosummary.rst.tmpl
asam
auth
bgp
cache
cloud
@ -18,6 +19,7 @@ runner modules
doc
drac
error
event
f5
fileserver
git_pillar
@ -38,6 +40,7 @@ runner modules
salt
saltutil
sdb
smartos_vmadm
search
spacewalk
ssh
@ -46,4 +49,5 @@ runner modules
test
thin
virt
vistara
winrepo

View file

@ -13,6 +13,7 @@ sdb modules
confidant
consul
couchdb
env
etcd_db
keyring_db
memcached

View file

@ -31,6 +31,7 @@ state modules
boto_cfn
boto_cloudtrail
boto_cloudwatch_alarm
boto_cloudwatch_event
boto_cognitoidentity
boto_datapipeline
boto_dynamodb
@ -53,13 +54,16 @@ state modules
boto_vpc
bower
cabal
ceph
chef
chocolatey
chronos_job
cisconso
cloud
cmd
composer
cron
csf
cyg
ddns
debconfmod
@ -73,6 +77,7 @@ state modules
environ
eselect
etcd_mod
ethtool
esxi
event
file
@ -97,6 +102,8 @@ state modules
http
ifttt
incron
influxdb08_database
influxdb08_user
influxdb_database
influxdb_user
infoblox
@ -114,6 +121,7 @@ state modules
kmod
layman
ldap
libcloud_dns
linux_acl
locale
lvm
@ -180,6 +188,7 @@ state modules
powerpath
probes
process
proxy
pushover
pyenv
pyrax_queues
@ -222,6 +231,7 @@ state modules
sysrc
telemetry_alert
test
testinframod
timezone
tls
tomcat
@ -239,12 +249,15 @@ state modules
win_dns_client
win_firewall
win_iis
win_lgpo
win_license
win_network
win_path
win_pki
win_powercfg
win_servermanager
win_smtp_server
win_snmp
win_system
win_update
winrepo

View file

@ -10,8 +10,13 @@ thorium modules
:toctree:
:template: autosummary.rst.tmpl
calc
check
file
key
local
reg
runner
status
timer
wheel

View file

@ -402,7 +402,9 @@ class SaltCMD(parsers.SaltCMDOptionParser):
if isinstance(ret, str):
self.exit(2, '{0}\n'.format(ret))
for host in ret:
if isinstance(ret[host], string_types) and ret[host].startswith("Minion did not return"):
if isinstance(ret[host], string_types) \
and (ret[host].startswith("Minion did not return")
or ret[host] == 'VALUE TRIMMED'):
continue
for fun in ret[host]:
if fun not in docs and ret[host][fun]:

View file

@ -396,7 +396,8 @@ class SyncClientMixin(object):
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
data['return'] = self.functions[fun](*args, **kwargs)
data['success'] = True
if 'data' in data['return']:
if isinstance(data['return'], dict) and 'data' in data['return']:
# some functions can return boolean values
data['success'] = salt.utils.check_state_result(data['return']['data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):

View file

@ -225,9 +225,9 @@ def __virtual__():
return False
salt.utils.warn_until(
'Nitrogen',
'Oxygen',
'This driver has been deprecated and will be removed in the '
'Nitrogen release of Salt. Please use the nova driver instead.'
'{version} release of Salt. Please use the nova driver instead.'
)
return __virtualname__

View file

@ -921,6 +921,9 @@ def _parse_settings_bond_1(opts, iface, bond_def):
_log_default_iface(iface, binding, bond_def[binding])
bond.update({binding: bond_def[binding]})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
if not (__grains__['os'] == "Ubuntu" and __grains__['osrelease_info'][0] >= 16):
if 'use_carrier' in opts:
if opts['use_carrier'] in _CONFIG_TRUE:
@ -974,9 +977,6 @@ def _parse_settings_bond_2(opts, iface, bond_def):
_log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
bond.update({'arp_interval': bond_def['arp_interval']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
if 'hashing-algorithm' in opts:
valid = ['layer2', 'layer2+3', 'layer3+4']
if opts['hashing-algorithm'] in valid:
@ -1107,6 +1107,9 @@ def _parse_settings_bond_5(opts, iface, bond_def):
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond
@ -1143,6 +1146,9 @@ def _parse_settings_bond_6(opts, iface, bond_def):
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond

View file

@ -56,7 +56,7 @@ def fullversion():
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split(':')
ret[comps[0].strip()] = comps[1].strip()
ret[comps[0].strip()] = comps[1].strip()
return ret
@ -228,7 +228,7 @@ def pvcreate(devices, override=True, **kwargs):
elif not override:
raise CommandExecutionError('Device "{0}" is already an LVM physical volume.'.format(device))
if not cmd[1:]:
if not cmd[2:]:
# All specified devices are already LVM volumes
return True

View file

@ -334,6 +334,9 @@ def _parse_settings_bond_1(opts, iface, bond_def):
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond
@ -374,9 +377,6 @@ def _parse_settings_bond_2(opts, iface, bond_def):
_log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
bond.update({'arp_interval': bond_def['arp_interval']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
if 'hashing-algorithm' in opts:
valid = ['layer2', 'layer2+3', 'layer3+4']
if opts['hashing-algorithm'] in valid:
@ -507,6 +507,9 @@ def _parse_settings_bond_5(opts, iface, bond_def):
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond
@ -543,6 +546,9 @@ def _parse_settings_bond_6(opts, iface, bond_def):
_log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
bond.update({'use_carrier': bond_def['use_carrier']})
if 'primary' in opts:
bond.update({'primary': opts['primary']})
return bond

View file

@ -869,7 +869,6 @@ def extracted(name,
else:
source_sum = {}
concurrent = bool(__opts__.get('sudo_user'))
if not source_is_local and not os.path.isfile(cached_source):
if __opts__['test']:
ret['result'] = None
@ -879,15 +878,14 @@ def extracted(name,
log.debug('%s is not in cache, downloading it', source_match)
file_result = __salt__['state.single']('file.managed',
cached_source,
source=source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
makedirs=True,
skip_verify=skip_verify,
saltenv=__env__,
concurrent=concurrent)
file_result = __states__['file.managed'](cached_source,
source=source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
makedirs=True,
skip_verify=skip_verify,
env=__env__)
log.debug('file.managed: {0}'.format(file_result))
# Prevent a traceback if errors prevented the above state from getting
@ -1341,13 +1339,11 @@ def extracted(name,
dirname,
' (dry-run only)' if __opts__['test'] else ''
)
dir_result = __salt__['state.single']('file.directory',
full_path,
user=user,
group=group,
recurse=recurse,
test=__opts__['test'],
concurrent=concurrent)
dir_result = __states__['file.directory'](full_path,
user=user,
group=group,
recurse=recurse,
test=__opts__['test'])
try:
dir_result = dir_result[next(iter(dir_result))]
except AttributeError:

View file

@ -505,11 +505,9 @@ def loaded(name, tag='latest', source=None, source_hash='', force=False):
return _ret_status(name=name, comment=comment)
tmp_filename = salt.utils.files.mkstemp()
__salt__['state.single']('file.managed',
name=tmp_filename,
source=source,
source_hash=source_hash,
concurrent=bool(__opts__.get('sudo_user')))
__states__['file.managed'](name=tmp_filename,
source=source,
source_hash=source_hash)
changes = {}
if image_infos['status']:

View file

@ -646,18 +646,18 @@ def crl_managed(name,
new_comp.pop('Next Update')
file_args, kwargs = _get_file_args(name, **kwargs)
new_crl = False
new_crl_created = False
if (current_comp == new_comp and
current_days_remaining > days_remaining and
__salt__['x509.verify_crl'](name, signing_cert)):
file_args['contents'] = __salt__[
'x509.get_pem_entry'](name, pem_type='X509 CRL')
else:
new_crl = True
new_crl_created = True
file_args['contents'] = new_crl
ret = __states__['file.managed'](**file_args)
if new_crl:
if new_crl_created:
ret['changes'] = {'Old': current, 'New': __salt__[
'x509.read_crl'](crl=new_crl)}
return ret

View file

@ -2271,7 +2271,7 @@ def is_public_ip(ip):
return True
addr = ip_to_int(ip)
if addr > 167772160 and addr < 184549375:
# 10.0.0.0/24
# 10.0.0.0/8
return False
elif addr > 3232235520 and addr < 3232301055:
# 192.168.0.0/16
@ -2279,6 +2279,9 @@ def is_public_ip(ip):
elif addr > 2886729728 and addr < 2887778303:
# 172.16.0.0/12
return False
elif addr > 2130706432 and addr < 2147483647:
# 127.0.0.0/8
return False
return True

View file

@ -1,334 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Erik Johnson <erik@saltstack.com>`
'''
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../..')
# Import Python libs
import errno
import logging
import os
import shutil
# Import salt libs
import integration
import salt.utils
from salt import fileclient
from salt.ext import six
from salttesting.helpers import ensure_in_syspath, destructiveTest
ensure_in_syspath('..')
SALTENVS = ('base', 'dev')
FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
def _get_file_roots():
return dict(
[(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
)
fileclient.__opts__ = {}
MOCKED_OPTS = {
'file_roots': _get_file_roots(),
'fileserver_backend': ['roots'],
'cachedir': CACHE_ROOT,
'file_client': 'local',
}
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class FileclientTest(integration.ModuleCase):
'''
Tests for the fileclient. The LocalClient is the only thing we can test as
it is the only way we can mock the fileclient (the tests run from the
minion process, so the master cannot be mocked from test code).
'''
def setUp(self):
'''
No need to add a dummy foo.txt to muddy up the github repo, just make
our own fileserver root on-the-fly.
'''
def _new_dir(path):
'''
Add a new dir at ``path`` using os.makedirs. If the directory
already exists, remove it recursively and then try to create it
again.
'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# Just in case a previous test was interrupted, remove the
# directory and try adding it again.
shutil.rmtree(path)
os.makedirs(path)
else:
raise
# Crete the FS_ROOT
for saltenv in SALTENVS:
saltenv_root = os.path.join(FS_ROOT, saltenv)
# Make sure we have a fresh root dir for this saltenv
_new_dir(saltenv_root)
path = os.path.join(saltenv_root, 'foo.txt')
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is a test file in the \'{0}\' saltenv.\n'
.format(saltenv)
)
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
os.makedirs(subdir_abspath)
for subdir_file in SUBDIR_FILES:
path = os.path.join(subdir_abspath, subdir_file)
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is file \'{0}\' in subdir \'{1} from saltenv '
'\'{2}\''.format(subdir_file, SUBDIR, saltenv)
)
# Create the CACHE_ROOT
_new_dir(CACHE_ROOT)
def tearDown(self):
'''
Remove the directories created for these tests
'''
shutil.rmtree(FS_ROOT)
shutil.rmtree(CACHE_ROOT)
def test_cache_dir(self):
'''
Ensure entire directory is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=None
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_file(self):
'''
Ensure file is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
)
cache_loc = os.path.join(
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
if __name__ == '__main__':
integration.run_tests(FileclientTest)

View file

@ -4,16 +4,45 @@
'''
# Import Python libs
from __future__ import absolute_import
import errno
import logging
import os
import shutil
log = logging.getLogger(__name__)
# Import Salt Testing libs
from salttesting.unit import skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.helpers import ensure_in_syspath, destructiveTest
from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../..')
# Import salt libs
import integration
import salt.utils
from salt import fileclient
from salt.ext import six
SALTENVS = ('base', 'dev')
FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
def _get_file_roots():
return dict(
[(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
)
fileclient.__opts__ = {}
MOCKED_OPTS = {
'file_roots': _get_file_roots(),
'fileserver_backend': ['roots'],
'cachedir': CACHE_ROOT,
'file_client': 'local',
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@ -42,6 +71,288 @@ class FileClientTest(integration.ModuleCase):
ret = fileclient.get_file_client(self.minion_opts)
self.assertEqual('remote_client', ret)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class FileclientCacheTest(integration.ModuleCase):
'''
Tests for the fileclient caching. The LocalClient is the only thing we can
test as it is the only way we can mock the fileclient (the tests run from
the minion process, so the master cannot be mocked from test code).
'''
def setUp(self):
'''
No need to add a dummy foo.txt to muddy up the github repo, just make
our own fileserver root on-the-fly.
'''
def _new_dir(path):
'''
Add a new dir at ``path`` using os.makedirs. If the directory
already exists, remove it recursively and then try to create it
again.
'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# Just in case a previous test was interrupted, remove the
# directory and try adding it again.
shutil.rmtree(path)
os.makedirs(path)
else:
raise
# Crete the FS_ROOT
for saltenv in SALTENVS:
saltenv_root = os.path.join(FS_ROOT, saltenv)
# Make sure we have a fresh root dir for this saltenv
_new_dir(saltenv_root)
path = os.path.join(saltenv_root, 'foo.txt')
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is a test file in the \'{0}\' saltenv.\n'
.format(saltenv)
)
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
os.makedirs(subdir_abspath)
for subdir_file in SUBDIR_FILES:
path = os.path.join(subdir_abspath, subdir_file)
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is file \'{0}\' in subdir \'{1} from saltenv '
'\'{2}\''.format(subdir_file, SUBDIR, saltenv)
)
# Create the CACHE_ROOT
_new_dir(CACHE_ROOT)
def tearDown(self):
'''
Remove the directories created for these tests
'''
shutil.rmtree(FS_ROOT)
shutil.rmtree(CACHE_ROOT)
def test_cache_dir(self):
'''
Ensure entire directory is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=None
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_file(self):
'''
Ensure file is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
)
cache_loc = os.path.join(
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
# Allow this module to be executed directly as a script; the import is
# deferred so that merely importing this module does not pull in the
# test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(FileClientTest)

View file

@ -2,8 +2,11 @@
# Import python libs
from __future__ import absolute_import
import logging
import re
log = logging.getLogger(__name__)
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@ -23,7 +26,7 @@ class SysModuleTest(integration.ModuleCase):
'''
Make sure no functions are exposed that don't have valid docstrings
'''
docs = self.run_function('sys.doc')
mods = self.run_function('sys.list_modules')
nodoc = set()
noexample = set()
allow_failure = (
@ -58,15 +61,43 @@ class SysModuleTest(integration.ModuleCase):
'yumpkg5.expand_repo_def',
)
for fun in docs:
if fun.startswith('runtests_helpers'):
continue
if fun in allow_failure:
continue
if not isinstance(docs[fun], six.string_types):
nodoc.add(fun)
elif not re.search(r'([E|e]xample(?:s)?)+(?:.*)::?', docs[fun]):
noexample.add(fun)
batches = 2
mod_count = len(mods)
batch_size = mod_count / float(batches)
if batch_size.is_integer():
batch_size = int(batch_size)
else:
# Check if the module count is evenly divisible by the number of
# batches. If not, increase the batch_size by the number of batches
# being run. This ensures that we get the correct number of
# batches, and that we don't end up running sys.doc an extra time
# to cover the remainder. For example, if we had a batch count of 2
# and 121 modules, if we just divided by 2 we'd end up running
# sys.doc 3 times.
batch_size = int(batch_size) + batches
log.debug('test_valid_docs batch size = %s', batch_size)
start = 0
end = batch_size
while start <= mod_count:
log.debug('running sys.doc on mods[%s:%s]', start, end)
docs = self.run_function('sys.doc', mods[start:end])
if docs == 'VALUE TRIMMED':
self.fail(
'sys.doc output trimmed. It may be necessary to increase '
'the number of batches'
)
for fun in docs:
if fun.startswith('runtests_helpers'):
continue
if fun in allow_failure:
continue
if not isinstance(docs[fun], six.string_types):
nodoc.add(fun)
elif not re.search(r'([E|e]xample(?:s)?)+(?:.*)::?', docs[fun]):
noexample.add(fun)
start += batch_size
end += batch_size
if not nodoc and not noexample:
return

View file

@ -313,9 +313,11 @@ class MatchTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
Test to see if we're not auto-adding '*' and 'sys.doc' to the call
'''
data = self.run_salt('-d -t 20')
self.assertIn('user.add:', data)
if data:
self.assertIn('user.add:', data)
data = self.run_salt('"*" -d -t 20')
self.assertIn('user.add:', data)
if data:
self.assertIn('user.add:', data)
data = self.run_salt('"*" -d user -t 20')
self.assertIn('user.add:', data)
data = self.run_salt('"*" sys.doc -d user -t 20')

View file

@ -34,18 +34,29 @@ class LinuxLVMTestCase(TestCase):
'''
Tests LVM version info from lvm version
'''
mock = MagicMock(return_value='Library version : 1')
mock = MagicMock(return_value=
' LVM version: 2.02.168(2) (2016-11-30)\n'
' Library version: 1.03.01 (2016-11-30)\n'
' Driver version: 4.35.0\n'
)
with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
self.assertEqual(linux_lvm.version(), '1')
self.assertEqual(linux_lvm.version(), '2.02.168(2) (2016-11-30)')
def test_fullversion(self):
'''
Tests all version info from lvm version
'''
mock = MagicMock(return_value='Library version : 1')
mock = MagicMock(return_value=
' LVM version: 2.02.168(2) (2016-11-30)\n'
' Library version: 1.03.01 (2016-11-30)\n'
' Driver version: 4.35.0\n'
)
with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
self.assertDictEqual(linux_lvm.fullversion(),
{'Library version': '1'})
{'LVM version': '2.02.168(2) (2016-11-30)',
'Library version': '1.03.01 (2016-11-30)',
'Driver version': '4.35.0',
})
def test_pvdisplay(self):
'''
@ -152,7 +163,10 @@ class LinuxLVMTestCase(TestCase):
self.assertRaises(CommandExecutionError, linux_lvm.pvcreate, 'A')
pvdisplay = MagicMock(return_value=True)
# pvdisplay() would be called by pvcreate() twice: firstly to check
# whether a device is already initialized for use by LVM and then to
# ensure that the pvcreate executable did its job correctly.
pvdisplay = MagicMock(side_effect=[False, True])
with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
with patch.object(os.path, 'exists', return_value=True):
ret = {'stdout': 'saltines', 'stderr': 'cheese', 'retcode': 0, 'pid': '1337'}
@ -160,6 +174,20 @@ class LinuxLVMTestCase(TestCase):
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertEqual(linux_lvm.pvcreate('A', metadatasize=1000), True)
def test_pvcreate_existing_pvs(self):
'''
Test a scenario when all the submitted devices are already LVM PVs.
'''
pvdisplay = MagicMock(return_value=True)
with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
with patch.object(os.path, 'exists', return_value=True):
ret = {'stdout': 'saltines', 'stderr': 'cheese', 'retcode': 0, 'pid': '1337'}
cmd_mock = MagicMock(return_value=ret)
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': cmd_mock}):
self.assertEqual(linux_lvm.pvcreate('A', metadatasize=1000),
True)
cmd_mock.assert_not_called()
def test_pvremove(self):
'''
Tests for remove a physical device being used as an LVM physical volume