Merge branch '2016.11' into bp-36336
commit 03e5319124
29 changed files with 676 additions and 431 deletions
@@ -26,6 +26,7 @@ Full list of Salt Cloud modules
    opennebula
    openstack
    parallels
    profitbricks
    proxmox
    pyrax
    qingcloud
@@ -36,3 +37,4 @@ Full list of Salt Cloud modules
    softlayer_hw
    virtualbox
    vmware
    vultrpy
@@ -11,6 +11,7 @@ engine modules
    :template: autosummary.rst.tmpl

    docker_events
    hipchat
    http_logstash
    logentries
    logstash
doc/ref/executors/all/index.rst (new file, 15 lines)

@@ -0,0 +1,15 @@
.. _all-salt_executors:

=================
executors modules
=================

.. currentmodule:: salt.executors

.. autosummary::
    :toctree:
    :template: autosummary.rst.tmpl

    direct_call
    splay
    sudo
@@ -8,7 +8,9 @@ execution modules
.. toctree::

    salt.modules.group
    salt.modules.pkg
    salt.modules.user

.. currentmodule:: salt.modules
@@ -39,6 +41,7 @@ execution modules
    boto_cfn
    boto_cloudtrail
    boto_cloudwatch
    boto_cloudwatch_event
    boto_cognitoidentity
    boto_datapipeline
    boto_dynamodb
@@ -52,6 +55,7 @@ execution modules
    boto_lambda
    boto_rds
    boto_route53
    boto_s3_bucket
    boto_secgroup
    boto_sns
    boto_sqs
@@ -63,10 +67,13 @@ execution modules
    cabal
    cassandra
    cassandra_cql
    celery
    ceph
    chassis
    chef
    chocolatey
    chronos
    cisconso
    cloud
    cmdmod
    composer
@@ -131,7 +138,6 @@ execution modules
    gnomedesktop
    gpg
    grains
    group
    groupadd
    grub_legacy
    guestfs
@@ -148,13 +154,18 @@ execution modules
    img
    incron
    influx
    influx08
    infoblox
    ini_manage
    inspectlib
    inspectlib.collector
    inspectlib.dbhandle
    inspectlib.entities
    inspectlib.exceptions
    inspectlib.fsdb
    inspectlib.kiwiproc
    inspectlib.query
    inspector
    introspect
    ipmi
    ipset
@@ -198,7 +209,6 @@ execution modules
    mac_service
    mac_shadow
    mac_softwareupdate
    mac_user
    mac_sysctl
    mac_system
    mac_timezone
@@ -250,6 +260,7 @@ execution modules
    openbsdrcctl
    openbsdservice
    openstack_config
    openstack_mng
    openvswitch
    opkg
    oracle
@@ -295,6 +306,7 @@ execution modules
    redismod
    reg
    rest_package
    rest_sample_utils
    rest_service
    restartcheck
    ret
@@ -319,6 +331,7 @@ execution modules
    sensors
    serverdensity_device
    service
    servicenow
    shadow
    slack_notify
    slsutil
@@ -361,6 +374,7 @@ execution modules
    telemetry
    temp
    test
    testinframod
    test_virtual
    timezone
    tls
@@ -371,7 +385,6 @@ execution modules
    udev
    upstart
    uptime
    user
    useradd
    uwsgi
    varnish
@@ -393,16 +406,21 @@ execution modules
    win_groupadd
    win_iis
    win_ip
    win_lgpo
    win_license
    win_network
    win_ntp
    win_path
    win_pkg
    win_pki
    win_powercfg
    win_psget
    win_repo
    win_servermanager
    win_service
    win_shadow
    win_smtp_server
    win_snmp
    win_status
    win_system
    win_task
@@ -412,7 +430,7 @@ execution modules
     win_wua
     x509
     xapi
-    xbps-pkg
+    xbpspkg
     xfs
     xmpp
     yumpkg
@@ -1,5 +0,0 @@ (file removed)
salt.modules.xbps-pkg module
============================

.. automodule:: salt.modules.xbps-pkg
    :members:
@@ -20,6 +20,7 @@ Follow one of the below links for further information and examples
    no_out
    no_return
    overstatestage
    pony
    pprint_out
    progress
    raw
@@ -16,6 +16,7 @@ pillar modules
    cobbler
    confidant
    consul_pillar
    csvpillar
    django_orm
    ec2_pillar
    etcd_pillar
@@ -24,8 +25,10 @@ pillar modules
    git_pillar
    hg_pillar
    hiera
    http_json
    http_yaml
    libvirt
    makostack
    mongo
    mysql
    neutron
@@ -11,6 +11,7 @@ proxy modules
    :template: autosummary.rst.tmpl

    chronos
    cisconso
    esxi
    fx2
    junos
@@ -44,3 +44,4 @@ returner modules
    sqlite3_return
    syslog_return
    xmpp_return
    zabbix_return
@@ -11,12 +11,14 @@ runner modules
    :template: autosummary.rst.tmpl

    asam
    auth
    cache
    cloud
    ddns
    doc
    drac
    error
    event
    f5
    fileserver
    git_pillar
@@ -36,6 +38,7 @@ runner modules
    salt
    saltutil
    sdb
    smartos_vmadm
    search
    spacewalk
    ssh
@@ -44,4 +47,5 @@ runner modules
    test
    thin
    virt
    vistara
    winrepo
@@ -13,6 +13,7 @@ sdb modules
    confidant
    consul
    couchdb
    env
    etcd_db
    keyring_db
    memcached
@@ -31,6 +31,7 @@ state modules
    boto_cfn
    boto_cloudtrail
    boto_cloudwatch_alarm
    boto_cloudwatch_event
    boto_cognitoidentity
    boto_datapipeline
    boto_dynamodb
@@ -53,13 +54,16 @@ state modules
    boto_vpc
    bower
    cabal
    ceph
    chef
    chocolatey
    chronos_job
    cisconso
    cloud
    cmd
    composer
    cron
    csf
    cyg
    ddns
    debconfmod
@@ -73,6 +77,7 @@ state modules
    environ
    eselect
    etcd_mod
    ethtool
    esxi
    event
    file
@@ -97,6 +102,8 @@ state modules
    http
    ifttt
    incron
    influxdb08_database
    influxdb08_user
    influxdb_database
    influxdb_user
    infoblox
@@ -114,6 +121,7 @@ state modules
    kmod
    layman
    ldap
    libcloud_dns
    linux_acl
    locale
    lvm
@@ -178,6 +186,7 @@ state modules
    powerpath
    probes
    process
    proxy
    pushover
    pyenv
    pyrax_queues
@@ -218,6 +227,7 @@ state modules
    sysrc
    telemetry_alert
    test
    testinframod
    timezone
    tls
    tomcat
@@ -235,12 +245,15 @@ state modules
    win_dns_client
    win_firewall
    win_iis
    win_lgpo
    win_license
    win_network
    win_path
    win_pki
    win_powercfg
    win_servermanager
    win_smtp_server
    win_snmp
    win_system
    win_update
    winrepo
@@ -10,8 +10,13 @@ thorium modules
    :toctree:
    :template: autosummary.rst.tmpl

    calc
    check
    file
    key
    local
    reg
    runner
    status
    timer
    wheel
@@ -391,7 +391,9 @@ class SaltCMD(parsers.SaltCMDOptionParser):
         if isinstance(ret, str):
             self.exit(2, '{0}\n'.format(ret))
         for host in ret:
-            if isinstance(ret[host], string_types) and ret[host].startswith("Minion did not return"):
+            if isinstance(ret[host], string_types) \
+                    and (ret[host].startswith("Minion did not return")
+                         or ret[host] == 'VALUE TRIMMED'):
                 continue
             for fun in ret[host]:
                 if fun not in docs and ret[host][fun]:
@@ -394,7 +394,8 @@ class SyncClientMixin(object):
             with tornado.stack_context.StackContext(self.functions.context_dict.clone):
                 data['return'] = self.functions[fun](*args, **kwargs)
                 data['success'] = True
-                if 'data' in data['return']:
+                if isinstance(data['return'], dict) and 'data' in data['return']:
                     # some functions can return boolean values
                     data['success'] = salt.utils.check_state_result(data['return']['data'])
         except (Exception, SystemExit) as ex:
             if isinstance(ex, salt.exceptions.NotImplemented):
@@ -548,7 +548,10 @@ class SSH(object):
         }

         # save load to the master job cache
-        self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
+        if self.opts['master_job_cache'] == 'local_cache':
+            self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load, minions=self.targets.keys())
+        else:
+            self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)

         for ret in self.handle_ssh(mine=mine):
             host = next(six.iterkeys(ret))
@@ -225,9 +225,9 @@ def __virtual__():
         return False

     salt.utils.warn_until(
-        'Nitrogen',
+        'Oxygen',
         'This driver has been deprecated and will be removed in the '
-        'Nitrogen release of Salt. Please use the nova driver instead.'
+        '{version} release of Salt. Please use the nova driver instead.'
     )

     return __virtualname__
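The second change above swaps the hard-coded release name for a `{version}` placeholder, which `salt.utils.warn_until` fills in from its first argument, so the message can no longer drift out of sync when the deprecation target is bumped again. A minimal sketch of that pattern (simplified; the real helper also compares the target against the running release and raises once it is reached):

    import warnings

    def warn_until(version_name, message):
        # Sketch only: substitute the target release into the message so
        # the text and the version check come from the same value.
        warnings.warn(message.format(version=version_name), DeprecationWarning)

    warn_until(
        'Oxygen',
        'This driver has been deprecated and will be removed in the '
        '{version} release of Salt. Please use the nova driver instead.'
    )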
@@ -224,21 +224,37 @@ def list_(name,
         Password-protected ZIP archives can still be listed by zipfile, so
         there is no reason to invoke the unzip command.
     '''
-    dirs = []
+    dirs = set()
     files = []
     links = []
     try:
         with contextlib.closing(zipfile.ZipFile(cached)) as zip_archive:
             for member in zip_archive.infolist():
-                mode = member.external_attr >> 16
                 path = member.filename
-                if stat.S_ISLNK(mode):
-                    links.append(path)
-                elif stat.S_ISDIR(mode):
-                    dirs.append(path)
+                if salt.utils.is_windows():
+                    if path.endswith('/'):
+                        # zipfile.ZipInfo objects on windows use forward
+                        # slash at end of the directory name.
+                        dirs.add(path)
+                    else:
+                        files.append(path)
                 else:
-                    files.append(path)
-        return dirs, files, links
+                    mode = member.external_attr >> 16
+                    if stat.S_ISLNK(mode):
+                        links.append(path)
+                    elif stat.S_ISDIR(mode):
+                        dirs.add(path)
+                    else:
+                        files.append(path)
+
+            for path in files:
+                # ZIP files created on Windows do not add entries
+                # to the archive for directories. So, we'll need to
+                # manually add them.
+                dirname = ''.join(path.rpartition('/')[:2])
+                if dirname:
+                    dirs.add(dirname)
+            return list(dirs), files, links
     except zipfile.BadZipfile:
         raise CommandExecutionError('{0} is not a ZIP file'.format(name))
@@ -366,10 +382,15 @@ def list_(name,
         item.sort()

     if verbose:
-        ret = {'dirs': dirs, 'files': files, 'links': links}
-        ret['top_level_dirs'] = [x for x in dirs if x.count('/') == 1]
-        ret['top_level_files'] = [x for x in files if x.count('/') == 0]
-        ret['top_level_links'] = [x for x in links if x.count('/') == 0]
+        ret = {'dirs': sorted(dirs),
+               'files': sorted(files),
+               'links': sorted(links)}
+        ret['top_level_dirs'] = [x for x in ret['dirs']
+                                 if x.count('/') == 1]
+        ret['top_level_files'] = [x for x in ret['files']
+                                  if x.count('/') == 0]
+        ret['top_level_links'] = [x for x in ret['links']
+                                  if x.count('/') == 0]
     else:
         ret = sorted(dirs + files + links)
     return ret
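For background on the `external_attr >> 16` test used in this hunk: on POSIX-created archives the high 16 bits of `ZipInfo.external_attr` carry the Unix mode, so `stat.S_ISLNK`/`stat.S_ISDIR` can classify entries; archives built on Windows usually leave those bits zeroed, which is why the new code falls back to the trailing-slash convention and then synthesizes directory names from file paths. A standalone sketch of the classification (illustrative, not the module's function):

    import stat
    import zipfile

    def classify(archive_path):
        dirs, files, links = set(), [], []
        with zipfile.ZipFile(archive_path) as zf:
            for member in zf.infolist():
                mode = member.external_attr >> 16  # Unix mode bits, if present
                if mode and stat.S_ISLNK(mode):
                    links.append(member.filename)
                elif (mode and stat.S_ISDIR(mode)) or member.filename.endswith('/'):
                    dirs.add(member.filename)
                else:
                    files.append(member.filename)
        return sorted(dirs), files, links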
@@ -921,6 +921,9 @@ def _parse_settings_bond_1(opts, iface, bond_def):
             _log_default_iface(iface, binding, bond_def[binding])
             bond.update({binding: bond_def[binding]})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     if not (__grains__['os'] == "Ubuntu" and __grains__['osrelease_info'][0] >= 16):
         if 'use_carrier' in opts:
             if opts['use_carrier'] in _CONFIG_TRUE:
@@ -974,9 +977,6 @@ def _parse_settings_bond_2(opts, iface, bond_def):
         _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
         bond.update({'arp_interval': bond_def['arp_interval']})

-    if 'primary' in opts:
-        bond.update({'primary': opts['primary']})
-
     if 'hashing-algorithm' in opts:
         valid = ['layer2', 'layer2+3', 'layer3+4']
         if opts['hashing-algorithm'] in valid:
@@ -1107,6 +1107,9 @@ def _parse_settings_bond_5(opts, iface, bond_def):
         _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
         bond.update({'use_carrier': bond_def['use_carrier']})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     return bond
@@ -1143,6 +1146,9 @@ def _parse_settings_bond_6(opts, iface, bond_def):
         _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
         bond.update({'use_carrier': bond_def['use_carrier']})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     return bond
@@ -334,6 +334,9 @@ def _parse_settings_bond_1(opts, iface, bond_def):
         _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
         bond.update({'use_carrier': bond_def['use_carrier']})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     return bond
@@ -374,9 +377,6 @@ def _parse_settings_bond_2(opts, iface, bond_def):
         _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
         bond.update({'arp_interval': bond_def['arp_interval']})

-    if 'primary' in opts:
-        bond.update({'primary': opts['primary']})
-
     if 'hashing-algorithm' in opts:
         valid = ['layer2', 'layer2+3', 'layer3+4']
         if opts['hashing-algorithm'] in valid:
@@ -507,6 +507,9 @@ def _parse_settings_bond_5(opts, iface, bond_def):
         _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
         bond.update({'use_carrier': bond_def['use_carrier']})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     return bond
@@ -543,6 +546,9 @@ def _parse_settings_bond_6(opts, iface, bond_def):
         _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
         bond.update({'use_carrier': bond_def['use_carrier']})

+    if 'primary' in opts:
+        bond.update({'primary': opts['primary']})
+
     return bond
@@ -52,6 +52,7 @@ from salt.exceptions import SaltInvocationError
 import salt.utils.dictupdate as dictupdate
 from salt.ext.six import string_types
 from salt.ext.six.moves import range
+from salt.ext.six import StringIO

 log = logging.getLogger(__name__)
 __virtualname__ = 'lgpo'
@@ -109,7 +110,7 @@ try:
     VALUE_LIST_XPATH = etree.XPath('.//*[local-name() = "valueList"]')
     ENUM_ITEM_DISPLAY_NAME_XPATH = etree.XPath('.//*[local-name() = "item" and @*[local-name() = "displayName" = $display_name]]')
     ADMX_SEARCH_XPATH = etree.XPath('//*[local-name() = "policy" and @*[local-name() = "name"] = $policy_name and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = $registry_class)]')
-    ADML_SEARCH_XPATH = etree.XPath('//*[text() = $policy_name and @*[local-name() = "id"]]')
+    ADML_SEARCH_XPATH = etree.XPath('//*[starts-with(text(), $policy_name) and @*[local-name() = "id"]]')
     ADMX_DISPLAYNAME_SEARCH_XPATH = etree.XPath('//*[local-name() = "policy" and @*[local-name() = "displayName"] = $display_name and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = $registry_class) ]')
     PRESENTATION_ANCESTOR_XPATH = etree.XPath('ancestor::*[local-name() = "presentation"]')
     TEXT_ELEMENT_XPATH = etree.XPath('.//*[local-name() = "text"]')
@@ -2319,7 +2320,9 @@ class _policy_info(object):
                                             'GroupPolicy', 'User',
                                             'Registry.pol'),
                 'hive': 'HKEY_USERS',
-                'lgpo_section': 'User Configuration'
+                'lgpo_section': 'User Configuration',
+                'gpt_extension_location': 'gPCUserExtensionNames',
+                'gpt_extension_guid': '[{35378EAC-683F-11D2-A89A-00C04FBBCFA2}{D02B1F73-3407-48AE-BA88-E8213C6761F1}]'
             },
             'Machine': {
                 'policy_path': os.path.join(os.getenv('WINDIR'), 'System32',
@@ -2327,9 +2330,13 @@ class _policy_info(object):
                                             'Registry.pol'),
                 'hive': 'HKEY_LOCAL_MACHINE',
                 'lgpo_section': 'Computer Configuration',
+                'gpt_extension_location': 'gPCMachineExtensionNames',
+                'gpt_extension_guid': '[{35378EAC-683F-11D2-A89A-00C04FBBCFA2}{D02B1F72-3407-48AE-BA88-E8213C6761F1}]'
             },
         }
         self.reg_pol_header = u'\u5250\u6765\x01\x00'
+        self.gpt_ini_path = os.path.join(os.getenv('WINDIR'), 'System32',
+                                         'GroupPolicy', 'gpt.ini')

     @classmethod
     def _notEmpty(cls, val, **kwargs):
@@ -2471,7 +2478,8 @@
         '''
         minimum = 0
         maximum = 1
-        if isinstance(string_types, val):
+
+        if isinstance(val, string_types):
             if val.lower() == 'not defined':
                 return True
             else:
@@ -2676,6 +2684,19 @@ def _updatePolicyElements(policy_item, regkey):
     return policy_item


+def _remove_unicode_encoding(xml_file):
+    '''
+    attempts to remove the "encoding='unicode'" from an xml file
+    as lxml does not support that on a windows node currently
+    see issue #38100
+    '''
+    with open(xml_file, 'rb') as f:
+        xml_content = f.read()
+    modified_xml = re.sub(r' encoding=[\'"]+unicode[\'"]+', '', xml_content.decode('utf-16'), count=1)
+    xmltree = lxml.etree.parse(StringIO(modified_xml))
+    return xmltree
+
+
 def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
                               display_language='en-US'):
     '''
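The workaround added here exists because lxml refuses to parse an already-decoded string that still carries an encoding declaration ("Unicode strings with encoding declaration are not supported"). Decoding the UTF-16 file manually and stripping the declaration sidesteps that. A self-contained sketch of the same idea, assuming a UTF-16 ADMX/ADML file on disk:

    import re
    from io import StringIO
    from lxml import etree

    def parse_utf16_policy_file(path):
        # Decode manually, then drop the encoding declaration so lxml
        # will accept the already-decoded string.
        with open(path, 'rb') as f:
            text = f.read().decode('utf-16')
        text = re.sub(r' encoding=[\'"]+unicode[\'"]+', '', text, count=1)
        return etree.parse(StringIO(text))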
@@ -2701,7 +2722,17 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
         for t_admfile in files:
             admfile = os.path.join(root, t_admfile)
             parser = lxml.etree.XMLParser(remove_comments=True)
-            xmltree = lxml.etree.parse(admfile, parser=parser)
+            # see issue #38100
+            try:
+                xmltree = lxml.etree.parse(admfile, parser=parser)
+            except lxml.etree.XMLSyntaxError:
+                try:
+                    xmltree = _remove_unicode_encoding(admfile)
+                except Exception:
+                    msg = ('A error was found while processing admx file {0},'
+                           ' all policies from this file will be unavailable via this module')
+                    log.error(msg.format(admfile))
+                    continue
             namespaces = xmltree.getroot().nsmap
             namespace_string = ''
             if None in namespaces:
@@ -2752,7 +2783,17 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
                         raise SaltInvocationError(msg.format(display_language,
                                                              display_language_fallback,
                                                              t_admfile))
-                xmltree = lxml.etree.parse(adml_file)
+                try:
+                    xmltree = lxml.etree.parse(adml_file)
+                except lxml.etree.XMLSyntaxError:
+                    # see issue #38100
+                    try:
+                        xmltree = _remove_unicode_encoding(adml_file)
+                    except Exception:
+                        msg = ('An error was found while processing adml file {0}, all policy'
+                               ' languange data from this file will be unavailable via this module')
+                        log.error(msg.format(adml_file))
+                        continue
                 if None in namespaces:
                     namespaces['None'] = namespaces[None]
                     namespaces.pop(None)
@@ -3270,6 +3311,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
     elif etree.QName(element).localname == 'decimal':
         # https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx
         this_vtype = 'REG_DWORD'
+        requested_val = this_element_value
         if this_element_value is not None:
             temp_val = ''
             for v in struct.unpack('2H', struct.pack('I', int(this_element_value))):
@@ -3280,13 +3322,14 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
         if 'storeAsText' in element.attrib:
             if element.attrib['storeAsText'].lower() == 'true':
                 this_vtype = 'REG_SZ'
-                if this_element_value is not None:
-                    this_element_value = str(this_element_value)
+                if requested_val is not None:
+                    this_element_value = str(requested_val)
         if check_deleted:
             this_vtype = 'REG_SZ'
     elif etree.QName(element).localname == 'longDecimal':
         # https://msdn.microsoft.com/en-us/library/dn606015(v=vs.85).aspx
         this_vtype = 'REG_QWORD'
+        requested_val = this_element_value
         if this_element_value is not None:
             temp_val = ''
             for v in struct.unpack('4H', struct.pack('I', int(this_element_value))):
@@ -3297,8 +3340,8 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
         if 'storeAsText' in element.attrib:
             if element.attrib['storeAsText'].lower() == 'true':
                 this_vtype = 'REG_SZ'
-                if this_element_value is not None:
-                    this_element_value = str(this_element_value)
+                if requested_val is not None:
+                    this_element_value = str(requested_val)
     elif etree.QName(element).localname == 'text':
         # https://msdn.microsoft.com/en-us/library/dn605969(v=vs.85).aspx
         this_vtype = 'REG_SZ'
@@ -3913,17 +3956,98 @@ def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey):
     return None


-def _write_regpol_data(data_to_write, policy_file_path):
+def _write_regpol_data(data_to_write,
+                       policy_file_path,
+                       gpt_ini_path,
+                       gpt_extension,
+                       gpt_extension_guid):
     '''
     helper function to actually write the data to a Registry.pol file
+
+    also updates/edits the gpt.ini file to include the ADM policy extensions
+    to let the computer know user and/or machine registry policy files need
+    to be processed
+
+    data_to_write: data to write into the user/machine registry.pol file
+    policy_file_path: path to the registry.pol file
+    gpt_ini_path: path to gpt.ini file
+    gpt_extension: gpt extension list name from _policy_info class for this registry class gpt_extension_location
+    gpt_extension_guid: admx registry extension guid for the class
     '''
     try:
         if data_to_write:
             reg_pol_header = u'\u5250\u6765\x01\x00'
             if not os.path.exists(policy_file_path):
                 ret = __salt__['file.makedirs'](policy_file_path)
             with open(policy_file_path, 'wb') as pol_file:
                 if not data_to_write.startswith(reg_pol_header):
                     pol_file.write(reg_pol_header.encode('utf-16-le'))
                 pol_file.write(data_to_write.encode('utf-16-le'))
+            try:
+                gpt_ini_data = ''
+                if os.path.exists(gpt_ini_path):
+                    with open(gpt_ini_path, 'rb') as gpt_file:
+                        gpt_ini_data = gpt_file.read()
+                if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data):
+                    gpt_ini_data = '[General]\r\n' + gpt_ini_data
+                if _regexSearchRegPolData(r'{0}='.format(re.escape(gpt_extension)), gpt_ini_data):
+                    # ensure the line contains the ADM guid
+                    gpt_ext_loc = re.search(r'^{0}=.*\r\n'.format(re.escape(gpt_extension)),
+                                            gpt_ini_data,
+                                            re.IGNORECASE | re.MULTILINE)
+                    gpt_ext_str = gpt_ini_data[gpt_ext_loc.start():gpt_ext_loc.end()]
+                    if not _regexSearchRegPolData(r'{0}'.format(re.escape(gpt_extension_guid)),
+                                                  gpt_ext_str):
+                        gpt_ext_str = gpt_ext_str.split('=')
+                        gpt_ext_str[1] = gpt_extension_guid + gpt_ext_str[1]
+                        gpt_ext_str = '='.join(gpt_ext_str)
+                        gpt_ini_data = gpt_ini_data[0:gpt_ext_loc.start()] + gpt_ext_str + gpt_ini_data[gpt_ext_loc.end():]
+                else:
+                    general_location = re.search(r'^\[General\]\r\n',
+                                                 gpt_ini_data,
+                                                 re.IGNORECASE | re.MULTILINE)
+                    gpt_ini_data = "{0}{1}={2}\r\n{3}".format(
+                            gpt_ini_data[general_location.start():general_location.end()],
+                            gpt_extension, gpt_extension_guid,
+                            gpt_ini_data[general_location.end():])
+                # https://technet.microsoft.com/en-us/library/cc978247.aspx
+                if _regexSearchRegPolData(r'Version=', gpt_ini_data):
+                    version_loc = re.search(r'^Version=.*\r\n',
+                                            gpt_ini_data,
+                                            re.IGNORECASE | re.MULTILINE)
+                    version_str = gpt_ini_data[version_loc.start():version_loc.end()]
+                    version_str = version_str.split('=')
+                    version_nums = struct.unpack('>2H', struct.pack('>I', int(version_str[1])))
+                    if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower():
+                        version_nums = (version_nums[0], version_nums[1] + 1)
+                    elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower():
+                        version_nums = (version_nums[0] + 1, version_nums[1])
+                    version_num = int("{0}{1}".format(str(version_nums[0]).zfill(4),
+                                                      str(version_nums[1]).zfill(4)), 16)
+                    gpt_ini_data = "{0}{1}={2}\r\n{3}".format(
+                            gpt_ini_data[0:version_loc.start()],
+                            'Version', version_num,
+                            gpt_ini_data[version_loc.end():])
+                else:
+                    general_location = re.search(r'^\[General\]\r\n',
+                                                 gpt_ini_data,
+                                                 re.IGNORECASE | re.MULTILINE)
+                    if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower():
+                        version_nums = (0, 1)
+                    elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower():
+                        version_nums = (1, 0)
+                    gpt_ini_data = "{0}{1}={2}\r\n{3}".format(
+                            gpt_ini_data[general_location.start():general_location.end()],
+                            'Version',
+                            int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16),
+                            gpt_ini_data[general_location.end():])
+                if gpt_ini_data:
+                    with open(gpt_ini_path, 'wb') as gpt_file:
+                        gpt_file.write(gpt_ini_data)
+            except Exception as e:
+                msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format(
+                        gpt_ini_path, e)
+                raise CommandExecutionError(msg)
     except Exception as e:
         msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format(policy_file_path, e)
         raise CommandExecutionError(msg)
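The Version bookkeeping above follows the gpt.ini convention documented at the linked TechNet page: the single 32-bit value packs the user policy version in the high 16 bits and the machine policy version in the low 16 bits, and each write bumps its own half so clients notice the change. A worked sketch of the pack/unpack, matching the struct calls in the hunk (helper name is illustrative):

    import struct

    def bump_gpt_version(version, which):
        # gpt.ini stores one integer: user version in the high 16 bits,
        # machine version in the low 16 bits.
        user, machine = struct.unpack('>2H', struct.pack('>I', version))
        if which == 'machine':
            machine += 1
        elif which == 'user':
            user += 1
        return (user << 16) | machine

    assert bump_gpt_version(0x00010002, 'machine') == 0x00010003
    assert bump_gpt_version(0x00010002, 'user') == 0x00020002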
@@ -4307,7 +4431,11 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
                                                                   enabled_value_string,
                                                                   existing_data,
                                                                   append_only=True)
-        _write_regpol_data(existing_data, policy_data.admx_registry_classes[registry_class]['policy_path'])
+        _write_regpol_data(existing_data,
+                           policy_data.admx_registry_classes[registry_class]['policy_path'],
+                           policy_data.gpt_ini_path,
+                           policy_data.admx_registry_classes[registry_class]['gpt_extension_location'],
+                           policy_data.admx_registry_classes[registry_class]['gpt_extension_guid'])
     except Exception as e:
         log.error('Unhandled exception {0} occurred while attempting to write Adm Template Policy File'.format(e))
         return False
@@ -4319,34 +4447,36 @@ def _getScriptSettingsFromIniFile(policy_info):
     helper function to parse/read a GPO Startup/Shutdown script file
     '''
     _existingData = _read_regpol_file(policy_info['ScriptIni']['IniPath'])
-    _existingData = _existingData.split('\r\n')
-    script_settings = {}
-    this_section = None
-    for eLine in _existingData:
-        if eLine.startswith('[') and eLine.endswith(']'):
-            this_section = eLine.replace('[', '').replace(']', '')
-            log.debug('adding section {0}'.format(this_section))
-            if this_section:
-                script_settings[this_section] = {}
-        else:
-            if '=' in eLine:
-                log.debug('working with config line {0}'.format(eLine))
-                eLine = eLine.split('=')
-                if this_section in script_settings:
-                    script_settings[this_section][eLine[0]] = eLine[1]
-    if 'SettingName' in policy_info['ScriptIni']:
-        log.debug('Setting Name is in policy_info')
-        if policy_info['ScriptIni']['SettingName'] in script_settings[policy_info['ScriptIni']['Section']]:
-            log.debug('the value is set in the file')
-            return script_settings[policy_info['ScriptIni']['Section']][policy_info['ScriptIni']['SettingName']]
-    elif policy_info['ScriptIni']['Section'] in script_settings:
-        log.debug('no setting name')
-        return script_settings[policy_info['ScriptIni']['Section']]
-    else:
-        log.debug('broad else')
-        return None
+    if _existingData:
+        _existingData = _existingData.split('\r\n')
+        script_settings = {}
+        this_section = None
+        for eLine in _existingData:
+            if eLine.startswith('[') and eLine.endswith(']'):
+                this_section = eLine.replace('[', '').replace(']', '')
+                log.debug('adding section {0}'.format(this_section))
+                if this_section:
+                    script_settings[this_section] = {}
+            else:
+                if '=' in eLine:
+                    log.debug('working with config line {0}'.format(eLine))
+                    eLine = eLine.split('=')
+                    if this_section in script_settings:
+                        script_settings[this_section][eLine[0]] = eLine[1]
+        if 'SettingName' in policy_info['ScriptIni']:
+            log.debug('Setting Name is in policy_info')
+            if policy_info['ScriptIni']['SettingName'] in script_settings[policy_info['ScriptIni']['Section']]:
+                log.debug('the value is set in the file')
+                return script_settings[policy_info['ScriptIni']['Section']][policy_info['ScriptIni']['SettingName']]
+            else:
+                return None
+        elif policy_info['ScriptIni']['Section'] in script_settings:
+            log.debug('no setting name')
+            return script_settings[policy_info['ScriptIni']['Section']]
+        else:
+            log.debug('broad else')
+            return None
+    return None


 def _writeGpoScript(psscript=False):
@@ -4457,6 +4587,9 @@ def _lookup_admin_template(policy_name,
     suggested_policies = ''
     if len(adml_search_results) > 1:
         multiple_adml_entries = True
+        for adml_search_result in adml_search_results:
+            if not adml_search_result.attrib['text'].strip() == policy_name:
+                adml_search_results.remove(adml_search_result)
     for adml_search_result in adml_search_results:
         dmsg = 'found an ADML entry matching the string! {0} -- {1}'
         log.debug(dmsg.format(adml_search_result.tag,
@@ -4803,7 +4936,7 @@ def set_computer_policy(name,
     '''
     pol = {}
     pol[name] = setting
-    ret = set(computer_policy=pol,
+    ret = set_(computer_policy=pol,
                user_policy=None,
                cumulative_rights_assignments=cumulative_rights_assignments,
                adml_language=adml_language)
@@ -4840,7 +4973,7 @@ def set_user_policy(name,
     '''
     pol = {}
     pol[name] = setting
-    ret = set(user_policy=pol,
+    ret = set_(user_policy=pol,
                computer_policy=None,
                cumulative_rights_assignments=True,
                adml_language=adml_language)
@@ -203,12 +203,12 @@ def set_(name,
     else:
         user_policy = {}
         computer_policy = {}
-    if policy_class == 'both':
+    if policy_class.lower() == 'both':
         user_policy[name] = setting
         computer_policy[name] = setting
-    elif policy_class == 'user':
+    elif policy_class.lower() == 'user':
         user_policy[name] = setting
-    elif policy_class == 'machine' or policy_class == 'computer':
+    elif policy_class.lower() == 'machine' or policy_class.lower() == 'computer':
         computer_policy[name] = setting
     pol_data = {}
     pol_data['user'] = {'output_section': 'User Configuration',
@@ -646,18 +646,18 @@ def crl_managed(name,
         new_comp.pop('Next Update')

     file_args, kwargs = _get_file_args(name, **kwargs)
-    new_crl = False
+    new_crl_created = False
     if (current_comp == new_comp and
             current_days_remaining > days_remaining and
             __salt__['x509.verify_crl'](name, signing_cert)):
         file_args['contents'] = __salt__[
             'x509.get_pem_entry'](name, pem_type='X509 CRL')
     else:
-        new_crl = True
+        new_crl_created = True
         file_args['contents'] = new_crl

     ret = __states__['file.managed'](**file_args)
-    if new_crl:
+    if new_crl_created:
         ret['changes'] = {'Old': current, 'New': __salt__[
             'x509.read_crl'](crl=new_crl)}
     return ret
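The rename matters because, judging from the surviving `file_args['contents'] = new_crl` line, `new_crl` also holds the PEM text of the freshly generated CRL computed earlier in the function; reusing that name for the boolean flag shadowed it, so the managed file would have received the literal True instead of the CRL. A minimal illustration of the shadowing hazard (hypothetical names, not the state's code):

    def write_crl(new_crl):
        new_crl = True                 # flag shadows the PEM argument
        return new_crl                 # now True, not the CRL text

    def write_crl_fixed(new_crl):
        new_crl_created = True         # distinct flag name
        return new_crl                 # still the PEM text

    assert write_crl('-----BEGIN X509 CRL-----') is True
    assert write_crl_fixed('-----BEGIN X509 CRL-----').startswith('-----BEGIN')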
@@ -106,6 +106,9 @@ def query(key, keyid, method='GET', params=None, headers=None,
     if local_file:
         payload_hash = salt.utils.get_hash(local_file, form='sha256')

+    if path is None:
+        path = ''
+
     if not requesturl:
         requesturl = 'https://{0}/{1}'.format(endpoint, path)
         headers, requesturl = salt.utils.aws.sig4(
@@ -132,13 +135,13 @@ def query(key, keyid, method='GET', params=None, headers=None,

     if method == 'PUT':
         if local_file:
-            with salt.utils.fopen(local_file, 'r') as data:
-                result = requests.request(method,
-                                          requesturl,
-                                          headers=headers,
-                                          data=data,
-                                          verify=verify_ssl,
-                                          stream=True)
+            data = salt.utils.fopen(local_file, 'r')
+        result = requests.request(method,
+                                  requesturl,
+                                  headers=headers,
+                                  data=data,
+                                  verify=verify_ssl,
+                                  stream=True)
         response = result.content
     elif method == 'GET' and local_file and not return_bin:
         result = requests.request(method,
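Note the structural effect of this hunk: the PUT request now runs whether or not a `local_file` was supplied (requests accepts `data=None` for an empty-body PUT), instead of being issued only inside the file-handling branch. A sketch of that shape, under the assumption of a reachable endpoint (function and parameter names are illustrative):

    import requests

    def s3_put(url, headers, local_file=None, verify_ssl=True):
        # Open the upload source only when one was provided; the request
        # itself runs either way (data=None performs an empty-body PUT).
        data = open(local_file, 'rb') if local_file else None
        try:
            return requests.request('PUT', url, headers=headers,
                                    data=data, verify=verify_ssl, stream=True)
        finally:
            if data is not None:
                data.close()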
@@ -1,334 +0,0 @@ (file removed)
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Erik Johnson <erik@saltstack.com>`
'''
from __future__ import absolute_import

# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON

ensure_in_syspath('../..')

# Import Python libs
import errno
import logging
import os
import shutil

# Import salt libs
import integration
import salt.utils
from salt import fileclient
from salt.ext import six
from salttesting.helpers import ensure_in_syspath, destructiveTest
ensure_in_syspath('..')

SALTENVS = ('base', 'dev')
FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')


def _get_file_roots():
    return dict(
        [(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
    )


fileclient.__opts__ = {}
MOCKED_OPTS = {
    'file_roots': _get_file_roots(),
    'fileserver_backend': ['roots'],
    'cachedir': CACHE_ROOT,
    'file_client': 'local',
}

log = logging.getLogger(__name__)


@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class FileclientTest(integration.ModuleCase):
    '''
    Tests for the fileclient. The LocalClient is the only thing we can test as
    it is the only way we can mock the fileclient (the tests run from the
    minion process, so the master cannot be mocked from test code).
    '''

    def setUp(self):
        '''
        No need to add a dummy foo.txt to muddy up the github repo, just make
        our own fileserver root on-the-fly.
        '''
        def _new_dir(path):
            '''
            Add a new dir at ``path`` using os.makedirs. If the directory
            already exists, remove it recursively and then try to create it
            again.
            '''
            try:
                os.makedirs(path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # Just in case a previous test was interrupted, remove the
                    # directory and try adding it again.
                    shutil.rmtree(path)
                    os.makedirs(path)
                else:
                    raise

        # Crete the FS_ROOT
        for saltenv in SALTENVS:
            saltenv_root = os.path.join(FS_ROOT, saltenv)
            # Make sure we have a fresh root dir for this saltenv
            _new_dir(saltenv_root)

            path = os.path.join(saltenv_root, 'foo.txt')
            with salt.utils.fopen(path, 'w') as fp_:
                fp_.write(
                    'This is a test file in the \'{0}\' saltenv.\n'
                    .format(saltenv)
                )

            subdir_abspath = os.path.join(saltenv_root, SUBDIR)
            os.makedirs(subdir_abspath)
            for subdir_file in SUBDIR_FILES:
                path = os.path.join(subdir_abspath, subdir_file)
                with salt.utils.fopen(path, 'w') as fp_:
                    fp_.write(
                        'This is file \'{0}\' in subdir \'{1} from saltenv '
                        '\'{2}\''.format(subdir_file, SUBDIR, saltenv)
                    )

        # Create the CACHE_ROOT
        _new_dir(CACHE_ROOT)

    def tearDown(self):
        '''
        Remove the directories created for these tests
        '''
        shutil.rmtree(FS_ROOT)
        shutil.rmtree(CACHE_ROOT)

    def test_cache_dir(self):
        '''
        Ensure entire directory is cached to correct location
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=None
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
        '''
        Ensure entire directory is cached to correct location when an alternate
        cachedir is specified and that cachedir is an absolute path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=alt_cachedir
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(alt_cachedir,
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
        '''
        Ensure entire directory is cached to correct location when an alternate
        cachedir is specified and that cachedir is a relative path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = 'foo'

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=alt_cachedir
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                             alt_cachedir,
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_file(self):
        '''
        Ensure file is cached to correct location
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt', saltenv, cachedir=None)
                )
                cache_loc = os.path.join(
                    fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)

    def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
        '''
        Ensure file is cached to correct location when an alternate cachedir is
        specified and that cachedir is an absolute path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt',
                                      saltenv,
                                      cachedir=alt_cachedir)
                )
                cache_loc = os.path.join(alt_cachedir,
                                         'files',
                                         saltenv,
                                         'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)

    def test_cache_file_with_alternate_cachedir_and_relative_path(self):
        '''
        Ensure file is cached to correct location when an alternate cachedir is
        specified and that cachedir is a relative path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = 'foo'

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt',
                                      saltenv,
                                      cachedir=alt_cachedir)
                )
                cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                         alt_cachedir,
                                         'files',
                                         saltenv,
                                         'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)


if __name__ == '__main__':
    integration.run_tests(FileclientTest)
@@ -4,16 +4,45 @@
 '''
 # Import Python libs
 from __future__ import absolute_import
+import errno
+import logging
 import os
+import shutil
+
+log = logging.getLogger(__name__)

 # Import Salt Testing libs
 from salttesting.unit import skipIf
-from salttesting.helpers import ensure_in_syspath
+from salttesting.helpers import ensure_in_syspath, destructiveTest
 from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
 ensure_in_syspath('../..')

 # Import salt libs
 import integration
+import salt.utils
 from salt import fileclient
+from salt.ext import six
+
+SALTENVS = ('base', 'dev')
+FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
+CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
+SUBDIR = 'subdir'
+SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
+
+
+def _get_file_roots():
+    return dict(
+        [(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
+    )
+
+
+fileclient.__opts__ = {}
+MOCKED_OPTS = {
+    'file_roots': _get_file_roots(),
+    'fileserver_backend': ['roots'],
+    'cachedir': CACHE_ROOT,
+    'file_client': 'local',
+}


 @skipIf(NO_MOCK, NO_MOCK_REASON)
@@ -42,6 +71,288 @@ class FileClientTest(integration.ModuleCase):
         ret = fileclient.get_file_client(self.minion_opts)
         self.assertEqual('remote_client', ret)

The remainder of this hunk adds the following FileclientCacheTest class between the context lines above and the module trailer below:

@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class FileclientCacheTest(integration.ModuleCase):
    '''
    Tests for the fileclient caching. The LocalClient is the only thing we can
    test as it is the only way we can mock the fileclient (the tests run from
    the minion process, so the master cannot be mocked from test code).
    '''

    def setUp(self):
        '''
        No need to add a dummy foo.txt to muddy up the github repo, just make
        our own fileserver root on-the-fly.
        '''
        def _new_dir(path):
            '''
            Add a new dir at ``path`` using os.makedirs. If the directory
            already exists, remove it recursively and then try to create it
            again.
            '''
            try:
                os.makedirs(path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # Just in case a previous test was interrupted, remove the
                    # directory and try adding it again.
                    shutil.rmtree(path)
                    os.makedirs(path)
                else:
                    raise

        # Crete the FS_ROOT
        for saltenv in SALTENVS:
            saltenv_root = os.path.join(FS_ROOT, saltenv)
            # Make sure we have a fresh root dir for this saltenv
            _new_dir(saltenv_root)

            path = os.path.join(saltenv_root, 'foo.txt')
            with salt.utils.fopen(path, 'w') as fp_:
                fp_.write(
                    'This is a test file in the \'{0}\' saltenv.\n'
                    .format(saltenv)
                )

            subdir_abspath = os.path.join(saltenv_root, SUBDIR)
            os.makedirs(subdir_abspath)
            for subdir_file in SUBDIR_FILES:
                path = os.path.join(subdir_abspath, subdir_file)
                with salt.utils.fopen(path, 'w') as fp_:
                    fp_.write(
                        'This is file \'{0}\' in subdir \'{1} from saltenv '
                        '\'{2}\''.format(subdir_file, SUBDIR, saltenv)
                    )

        # Create the CACHE_ROOT
        _new_dir(CACHE_ROOT)

    def tearDown(self):
        '''
        Remove the directories created for these tests
        '''
        shutil.rmtree(FS_ROOT)
        shutil.rmtree(CACHE_ROOT)

    def test_cache_dir(self):
        '''
        Ensure entire directory is cached to correct location
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=None
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
        '''
        Ensure entire directory is cached to correct location when an alternate
        cachedir is specified and that cachedir is an absolute path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=alt_cachedir
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(alt_cachedir,
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
        '''
        Ensure entire directory is cached to correct location when an alternate
        cachedir is specified and that cachedir is a relative path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = 'foo'

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=alt_cachedir
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                             alt_cachedir,
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)

    def test_cache_file(self):
        '''
        Ensure file is cached to correct location
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt', saltenv, cachedir=None)
                )
                cache_loc = os.path.join(
                    fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)

    def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
        '''
        Ensure file is cached to correct location when an alternate cachedir is
        specified and that cachedir is an absolute path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt',
                                      saltenv,
                                      cachedir=alt_cachedir)
                )
                cache_loc = os.path.join(alt_cachedir,
                                         'files',
                                         saltenv,
                                         'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)

    def test_cache_file_with_alternate_cachedir_and_relative_path(self):
        '''
        Ensure file is cached to correct location when an alternate cachedir is
        specified and that cachedir is a relative path
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = 'foo'

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_file('salt://foo.txt',
                                      saltenv,
                                      cachedir=alt_cachedir)
                )
                cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                         alt_cachedir,
                                         'files',
                                         saltenv,
                                         'foo.txt')
                # Double check that the content of the cached file identifies
                # it as being from the correct saltenv. The setUp function
                # creates the file with the name of the saltenv mentioned in
                # the file, so a simple 'in' check is sufficient here. If
                # opening the file raises an exception, this is a problem, so
                # we are not catching the exception and letting it be raised so
                # that the test fails.
                with salt.utils.fopen(cache_loc) as fp_:
                    content = fp_.read()
                log.debug('cache_loc = %s', cache_loc)
                log.debug('content = %s', content)
                self.assertTrue(saltenv in content)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(FileClientTest)
@@ -2,8 +2,11 @@

 # Import python libs
 from __future__ import absolute_import
+import logging
 import re
+
+log = logging.getLogger(__name__)

 # Import Salt Testing libs
 from salttesting.helpers import ensure_in_syspath
 ensure_in_syspath('../../')
@@ -23,7 +26,7 @@ class SysModuleTest(integration.ModuleCase):
         '''
         Make sure no functions are exposed that don't have valid docstrings
         '''
-        docs = self.run_function('sys.doc')
+        mods = self.run_function('sys.list_modules')
         nodoc = set()
         noexample = set()
         allow_failure = (
@@ -50,15 +53,43 @@ class SysModuleTest(integration.ModuleCase):
             'status.list2cmdline'
         )

-        for fun in docs:
-            if fun.startswith('runtests_helpers'):
-                continue
-            if fun in allow_failure:
-                continue
-            if not isinstance(docs[fun], six.string_types):
-                nodoc.add(fun)
-            elif not re.search(r'([E|e]xample(?:s)?)+(?:.*)::?', docs[fun]):
-                noexample.add(fun)
+        batches = 2
+        mod_count = len(mods)
+        batch_size = mod_count / float(batches)
+        if batch_size.is_integer():
+            batch_size = int(batch_size)
+        else:
+            # Check if the module count is evenly divisible by the number of
+            # batches. If not, increase the batch_size by the number of batches
+            # being run. This ensures that we get the correct number of
+            # batches, and that we don't end up running sys.doc an extra time
+            # to cover the remainder. For example, if we had a batch count of 2
+            # and 121 modules, if we just divided by 2 we'd end up running
+            # sys.doc 3 times.
+            batch_size = int(batch_size) + batches
+
+        log.debug('test_valid_docs batch size = %s', batch_size)
+        start = 0
+        end = batch_size
+        while start <= mod_count:
+            log.debug('running sys.doc on mods[%s:%s]', start, end)
+            docs = self.run_function('sys.doc', mods[start:end])
+            if docs == 'VALUE TRIMMED':
+                self.fail(
+                    'sys.doc output trimmed. It may be necessary to increase '
+                    'the number of batches'
+                )
+            for fun in docs:
+                if fun.startswith('runtests_helpers'):
+                    continue
+                if fun in allow_failure:
+                    continue
+                if not isinstance(docs[fun], six.string_types):
+                    nodoc.add(fun)
+                elif not re.search(r'([E|e]xample(?:s)?)+(?:.*)::?', docs[fun]):
+                    noexample.add(fun)
+            start += batch_size
+            end += batch_size

         if not nodoc and not noexample:
             return
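Worked example of the batch-size rule in that hunk: with batches = 2 and 121 modules, 121 / 2.0 = 60.5 is not an integer, so the size becomes int(60.5) + 2 = 62, giving slices [0:62] and [62:124], exactly two sys.doc calls; plain truncation to 60 would have required a third call for the last module. A quick check of the arithmetic (standalone sketch):

    def batch_size(mod_count, batches=2):
        size = mod_count / float(batches)
        # Round up by the batch count when the division is uneven, so the
        # number of slices never exceeds the requested batch count.
        return int(size) if size.is_integer() else int(size) + batches

    assert batch_size(120) == 60   # even split: two batches of 60
    assert batch_size(121) == 62   # uneven: 62 + 59, still two batches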
@@ -313,9 +313,11 @@ class MatchTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
         Test to see if we're not auto-adding '*' and 'sys.doc' to the call
         '''
         data = self.run_salt('-d -t 20')
-        self.assertIn('user.add:', data)
+        if data:
+            self.assertIn('user.add:', data)
         data = self.run_salt('"*" -d -t 20')
-        self.assertIn('user.add:', data)
+        if data:
+            self.assertIn('user.add:', data)
         data = self.run_salt('"*" -d user -t 20')
         self.assertIn('user.add:', data)
         data = self.run_salt('"*" sys.doc -d user -t 20')