Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)
Merge branch '2017.7' into 43386_2017_7_schedule_kwargs_pub
Commit 2681b7d3fa
18 changed files with 204 additions and 78 deletions

@@ -373,7 +373,7 @@
# interface: eth0
# cidr: '10.0.0.0/8'

# The number of seconds a mine update runs.
# The number of minutes between mine updates.
#mine_interval: 60

# Windows platforms lack posix IPC and must rely on slower TCP based inter-

@@ -674,7 +674,7 @@ Note these can be defined in the pillar for a minion as well.

Default: ``60``

The number of seconds a mine update runs.
The number of minutes between mine updates.

.. code-block:: yaml

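The setting documented in this hunk lives in the minion configuration file; an illustrative entry (the value shown is simply the documented default):

.. code-block:: yaml

    mine_interval: 60
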
@@ -118,3 +118,53 @@ has to be closed after every command.
.. code-block:: yaml

    proxy_always_alive: False

``proxy_merge_pillar_in_opts``
------------------------------

.. versionadded:: 2017.7.3

Default: ``False``.

Whether the pillar data should be merged into the proxy configuration options.
As multiple proxies can run on the same server, we may need different
configuration options for each, while there's one single configuration file.
The solution is merging the pillar data of each proxy minion into the opts.

.. code-block:: yaml

    proxy_merge_pillar_in_opts: True

``proxy_deep_merge_pillar_in_opts``
-----------------------------------

.. versionadded:: 2017.7.3

Default: ``False``.

Deep merge of pillar data into configuration opts.
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
enabled.

``proxy_merge_pillar_in_opts_strategy``
---------------------------------------

.. versionadded:: 2017.7.3

Default: ``smart``.

The strategy used when merging pillar configuration into opts.
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
enabled.

``proxy_mines_pillar``
----------------------

.. versionadded:: 2017.7.3

Default: ``True``.

Allow enabling mine details using pillar data. This evaluates the mine
configuration under the pillar, for the following regular minion options that
are also equally available on the proxy minion: :conf_minion:`mine_interval`,
and :conf_minion:`mine_functions`.

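Taken together, the proxy options added in this hunk could appear in a proxy minion configuration roughly like this (values are illustrative, not part of the commit):

.. code-block:: yaml

    proxy_merge_pillar_in_opts: True
    proxy_deep_merge_pillar_in_opts: True
    proxy_merge_pillar_in_opts_strategy: smart
    proxy_mines_pillar: True
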
@@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
:param bool allusers: This parameter is specific to `.msi` installations. It
    tells `msiexec` to install the software for all users. The default is True.

:param bool cache_dir: If true, the entire directory where the installer resides
    will be recursively cached. This is useful for installers that depend on
    other files in the same directory for installation.
:param bool cache_dir: If true when installer URL begins with salt://, the
    entire directory where the installer resides will be recursively cached.
    This is useful for installers that depend on other files in the same
    directory for installation.

    .. note:: Only applies to salt: installer URLs.
:param str cache_file:
    When installer URL begins with salt://, this indicates single file to copy
    down for use with the installer. Copied to the same location as the
    installer. Use this over ``cache_dir`` if there are many files in the
    directory and you only need a specific file and don't want to cache
    additional files that may reside in the installer directory.

Here's an example for a software package that has dependent files:

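The ``cache_dir``/``cache_file`` parameters described here normally show up in a Windows software definition; a rough, hypothetical sketch (package name, version, and URL invented):

.. code-block:: yaml

    mypkg:
      '1.2.3':
        full_name: 'My Package'
        installer: 'salt://win/repo-ng/mypkg/setup.msi'
        install_flags: '/qn /norestart'
        cache_dir: True
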
@@ -334,7 +334,7 @@ VALID_OPTS = {
    # Whether or not scheduled mine updates should be accompanied by a job return for the job cache
    'mine_return_job': bool,

    # Schedule a mine update every n number of seconds
    # The number of minutes between mine updates.
    'mine_interval': int,

    # The ipc strategy. (i.e., sockets versus tcp, etc)

@@ -569,6 +569,23 @@ VALID_OPTS = {
    # False in 2016.3.0
    'add_proxymodule_to_opts': bool,

    # Merge pillar data into configuration opts.
    # As multiple proxies can run on the same server, we may need different
    # configuration options for each, while there's one single configuration file.
    # The solution is merging the pillar data of each proxy minion into the opts.
    'proxy_merge_pillar_in_opts': bool,

    # Deep merge of pillar data into configuration opts.
    # Evaluated only when `proxy_merge_pillar_in_opts` is True.
    'proxy_deep_merge_pillar_in_opts': bool,

    # The strategy used when merging pillar into opts.
    # Considered only when `proxy_merge_pillar_in_opts` is True.
    'proxy_merge_pillar_in_opts_strategy': str,

    # Allow enabling mine details using pillar data.
    'proxy_mines_pillar': bool,

    # In some particular cases, always alive proxies are not beneficial.
    # This option can be used in those less dynamic environments:
    # the user can request the connection

@@ -1637,6 +1654,12 @@ DEFAULT_PROXY_MINION_OPTS = {
    'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
    'default_include': 'proxy.d/*.conf',

    'proxy_merge_pillar_in_opts': False,
    'proxy_deep_merge_pillar_in_opts': False,
    'proxy_merge_pillar_in_opts_strategy': 'smart',

    'proxy_mines_pillar': True,

    # By default, proxies will preserve the connection.
    # If this option is set to False,
    # the connection with the remote dumb device

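These defaults leave full pillar merging off but keep the mine settings pillar-driven; a hypothetical pillar assigned to one proxy minion might then carry just the mine bits (IDs and functions invented for illustration):

.. code-block:: yaml

    mine_interval: 30
    mine_functions:
      - net.lldp
      - net.facts
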
@@ -270,7 +270,7 @@ def raw_mod(opts, name, functions, mod='modules'):
        testmod['test.ping']()
    '''
    loader = LazyLoader(
        _module_dirs(opts, mod, 'rawmodule'),
        _module_dirs(opts, mod, 'module'),
        opts,
        tag='rawmodule',
        virtual_enable=False,

@@ -100,6 +100,7 @@ import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup

import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS

@@ -3118,6 +3119,26 @@ class ProxyMinion(Minion):
        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']

        if self.opts.get('proxy_merge_pillar_in_opts'):
            # Override proxy opts with pillar data when the user required.
            self.opts = salt.utils.dictupdate.merge(self.opts,
                                                    self.opts['pillar'],
                                                    strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                    merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
        elif self.opts.get('proxy_mines_pillar'):
            # Even when not required, some details such as mine configuration
            # should be merged anyway whenever possible.
            if 'mine_interval' in self.opts['pillar']:
                self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
            if 'mine_functions' in self.opts['pillar']:
                general_proxy_mines = self.opts.get('mine_functions', [])
                specific_proxy_mines = self.opts['pillar']['mine_functions']
                try:
                    self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
                except TypeError as terr:
                    log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
                        self.opts['id']))

        fq_proxyname = self.opts['proxy']['proxytype']

        # Need to load the modules so they get all the dunder variables

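A minimal sketch of the merge call used above, assuming a Salt installation is importable (the dictionaries are invented for illustration):

.. code-block:: python

    import salt.utils.dictupdate

    opts = {'mine_interval': 60, 'mine_functions': ['net.lldp']}
    pillar = {'mine_interval': 15}

    # strategy='smart' lets Salt pick the merge routine (a recursive update by
    # default); merge_lists controls whether list values are concatenated or replaced.
    merged = salt.utils.dictupdate.merge(opts, pillar,
                                         strategy='smart',
                                         merge_lists=False)
    assert merged['mine_interval'] == 15  # pillar wins for overlapping keys
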
@@ -17,10 +17,10 @@ except ImportError:
    from pipes import quote as _cmd_quote

# Import salt libs
import salt.utils
import salt.utils.yast
import salt.utils.preseed
import salt.utils.kickstart
import salt.utils.validate.path
import salt.syspaths
from salt.exceptions import SaltInvocationError

@@ -403,6 +403,11 @@ def _bootstrap_deb(
        log.error('Required tool debootstrap is not installed.')
        return False

    if static_qemu and not salt.utils.validate.path.is_executable(static_qemu):
        log.error('Required tool qemu not '
                  'present/readable at: {0}'.format(static_qemu))
        return False

    if isinstance(pkgs, (list, tuple)):
        pkgs = ','.join(pkgs)
    if isinstance(exclude_pkgs, (list, tuple)):

@@ -427,11 +432,13 @@ def _bootstrap_deb(

    __salt__['cmd.run'](deb_args, python_shell=False)

    __salt__['cmd.run'](
        'cp {qemu} {root}/usr/bin/'.format(
            qemu=_cmd_quote(static_qemu), root=_cmd_quote(root)
    if static_qemu:
        __salt__['cmd.run'](
            'cp {qemu} {root}/usr/bin/'.format(
                qemu=_cmd_quote(static_qemu), root=_cmd_quote(root)
            )
        )
    )

    env = {'DEBIAN_FRONTEND': 'noninteractive',
           'DEBCONF_NONINTERACTIVE_SEEN': 'true',
           'LC_ALL': 'C',

@@ -31,7 +31,7 @@ def __virtual__():
    if __grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'):
        return __virtualname__
    return (False, 'The groupadd execution module cannot be loaded: '
        ' only available on Linux, OpenBSD and NetBSD')
            ' only available on Linux, OpenBSD and NetBSD')


def add(name, gid=None, system=False, root=None):

@@ -44,12 +44,12 @@ def add(name, gid=None, system=False, root=None):

        salt '*' group.add foo 3456
    '''
    cmd = 'groupadd '
    cmd = ['groupadd']
    if gid:
        cmd += '-g {0} '.format(gid)
        cmd.append('-g {0}'.format(gid))
    if system and __grains__['kernel'] != 'OpenBSD':
        cmd += '-r '
    cmd += name
        cmd.append('-r')
    cmd.append(name)

    if root is not None:
        cmd.extend(('-R', root))

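The switch above from string concatenation to an argument list is what makes ``python_shell=False`` safe; outside of Salt the same idea looks like this (group name and GID invented):

.. code-block:: python

    import subprocess

    # Each argument reaches groupadd verbatim: no shell, no quoting pitfalls.
    cmd = ['groupadd', '-g', '3456', 'foo']
    subprocess.call(cmd)
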
@@ -69,7 +69,7 @@ def delete(name, root=None):

        salt '*' group.delete foo
    '''
    cmd = ('groupdel', name)
    cmd = ['groupdel', name]

    if root is not None:
        cmd.extend(('-R', root))

@@ -140,7 +140,7 @@ def chgid(name, gid, root=None):
    pre_gid = __salt__['file.group_to_gid'](name)
    if gid == pre_gid:
        return True
    cmd = ('groupmod', '-g', gid, name)
    cmd = ['groupmod', '-g', gid, name]

    if root is not None:
        cmd.extend(('-R', root))

@@ -170,15 +170,15 @@ def adduser(name, username, root=None):

    if __grains__['kernel'] == 'Linux':
        if on_redhat_5:
            cmd = ('gpasswd', '-a', username, name)
            cmd = ['gpasswd', '-a', username, name]
        elif on_suse_11:
            cmd = ('usermod', '-A', name, username)
            cmd = ['usermod', '-A', name, username]
        else:
            cmd = ('gpasswd', '--add', username, name)
            cmd = ['gpasswd', '--add', username, name]
        if root is not None:
            cmd.extend(('-Q', root))
    else:
        cmd = ('usermod', '-G', name, username)
        cmd = ['usermod', '-G', name, username]
        if root is not None:
            cmd.extend(('-R', root))

@@ -208,20 +208,20 @@ def deluser(name, username, root=None):
    if username in grp_info['members']:
        if __grains__['kernel'] == 'Linux':
            if on_redhat_5:
                cmd = ('gpasswd', '-d', username, name)
                cmd = ['gpasswd', '-d', username, name]
            elif on_suse_11:
                cmd = ('usermod', '-R', name, username)
                cmd = ['usermod', '-R', name, username]
            else:
                cmd = ('gpasswd', '--del', username, name)
                cmd = ['gpasswd', '--del', username, name]
            if root is not None:
                cmd.extend(('-R', root))
            retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
        elif __grains__['kernel'] == 'OpenBSD':
            out = __salt__['cmd.run_stdout']('id -Gn {0}'.format(username),
                                             python_shell=False)
            cmd = 'usermod -S '
            cmd += ','.join([g for g in out.split() if g != str(name)])
            cmd += ' {0}'.format(username)
            cmd = ['usermod', '-S']
            cmd.append(','.join([g for g in out.split() if g != str(name)]))
            cmd.append('{0}'.format(username))
            retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
        else:
            log.error('group.deluser is not yet supported on this platform')

@@ -249,13 +249,13 @@ def members(name, members_list, root=None):

    if __grains__['kernel'] == 'Linux':
        if on_redhat_5:
            cmd = ('gpasswd', '-M', members_list, name)
            cmd = ['gpasswd', '-M', members_list, name]
        elif on_suse_11:
            for old_member in __salt__['group.info'](name).get('members'):
                __salt__['cmd.run']('groupmod -R {0} {1}'.format(old_member, name), python_shell=False)
            cmd = ('groupmod', '-A', members_list, name)
            cmd = ['groupmod', '-A', members_list, name]
        else:
            cmd = ('gpasswd', '--members', members_list, name)
            cmd = ['gpasswd', '--members', members_list, name]
        if root is not None:
            cmd.extend(('-R', root))
        retcode = __salt__['cmd.retcode'](cmd, python_shell=False)

@@ -270,7 +270,7 @@ def members(name, members_list, root=None):
        for user in members_list.split(","):
            if user:
                retcode = __salt__['cmd.retcode'](
                    'usermod -G {0} {1}'.format(name, user),
                    ['usermod', '-G', name, user],
                    python_shell=False)
                if not retcode == 0:
                    break

@@ -199,12 +199,10 @@ def create(path,
        for entry in extra_search_dir:
            cmd.append('--extra-search-dir={0}'.format(entry))
    if never_download is True:
        if virtualenv_version_info >= (1, 10):
        if virtualenv_version_info >= (1, 10) and virtualenv_version_info < (14, 0, 0):
            log.info(
                'The virtualenv \'--never-download\' option has been '
                'deprecated in virtualenv(>=1.10), as such, the '
                '\'never_download\' option to `virtualenv.create()` has '
                'also been deprecated and it\'s not necessary anymore.'
                '--never-download was deprecated in 1.10.0, but reimplemented in 14.0.0. '
                'If this feature is needed, please install a supported virtualenv version.'
            )
        else:
            cmd.append('--never-download')

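The new guard is just a half-open version range over a tuple; a standalone sketch of the same comparison (the version value is invented):

.. code-block:: python

    virtualenv_version_info = (15, 1, 0)  # e.g. parsed from `virtualenv --version`

    # --never-download was deprecated in virtualenv 1.10.0 and reimplemented in
    # 14.0.0 (per the log message above); tuples compare element-wise.
    if (1, 10) <= virtualenv_version_info < (14, 0, 0):
        print('skip --never-download for this virtualenv version')
    else:
        print('pass --never-download to virtualenv')
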
@@ -983,18 +983,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
            # Version 1.2.3 will apply to packages foo and bar
            salt '*' pkg.install foo,bar version=1.2.3

        cache_file (str):
            A single file to copy down for use with the installer. Copied to the
            same location as the installer. Use this over ``cache_dir`` if there
            are many files in the directory and you only need a specific file
            and don't want to cache additional files that may reside in the
            installer directory. Only applies to files on ``salt://``

        cache_dir (bool):
            True will copy the contents of the installer directory. This is
            useful for installations that are not a single file. Only applies to
            directories on ``salt://``

        extra_install_flags (str):
            Additional install flags that will be appended to the
            ``install_flags`` defined in the software definition file. Only

@@ -1286,7 +1274,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
        if use_msiexec:
            cmd = msiexec
            arguments = ['/i', cached_pkg]
            if pkginfo['version_num'].get('allusers', True):
            if pkginfo[version_num].get('allusers', True):
                arguments.append('ALLUSERS="1"')
            arguments.extend(salt.utils.shlex_split(install_flags))
        else:

@@ -391,7 +391,6 @@ def clean_old_jobs():
    Clean out the old jobs from the job cache
    '''
    if __opts__['keep_jobs'] != 0:
        cur = time.time()
        jid_root = _job_dir()

        if not os.path.exists(jid_root):

@@ -421,7 +420,7 @@ def clean_old_jobs():
        shutil.rmtree(t_path)
    elif os.path.isfile(jid_file):
        jid_ctime = os.stat(jid_file).st_ctime
        hours_difference = (cur - jid_ctime) / 3600.0
        hours_difference = (time.time() - jid_ctime) / 3600.0
        if hours_difference > __opts__['keep_jobs'] and os.path.exists(t_path):
            # Remove the entire t_path from the original JID dir
            shutil.rmtree(t_path)

@@ -435,7 +434,7 @@ def clean_old_jobs():
        # Checking the time again prevents a possible race condition where
        # t_path JID dirs were created, but not yet populated by a jid file.
        t_path_ctime = os.stat(t_path).st_ctime
        hours_difference = (cur - t_path_ctime) / 3600.0
        hours_difference = (time.time() - t_path_ctime) / 3600.0
        if hours_difference > __opts__['keep_jobs']:
            shutil.rmtree(t_path)

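Both changes above swap a timestamp captured once at the start of the run for a fresh ``time.time()`` at comparison time; a small sketch of why that matters for a long-running cleanup (path and threshold hypothetical):

.. code-block:: python

    import os
    import time

    def is_older_than(path, keep_hours):
        # Take time.time() here, at comparison time, so a long-running cleanup
        # loop does not judge age against a stale start-of-run timestamp.
        age_hours = (time.time() - os.stat(path).st_ctime) / 3600.0
        return age_hours > keep_hours
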
@@ -64,3 +64,14 @@ def is_readable(path):

    # The path does not exist
    return False


def is_executable(path):
    '''
    Check if a given path is executable by the current user.

    :param path: The path to check
    :returns: True or False
    '''

    return os.access(path, os.X_OK)

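The helper added here is used the way the debootstrap hunk earlier in this diff uses it; for example (path hypothetical, assuming a Salt checkout on the import path):

.. code-block:: python

    import salt.utils.validate.path

    static_qemu = '/usr/bin/qemu-x86_64-static'  # example binary to check
    if not salt.utils.validate.path.is_executable(static_qemu):
        print('Required tool qemu not present/readable at: {0}'.format(static_qemu))
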
@@ -94,9 +94,11 @@ class GenesisTestCase(TestCase, LoaderModuleMockMixin):
                                           'cmd.run': MagicMock(),
                                           'disk.blkid': MagicMock(return_value={})}):
            with patch('salt.modules.genesis.salt.utils.which', return_value=True):
                param_set['params'].update(common_parms)
                self.assertEqual(genesis.bootstrap(**param_set['params']), None)
                genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False)
            with patch('salt.modules.genesis.salt.utils.validate.path.is_executable',
                       return_value=True):
                param_set['params'].update(common_parms)
                self.assertEqual(genesis.bootstrap(**param_set['params']), None)
                genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False)

        with patch.object(genesis, '_bootstrap_pacman', return_value='A') as pacman_patch:
            with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(),

@@ -118,16 +118,16 @@ class GroupAddTestCase(TestCase, LoaderModuleMockMixin):
        '''
        os_version_list = [
            {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'},
             'cmd': ('gpasswd', '-a', 'root', 'test')},
             'cmd': ['gpasswd', '-a', 'root', 'test']},

            {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'},
             'cmd': ('usermod', '-A', 'test', 'root')},
             'cmd': ['usermod', '-A', 'test', 'root']},

            {'grains': {'kernel': 'Linux'},
             'cmd': ('gpasswd', '--add', 'root', 'test')},
             'cmd': ['gpasswd', '--add', 'root', 'test']},

            {'grains': {'kernel': 'OTHERKERNEL'},
             'cmd': ('usermod', '-G', 'test', 'root')},
             'cmd': ['usermod', '-G', 'test', 'root']},
        ]

        for os_version in os_version_list:

@@ -145,16 +145,16 @@ class GroupAddTestCase(TestCase, LoaderModuleMockMixin):
        '''
        os_version_list = [
            {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'},
             'cmd': ('gpasswd', '-d', 'root', 'test')},
             'cmd': ['gpasswd', '-d', 'root', 'test']},

            {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'},
             'cmd': ('usermod', '-R', 'test', 'root')},
             'cmd': ['usermod', '-R', 'test', 'root']},

            {'grains': {'kernel': 'Linux'},
             'cmd': ('gpasswd', '--del', 'root', 'test')},
             'cmd': ['gpasswd', '--del', 'root', 'test']},

            {'grains': {'kernel': 'OpenBSD'},
             'cmd': 'usermod -S foo root'},
             'cmd': ['usermod', '-S', 'foo', 'root']},
        ]

        for os_version in os_version_list:

@@ -180,16 +180,16 @@ class GroupAddTestCase(TestCase, LoaderModuleMockMixin):
        '''
        os_version_list = [
            {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'},
             'cmd': ('gpasswd', '-M', 'foo', 'test')},
             'cmd': ['gpasswd', '-M', 'foo', 'test']},

            {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'},
             'cmd': ('groupmod', '-A', 'foo', 'test')},
             'cmd': ['groupmod', '-A', 'foo', 'test']},

            {'grains': {'kernel': 'Linux'},
             'cmd': ('gpasswd', '--members', 'foo', 'test')},
             'cmd': ['gpasswd', '--members', 'foo', 'test']},

            {'grains': {'kernel': 'OpenBSD'},
             'cmd': 'usermod -G test foo'},
             'cmd': ['usermod', '-G', 'test', 'foo']},
        ]

        for os_version in os_version_list:

@@ -16,6 +16,7 @@ from tests.support.mock import (
)
# Import Salt Libs
import salt.modules.hosts as hosts
import salt.utils
from salt.ext.six.moves import StringIO


@@ -92,8 +93,12 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin):
        '''
        Tests true if the alias is set
        '''
        hosts_file = '/etc/hosts'
        if salt.utils.is_windows():
            hosts_file = r'C:\Windows\System32\Drivers\etc\hosts'

        with patch('salt.modules.hosts.__get_hosts_filename',
                   MagicMock(return_value='/etc/hosts')), \
                   MagicMock(return_value=hosts_file)), \
             patch('os.path.isfile', MagicMock(return_value=False)), \
             patch.dict(hosts.__salt__,
                        {'config.option': MagicMock(return_value=None)}):

@@ -139,7 +144,16 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin):
                self.close()

            def close(self):
                data[0] = self.getvalue()
                # Don't save unless there's something there. In Windows
                # the class gets initialized the first time with mode = w
                # which sets the initial value to ''. When the class closes
                # it clears out data and causes the test to fail.
                # I don't know why it get's initialized with a mode of 'w'
                # For the purposes of this test data shouldn't be empty
                # This is a problem with this class and not with the hosts
                # module
                if self.getvalue():
                    data[0] = self.getvalue()
                StringIO.close(self)

        expected = '\n'.join((

@@ -151,6 +165,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin):
        mock_opt = MagicMock(return_value=None)
        with patch.dict(hosts.__salt__, {'config.option': mock_opt}):
            self.assertTrue(hosts.set_host('1.1.1.1', ' '))

        self.assertEqual(data[0], expected)

    # 'rm_host' function tests: 2

@@ -182,9 +197,13 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin):
        '''
        Tests if specified host entry gets added from the hosts file
        '''
        hosts_file = '/etc/hosts'
        if salt.utils.is_windows():
            hosts_file = r'C:\Windows\System32\Drivers\etc\hosts'

        with patch('salt.utils.fopen', mock_open()), \
                patch('salt.modules.hosts.__get_hosts_filename',
                      MagicMock(return_value='/etc/hosts')):
                      MagicMock(return_value=hosts_file)):
            mock_opt = MagicMock(return_value=None)
            with patch.dict(hosts.__salt__, {'config.option': mock_opt}):
                self.assertTrue(hosts.add_host('10.10.10.10', 'Salt1'))

@@ -109,10 +109,9 @@ class VirtualenvTestCase(TestCase, LoaderModuleMockMixin):

            # Are we logging the deprecation information?
            self.assertIn(
                'INFO:The virtualenv \'--never-download\' option has been '
                'deprecated in virtualenv(>=1.10), as such, the '
                '\'never_download\' option to `virtualenv.create()` has '
                'also been deprecated and it\'s not necessary anymore.',
                'INFO:--never-download was deprecated in 1.10.0, '
                'but reimplemented in 14.0.0. If this feature is needed, '
                'please install a supported virtualenv version.',
                handler.messages
            )

@@ -95,7 +95,10 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
        local_cache.clean_old_jobs()

        # Get the name of the JID directory that was created to test against
        jid_dir_name = jid_dir.rpartition('/')[2]
        if salt.utils.is_windows():
            jid_dir_name = jid_dir.rpartition('\\')[2]
        else:
            jid_dir_name = jid_dir.rpartition('/')[2]

        # Assert the JID directory is still present to be cleaned after keep_jobs interval
        self.assertEqual([jid_dir_name], os.listdir(TMP_JID_DIR))

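As an aside on the platform branch above, ``os.path.basename`` splits on the running platform's own separator, so the same name can be extracted without branching (path invented for illustration):

.. code-block:: python

    import os

    jid_dir = os.path.join('tmp', 'salt_jids', '20170817104510123456')  # hypothetical JID dir
    jid_dir_name = os.path.basename(jid_dir)  # handles '/' or '\\' on the native platform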