Merge branch '2016.3' into 'develop'

Conflicts:
  - salt/client/ssh/__init__.py
  - salt/client/ssh/shell.py
  - salt/config/__init__.py
  - salt/grains/core.py
  - salt/utils/gitfs.py
rallytime 2016-08-08 12:34:23 -06:00
commit de4b33f2e1
27 changed files with 275 additions and 127 deletions

View file

@@ -129,6 +129,13 @@
# This data may contain sensitive data and should be protected accordingly.
#cachedir: /var/cache/salt/minion
# Append minion_id to these directories. Helps with
# multiple proxies and minions running on the same machine.
# Allowed elements in the list: pki_dir, cachedir, extension_modules
# Normally not needed unless running several proxies and/or minions on the same machine
# Defaults to ['cachedir'] for proxies, [] (empty list) for regular minions
#append_minionid_config_dirs:
# Verify and set permissions on configuration directories at startup.
#verify_env: True

View file

@@ -84,6 +84,16 @@
# This data may contain sensitive data and should be protected accordingly.
#cachedir: /var/cache/salt/minion
# Append minion_id to these directories. Helps with
# multiple proxies and minions running on the same machine.
# Allowed elements in the list: pki_dir, cachedir, extension_modules
# Normally not needed unless running several proxies and/or minions on the same machine
# Defaults to ['cachedir'] for proxies, [] (empty list) for regular minions
# append_minionid_config_dirs:
# - cachedir
# Verify and set permissions on configuration directories at startup.
#verify_env: True

View file

@@ -395,7 +395,24 @@ This directory may contain sensitive data and should be protected accordingly.
cachedir: /var/cache/salt/minion
.. conf_minion:: verify_env
.. conf_minion:: append_minionid_config_dirs
``append_minionid_config_dirs``
-------------------------------
Default: ``[]`` (the empty list) for regular minions, ``['cachedir']`` for proxy minions.
Append minion_id to these configuration directories. Helps with multiple proxies
and minions running on the same machine. Allowed elements in the list:
``pki_dir``, ``cachedir``, ``extension_modules``.
Normally not needed unless running several proxies and/or minions on the same machine.
.. code-block:: yaml
append_minionid_config_dirs:
- pki_dir
- cachedir
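
A minimal sketch of the effect, assuming a hypothetical minion id of ``web1`` (values illustrative); this mirrors the directory-rewriting loop this commit adds to ``salt.config.apply_minion_config``:

    import os

    opts = {
        'id': 'web1',                                  # hypothetical minion id
        'cachedir': '/var/cache/salt/minion',
        'pki_dir': '/etc/salt/pki/minion',
        'append_minionid_config_dirs': ['cachedir'],
    }

    # Mirrors the loop added to apply_minion_config in this commit
    for directory in opts.get('append_minionid_config_dirs', []):
        if directory in ['pki_dir', 'cachedir', 'extension_modules']:
            opts[directory] = os.path.join(opts[directory], opts['id'])

    print(opts['cachedir'])  # /var/cache/salt/minion/web1
    print(opts['pki_dir'])   # /etc/salt/pki/minion (not listed, so unchanged)
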
``verify_env``
--------------

View file

@@ -0,0 +1,9 @@
===========================
Salt 2016.3.4 Release Notes
===========================
Version 2016.3.4 is a bugfix release for :doc:`2016.3.0
</topics/releases/2016.3.0>`.
- The ``disk.wipe`` execution module function has been modified
so that it correctly wipes a disk.

View file

@@ -44,6 +44,10 @@ The information which can be stored in a roster ``target`` is the following:
# Optional parameters
port: # The target system's ssh port number
sudo: # Boolean to run command via sudo
sudo_user: # Str: Set this to execute Salt as a sudo user other than root.
# This user must be in the same system group as the remote user
# that is used to login and is specified above. Alternatively,
# the user must be a super-user.
tty: # Boolean: Set this option to True if sudo is also set to
# True and requiretty is also set on the target system
priv: # File path to ssh private key, defaults to salt-ssh.rsa
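
A minimal sketch of the effective behavior (this is not Salt's actual code): with ``sudo`` set, the SSH shim added in this changeset prefixes the remote command with plain ``sudo``, or with ``sudo -u <sudo_user>`` when ``sudo_user`` is also set:

    def sudo_prefix(target):
        # No sudo requested: run as the roster's login user.
        if not target.get('sudo'):
            return ''
        # sudo_user set: become that user rather than root.
        if target.get('sudo_user'):
            return 'sudo -u {0}'.format(target['sudo_user'])
        return 'sudo '

    print(repr(sudo_prefix({'sudo': True, 'sudo_user': 'deploy'})))  # 'sudo -u deploy'
    print(repr(sudo_prefix({'sudo': True})))                         # 'sudo '
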

View file

@@ -418,9 +418,9 @@ against the ``return`` statement in the ``if`` clause.
There are more examples of writing unit tests of varying complexities available
in the following docs:
* `Simple Unit Test Example<simple-unit-example>`
* `Complete Unit Test Example<complete-unit-example>`
* `Complex Unit Test Example<complex-unit-example>`
* :ref:`Simple Unit Test Example<simple-unit-example>`
* :ref:`Complete Unit Test Example<complete-unit-example>`
* :ref:`Complex Unit Test Example<complex-unit-example>`
.. note::
@@ -455,7 +455,7 @@ Automated Test Runs
===================
SaltStack maintains a Jenkins server which can be viewed at
http://jenkins.saltstack.com. The tests executed from this Jenkins server
https://jenkins.saltstack.com. The tests executed from this Jenkins server
create fresh virtual machines for each test run, then execute the destructive
tests on the new, clean virtual machine. This allows for the execution of tests
across supported platforms.

View file

@@ -409,7 +409,9 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: dis
# Proxies get their ID from the command line. This may need to change in
# the future.
self.config['id'] = self.values.proxyid
# We used to set this here. Now it is set in ProxyMinionOptionParser
# by passing it via setup_config to config.minion_config
# self.config['id'] = self.values.proxyid
try:
if self.config['verify_env']:

View file

@@ -135,6 +135,9 @@ SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
if [ "$SUDO" ]
then SUDO="sudo -u {{SUDO_USER}}"
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
@@ -278,6 +281,10 @@ class SSH(object):
'ssh_sudo',
salt.config.DEFAULT_MASTER_OPTS['ssh_sudo']
),
'sudo_user': self.opts.get(
'ssh_sudo_user',
salt.config.DEFAULT_MASTER_OPTS['ssh_sudo_user']
),
'identities_only': self.opts.get(
'ssh_identities_only',
salt.config.DEFAULT_MASTER_OPTS['ssh_identities_only']
@@ -672,6 +679,7 @@ class Single(object):
mine=False,
minion_opts=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None,
**kwargs):
# Get mine setting and mine_functions if defined in kwargs (from roster)
@@ -728,6 +736,7 @@ class Single(object):
'tty': tty,
'mods': self.mods,
'identities_only': identities_only,
'sudo_user': sudo_user,
'remote_port_forwards': remote_port_forwards}
# Pre apply changeable defaults
self.minion_opts = {
@@ -968,6 +977,7 @@ class Single(object):
Prepare the command string
'''
sudo = 'sudo' if self.target['sudo'] else ''
sudo_user = self.target['sudo_user']
if '_caller_cachedir' in self.opts:
cachedir = self.opts['_caller_cachedir']
else:
@@ -1012,6 +1022,7 @@ ARGS = {10}\n'''.format(self.minion_config,
cmd = SSH_SH_SHIM.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user,
SSH_PY_CODE=py_code_enc,
HOST_PY_MAJOR=sys.version_info[0],
)

View file

@@ -63,6 +63,7 @@ class Shell(object):
tty=False,
mods=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None):
self.opts = opts
self.host = host

View file

@@ -65,9 +65,14 @@ def need_deployment():
# If SUDOing then also give the super user group write permissions
sudo_gid = os.environ.get('SUDO_GID')
if sudo_gid:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
stt = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
try:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
stt = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
except OSError:
sys.stdout.write('\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
'and is not root, be certain the user is in the same group\nas the login user')
sys.exit(1)
# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))

View file

@@ -182,6 +182,11 @@ VALID_OPTS = {
# The directory to store all cache files.
'cachedir': str,
# Append minion_id to these directories. Helps with
# multiple proxies and minions running on the same machine.
# Allowed elements in the list: pki_dir, cachedir, extension_modules
'append_minionid_config_dirs': list,
# Flag to cache jobs locally.
'cache_jobs': bool,
@@ -790,6 +795,7 @@ VALID_OPTS = {
'ssh_passwd': str,
'ssh_port': str,
'ssh_sudo': bool,
'ssh_sudo_user': str,
'ssh_timeout': float,
'ssh_user': str,
'ssh_scan_ports': str,
@@ -915,6 +921,7 @@ DEFAULT_MINION_OPTS = {
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
'id': '',
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
'append_minionid_config_dirs': [],
'cache_jobs': False,
'grains_cache': False,
'grains_cache_expiration': 300,
@@ -1352,6 +1359,7 @@ DEFAULT_MASTER_OPTS = {
'ssh_passwd': '',
'ssh_port': '22',
'ssh_sudo': False,
'ssh_sudo_user': '',
'ssh_timeout': 60,
'ssh_user': 'root',
'ssh_scan_ports': '22',
@@ -1404,6 +1412,7 @@ DEFAULT_PROXY_MINION_OPTS = {
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'proxy'),
'add_proxymodule_to_opts': False,
'proxy_merge_grains_in_module': False,
'append_minionid_config_dirs': ['cachedir'],
'default_include': 'proxy.d/*.conf',
}
@@ -1810,7 +1819,8 @@ def minion_config(path,
env_var='SALT_MINION_CONFIG',
defaults=None,
cache_minion_id=False,
ignore_config_errors=True):
ignore_config_errors=True,
minion_id=None):
'''
Reads in the minion configuration file and sets up special options
@@ -1850,7 +1860,9 @@
overrides.update(include_config(include, path, verbose=True,
exit_on_config_errors=not ignore_config_errors))
opts = apply_minion_config(overrides, defaults, cache_minion_id=cache_minion_id)
opts = apply_minion_config(overrides, defaults,
cache_minion_id=cache_minion_id,
minion_id=minion_id)
_validate_opts(opts)
return opts
@@ -3002,7 +3014,8 @@ def get_id(opts, cache_minion_id=False):
def apply_minion_config(overrides=None,
defaults=None,
cache_minion_id=False):
cache_minion_id=False,
minion_id=None):
'''
Returns minion configurations dict.
'''
@@ -3016,20 +3029,28 @@ def apply_minion_config(overrides=None,
opts['__cli'] = os.path.basename(sys.argv[0])
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
# No ID provided. Will getfqdn save us?
using_ip_for_id = False
if not opts.get('id'):
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=cache_minion_id)
if minion_id:
opts['id'] = minion_id
else:
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=cache_minion_id)
# it does not make sense to append a domain to an IP based id
if not using_ip_for_id and 'append_domain' in opts:
opts['id'] = _append_domain(opts)
for directory in opts.get('append_minionid_config_dirs', []):
if directory in ['pki_dir', 'cachedir', 'extension_modules']:
newdirectory = os.path.join(opts[directory], opts['id'])
opts[directory] = newdirectory
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
# Enabling open mode requires that the value be set to True, and
# nothing else!
opts['open_mode'] = opts['open_mode'] is True
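
A hedged usage sketch of the new ``minion_id`` parameter (the config path and id are illustrative). An explicit id bypasses ``get_id()`` entirely and feeds straight into the directory-append loop above:

    import salt.config

    # minion_id here stands in for the value of --proxyid
    opts = salt.config.minion_config('/etc/salt/proxy',
                                     cache_minion_id=False,
                                     minion_id='router1')
    assert opts['id'] == 'router1'
    # If the loaded config sets append_minionid_config_dirs (the proxy
    # defaults include 'cachedir'), opts['cachedir'] now ends in /router1
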

View file

@@ -1591,72 +1591,30 @@ def append_domain():
return grain
def ip4():
def ip_fqdn():
'''
Return a list of ipv4 addrs
Return ip address and FQDN grains
'''
if salt.utils.is_proxy():
return {}
return {'ipv4': salt.utils.network.ip_addrs(include_loopback=True)}
ret = {}
ret['ipv4'] = salt.utils.network.ip_addrs(include_loopback=True)
ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)
_fqdn = hostname()['fqdn']
for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
key = 'fqdn_ip' + ipv_num
if not ret['ipv' + ipv_num]:
ret[key] = []
else:
try:
info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info))
except socket.error:
ret[key] = []
def fqdn_ip4():
'''
Return a list of ipv4 addrs of fqdn
'''
if salt.utils.is_proxy():
return {}
addrs = []
try:
hostname_grains = hostname()
info = socket.getaddrinfo(hostname_grains['fqdn'], None, socket.AF_INET)
addrs = list(set(item[4][0] for item in info))
except socket.error:
pass
return {'fqdn_ip4': addrs}
def has_ipv6():
'''
Check whether IPv6 is supported on this platform.
.. versionadded:: Carbon
'''
return {'has_ipv6': socket.has_ipv6}
def ip6():
'''
Return a list of ipv6 addrs
'''
if salt.utils.is_proxy():
return {}
return {'ipv6': salt.utils.network.ip_addrs6(include_loopback=True)}
def fqdn_ip6():
'''
Return a list of ipv6 addrs of fqdn
'''
if salt.utils.is_proxy():
return {}
addrs = []
try:
hostname_grains = hostname()
info = socket.getaddrinfo(hostname_grains['fqdn'], None, socket.AF_INET6)
addrs = list(set(item[4][0] for item in info))
except socket.error:
pass
return {'fqdn_ip6': addrs}
return ret
def ip_interfaces():
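
The consolidated lookup can be exercised standalone; a minimal sketch (the addresses returned depend on the local machine's FQDN):

    import socket

    fqdn = socket.getfqdn()
    for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
        try:
            info = socket.getaddrinfo(fqdn, None, socket_type)
            addrs = list(set(item[4][0] for item in info))
        except socket.error:
            addrs = []
        print('fqdn_ip{0}: {1}'.format(ipv_num, addrs))
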

View file

@@ -1418,7 +1418,7 @@ class Minion(MinionBase):
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'

View file

@@ -195,7 +195,12 @@ def _get_virtual():
except KeyError:
__context__['pkg._get_virtual'] = {}
if HAS_APT:
apt_cache = apt.cache.Cache()
try:
apt_cache = apt.cache.Cache()
except SystemError as se:
msg = 'Failed to get virtual package information ({0})'.format(se)
log.error(msg)
raise CommandExecutionError(msg)
pkgs = getattr(apt_cache._cache, 'packages', [])
for pkg in pkgs:
for item in getattr(pkg, 'provides_list', []):
@@ -281,7 +286,10 @@ def latest_version(*names, **kwargs):
if refresh:
refresh_db(cache_valid_time)
virtpkgs = _get_virtual()
try:
virtpkgs = _get_virtual()
except CommandExecutionError as cee:
raise CommandExecutionError(cee)
all_virt = set()
for provides in six.itervalues(virtpkgs):
all_virt.update(provides)
@@ -1263,7 +1271,10 @@ def list_pkgs(versions_as_list=False,
# Check for virtual packages. We need dctrl-tools for this.
if not removed:
virtpkgs_all = _get_virtual()
try:
virtpkgs_all = _get_virtual()
except CommandExecutionError as cee:
raise CommandExecutionError(cee)
virtpkgs = set()
for realpkg, provides in six.iteritems(virtpkgs_all):
# grep-available returns info on all virtual packages. Ignore any

View file

@@ -309,13 +309,16 @@ def wipe(device):
salt '*' disk.wipe /dev/sda1
'''
cmd = 'wipefs {0}'.format(device)
cmd = 'wipefs -a {0}'.format(device)
try:
out = __salt__['cmd.run_all'](cmd, python_shell=False)
except subprocess.CalledProcessError as err:
return False
if out['retcode'] == 0:
return True
else:
log.error('Error wiping device {0}: {1}'.format(device, out['stderr']))
return False
def dump(device, args=None):

View file

@@ -8,6 +8,7 @@ from __future__ import absolute_import
# Import python libs
import json
import re
import logging
import random
import string
@@ -307,16 +308,24 @@ def check_password(name, password, runas=None):
salt '*' rabbitmq.check_password rabbit_user password
'''
# try to get the rabbitmq-version - adapted from _get_rabbitmq_plugin
if runas is None:
runas = salt.utils.get_user()
try:
version = [int(i) for i in __salt__['pkg.version']('rabbitmq-server').split('-')[0].split('.')]
res = __salt__['cmd.run'](['rabbitmqctl', 'status'], runas=runas, python_shell=False)
server_version = re.search(r'\{rabbit,"RabbitMQ","(.+)"\}', res)
if server_version is None:
raise ValueError
server_version = server_version.group(1)
version = [int(i) for i in server_version.split('.')]
except ValueError:
version = (0, 0, 0)
if len(version) < 3:
version = (0, 0, 0)
if runas is None:
runas = salt.utils.get_user()
# rabbitmq introduced a native api to check a username and password in version 3.5.7.
if tuple(version) >= (3, 5, 7):
res = __salt__['cmd.run'](
@@ -324,8 +333,8 @@ def check_password(name, password, runas=None):
runas=runas,
output_loglevel='quiet',
python_shell=False)
msg = 'password-check'
return _format_response(res, msg)
return 'Error:' not in res
cmd = ('rabbit_auth_backend_internal:check_user_login'
'(<<"{0}">>, [{{password, <<"{1}">>}}]).').format(
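
A small sketch of the new version detection applied to sample ``rabbitmqctl status`` output (the status line below is illustrative):

    import re

    res = 'Status of node rabbit@host ...\n{rabbit,"RabbitMQ","3.6.5"},'
    match = re.search(r'\{rabbit,"RabbitMQ","(.+)"\}', res)
    version = [int(i) for i in match.group(1).split('.')] if match else (0, 0, 0)
    # The native password-check API exists from RabbitMQ 3.5.7 onwards
    print(tuple(version) >= (3, 5, 7))  # True
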

View file

@@ -466,4 +466,4 @@ def make_repo(repodir, keyid=None, env=None, use_passphrase=False, gnupghome='/e
proc.close(terminate=True, kill=True)
cmd = 'createrepo --update {0}'.format(repodir)
__salt__['cmd.run'](cmd, runas=runas)
return __salt__['cmd.run_all'](cmd, runas=runas)

View file

@@ -132,7 +132,7 @@ def custom():
return ret
@with_deprecated(globals(), "Boron")
@with_deprecated(globals(), "Carbon")
def uptime():
'''
Return the uptime for this system.

View file

@@ -1598,14 +1598,17 @@ def download(*packages, **kwargs):
pkg_ret = {}
for dld_result in __zypper__.xml.call('download', *packages).getElementsByTagName("download-result"):
repo = dld_result.getElementsByTagName("repository")[0]
path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
pkg_info = {
'repository-name': repo.getAttribute('name'),
'repository-alias': repo.getAttribute('alias'),
'path': path,
}
key = _get_first_aggregate_text(
dld_result.getElementsByTagName('name')
)
pkg_ret[key] = pkg_info
if __salt__['lowpkg.checksum'](pkg_info['path']):
pkg_ret[key] = pkg_info
if pkg_ret:
failed = [pkg for pkg in packages if pkg not in pkg_ret]

View file

@@ -451,7 +451,7 @@ def _format_host(host, data):
line_max_len - 7)
hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))
if __opts__.get('state_output_profile', False):
if __opts__.get('state_output_profile', True):
sum_duration = sum(rdurations)
duration_unit = 'ms'
# convert to seconds if duration is 1000ms or more
@@ -524,7 +524,7 @@ def _format_terse(tcolor, comps, ret, colors, tabular):
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u'{0}'
if __opts__.get('state_output_profile', False):
if __opts__.get('state_output_profile', True):
fmt_string += u'{6[start_time]!s} [{6[duration]!s} ms] '
fmt_string += u'{2:>10}.{3:<10} {4:7} Name: {1}{5}'
elif isinstance(tabular, str):
@@ -536,7 +536,7 @@ def _format_terse(tcolor, comps, ret, colors, tabular):
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u' {0} Name: {1} - Function: {2}.{3} - Result: {4}'
if __opts__.get('state_output_profile', False):
if __opts__.get('state_output_profile', True):
fmt_string += u' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'
fmt_string += u'{5}'
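
For context, a sketch of the profile summary these checks now enable by default (durations illustrative; Salt reports them in milliseconds):

    rdurations = [12.5, 980.0, 2500.0]  # hypothetical per-state durations (ms)
    sum_duration = sum(rdurations)
    duration_unit = 'ms'
    # convert to seconds if duration is 1000ms or more
    if sum_duration > 999:
        sum_duration /= 1000
        duration_unit = 's'
    print('Total run time: {0:.3f} {1}'.format(sum_duration, duration_unit))
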

View file

@@ -839,16 +839,11 @@ def latest(name,
elif remote_rev_type == 'sha1':
has_remote_rev = True
# If has_remote_rev is False, then either the remote rev could not
# be found with git ls-remote (in which case we won't know more
# until fetching) or we're going to be checking out a new branch
# and don't have to worry about fast-forwarding. So, we will set
# fast_forward to None (to signify uncertainty) unless there are
# local changes, in which case we will set it to False.
# If fast_forward is not boolean, then we don't know if this will
# be a fast forward or not, because a fetch is required.
fast_forward = None if not local_changes else False
if has_remote_rev:
# Remote rev already present
if (not revs_match and not update_head) \
and (branch is None or branch == local_branch):
ret['comment'] = remote_loc.capitalize() \
@@ -861,25 +856,26 @@
)
return ret
# No need to check if this is a fast_forward if we already know
# that it won't be (due to local changes).
if fast_forward is not False:
if base_rev is None:
# If we're here, the remote_rev exists in the local
# checkout but there is still no HEAD locally. A possible
# reason for this is that an empty repository existed there
# and a remote was added and fetched, but the repository
# was not fast-forwarded. Regardless, going from no HEAD to
# a locally-present rev is considered a fast-forward
# update, unless there are local changes.
fast_forward = not bool(local_changes)
else:
fast_forward = __salt__['git.merge_base'](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
ignore_retcode=True)
# No need to check if this is a fast_forward if we already know
# that it won't be (due to local changes).
if fast_forward is not False:
if base_rev is None:
# If we're here, the remote_rev exists in the local
# checkout but there is still no HEAD locally. A
# possible reason for this is that an empty repository
# existed there and a remote was added and fetched, but
# the repository was not fast-forwarded. Regardless,
# going from no HEAD to a locally-present rev is
# considered a fast-forward update, unless there are
# local changes.
fast_forward = not bool(local_changes)
else:
fast_forward = __salt__['git.merge_base'](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
ignore_retcode=True)
if fast_forward is False:
if not force_reset:
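
The ``git.merge_base`` call with ``is_ancestor=True`` amounts to the following plain-git test (a sketch; the wrapper additionally handles the user and ignores the return code). A zero exit status means ``base_rev`` is an ancestor of ``remote_rev``, i.e. the update is a fast-forward:

    import subprocess

    def is_fast_forward(target, base_rev, remote_rev):
        # `git merge-base --is-ancestor A B` exits 0 when A is an ancestor of B
        return subprocess.call(
            ['git', 'merge-base', '--is-ancestor', base_rev, remote_rev],
            cwd=target) == 0
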

View file

@@ -346,7 +346,7 @@ def repo(name,
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Package repo at {0} will be rebuilt'.format(name)
ret['comment'] = 'Package repo metadata at {0} will be refreshed'.format(name)
return ret
# Need the check for None here, if env is not provided then it falls back
@@ -363,6 +363,18 @@
func = 'rpmbuild.make_repo'
break
__salt__[func](name, keyid, env, use_passphrase, gnupghome, runas)
ret['changes'] = {'refresh': True}
res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas)
if res['retcode'] > 0:
ret['result'] = False
else:
ret['changes'] = {'refresh': True}
if res['stdout'] and res['stderr']:
ret['comment'] = "{0}\n{1}".format(res['stdout'], res['stderr'])
elif res['stdout']:
ret['comment'] = res['stdout']
elif res['stderr']:
ret['comment'] = res['stderr']
return ret

View file

@@ -946,6 +946,7 @@ class GitPython(GitProvider):
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
file_blob = tree / path
@@ -967,6 +968,7 @@ class GitPython(GitProvider):
break
except KeyError:
# File not found or repo_path points to a directory
blob = None
break
if isinstance(blob, git.Blob):
return blob, blob.hexsha, blob.mode
@@ -1489,6 +1491,7 @@ class Pygit2(GitProvider):
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
entry = tree[path]
@@ -1504,7 +1507,9 @@
)
else:
blob = self.repo[entry.oid]
break
except KeyError:
blob = None
break
if isinstance(blob, pygit2.Blob):
return blob, blob.hex, mode
@@ -1840,6 +1845,7 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
prefix_dirs, _, filename = path.rpartition(os.path.sep)
tree = self.walk_tree(tree, prefix_dirs)
@@ -1861,6 +1867,7 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
blob = self.repo.get_object(oid)
break
except KeyError:
blob = None
break
if isinstance(blob, dulwich.objects.Blob):
return blob, blob.sha().hexdigest(), mode
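
All three providers receive the same fix; a condensed sketch of why the ``blob = None`` assignments matter. Without them, breaking out of the loop can leave ``blob`` unbound (an ``UnboundLocalError`` at the ``isinstance`` check) or bound to a stale object from a previous symlink hop. The ``tree``/``entry`` objects below are hypothetical stand-ins for the GitPython/pygit2/dulwich equivalents:

    import stat

    def find_blob(tree, path, max_depth=64):
        '''Resolve path in tree, following at most max_depth symlink hops.'''
        depth = 0
        while True:
            depth += 1
            if depth > max_depth:
                blob = None  # too many symlink hops: give up cleanly
                break
            try:
                entry = tree[path]  # raises KeyError when path is absent
            except KeyError:
                blob = None  # not found, or path points to a directory
                break
            if stat.S_ISLNK(entry.mode):
                # Symlink: re-resolve against its target on the next pass
                path = entry.target
                continue
            blob = entry
            break
        return blob  # always bound, possibly None
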

View file

@@ -1788,13 +1788,13 @@ class MinionOptionParser(six.with_metaclass(OptionParserMeta, MasterOptionParser
class ProxyMinionOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ProxyIdMixIn,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
SaltfileMixIn,
ProxyIdMixIn)): # pylint: disable=no-init
SaltfileMixIn)): # pylint: disable=no-init
description = (
'The Salt proxy minion, connects to and controls devices not able to run a minion. '
@@ -1807,8 +1807,13 @@ class ProxyMinionOptionParser(six.with_metaclass(OptionParserMeta,
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'proxy')
def setup_config(self):
try:
minion_id = self.values.proxyid
except AttributeError:
minion_id = None
return config.minion_config(self.get_config_file_path(),
cache_minion_id=False)
cache_minion_id=False, minion_id=minion_id)
class SyndicOptionParser(six.with_metaclass(OptionParserMeta,

View file

@@ -197,6 +197,62 @@ class GitTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_fast_forward(self):
'''
Test running git.latest state a second time after changes have been
made to the remote repo.
'''
def _head(cwd):
return self.run_function('git.rev_parse', [cwd, 'HEAD'])
repo_url = 'https://{0}/saltstack/salt-test-repo.git'.format(self.__domain)
mirror_dir = os.path.join(integration.TMP, 'salt_repo_mirror')
mirror_url = 'file://' + mirror_dir
admin_dir = os.path.join(integration.TMP, 'salt_repo_admin')
clone_dir = os.path.join(integration.TMP, 'salt_repo')
try:
# Mirror the repo
self.run_function('git.clone',
[mirror_dir, repo_url, None, '--mirror'])
# Make sure the directory for the mirror now exists
self.assertTrue(os.path.exists(mirror_dir))
# Clone the mirror twice, once to the admin location and once to
# the clone_dir
ret = self.run_state('git.latest', name=mirror_url, target=admin_dir)
self.assertSaltTrueReturn(ret)
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
self.assertSaltTrueReturn(ret)
# Make a change to the repo by editing the file in the admin copy
# of the repo and committing.
head_pre = _head(admin_dir)
with open(os.path.join(admin_dir, 'LICENSE'), 'a') as fp_:
fp_.write('Hello world!')
self.run_function('git.commit', [admin_dir, 'Added a line', '-a'])
# Make sure HEAD is pointing to a new SHA so we know we properly
# committed our change.
head_post = _head(admin_dir)
self.assertNotEqual(head_pre, head_post)
# Push the change to the mirror
# NOTE: the test will fail if the salt-test-repo's default branch
# is changed.
self.run_function('git.push', [admin_dir, 'origin', 'develop'])
# Re-run the git.latest state on the clone_dir
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
self.assertSaltTrueReturn(ret)
# Make sure that the clone_dir now has the correct SHA
self.assertEqual(head_post, _head(clone_dir))
finally:
for path in (mirror_dir, admin_dir, clone_dir):
shutil.rmtree(path, ignore_errors=True)
def test_present(self):
'''
git.present

View file

@@ -114,7 +114,7 @@ class DiskTestCase(TestCase):
with patch.dict(disk.__salt__, {'cmd.run_all': mock}):
disk.wipe('/dev/sda')
mock.assert_called_once_with(
'wipefs /dev/sda',
'wipefs -a /dev/sda',
python_shell=False
)

View file

@@ -390,6 +390,7 @@ class ZypperTestCase(TestCase):
test_out = {
'nmap': {
'path': u'/var/cache/zypp/packages/SLE-12-x86_64-Pool/x86_64/nmap-6.46-1.72.x86_64.rpm',
'repository-alias': u'SLE-12-x86_64-Pool',
'repository-name': u'SLE-12-x86_64-Pool'
}