Merge branch '2017.7' into fix-net-runner

This commit is contained in:
Nicole Thomas 2017-10-18 11:46:53 -04:00 committed by GitHub
commit 02ffb4f38e
73 changed files with 550 additions and 214 deletions

3
.gitignore vendored
View file

@ -88,3 +88,6 @@ tests/integration/cloud/providers/logs
# Private keys from the integration tests
tests/integration/cloud/providers/pki/minions
# Ignore tox virtualenvs
/.tox/

View file

@ -258,8 +258,8 @@ ignore-imports=no
[BASIC]
# Required attributes for module, separated by a comma
required-attributes=
# Required attributes for module, separated by a comma (will be removed in Pylint 2.0)
#required-attributes=
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input
@ -365,7 +365,8 @@ spelling-store-unknown-words=no
[CLASSES]
# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
# Will be removed in Pylint 2.0
#ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

Binary file not shown.

Before

Width:  |  Height:  |  Size: 790 KiB

After

Width:  |  Height:  |  Size: 438 KiB

View file

@ -245,8 +245,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2017.7.1' # latest release
previous_release = '2016.11.7' # latest release from previous branch
latest_release = '2017.7.2' # latest release
previous_release = '2016.11.8' # latest release from previous branch
previous_release_dir = '2016.11' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch

View file

@ -1164,7 +1164,7 @@ be able to execute a certain module. The ``sys`` module is built into the minion
and cannot be disabled.
This setting can also tune the minion. Because all modules are loaded into system
memory, disabling modules will lover the minion's memory footprint.
memory, disabling modules will lower the minion's memory footprint.
Modules should be specified according to their file name on the system and not by
their virtual name. For example, to disable ``cmd``, use the string ``cmdmod`` which

View file

@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states
in parallel. This is accomplished very easily by adding the ``parallel: True``
option to your state declaration:
.. code_block:: yaml
.. code-block:: yaml
nginx:
service.running:
@ -24,7 +24,7 @@ state to finish.
Given this example:
.. code_block:: yaml
.. code-block:: yaml
sleep 10:
cmd.run:
@ -74,16 +74,16 @@ also complete.
Things to be Careful of
=======================
Parallel States does not prevent you from creating parallel conflicts on your
Parallel States do not prevent you from creating parallel conflicts on your
system. This means that if you start multiple package installs using Salt then
the package manager will block or fail. If you attempt to manage the same file
with multiple states in parallel then the result can produce an unexpected
file.
Make sure that the states you choose to run in parallel do not conflict, or
else, like in and parallel programming environment, the outcome may not be
else, like in any parallel programming environment, the outcome may not be
what you expect. Doing things like just making all states run in parallel
will almost certinly result in unexpected behavior.
will almost certainly result in unexpected behavior.
With that said, running states in parallel should be safe the vast majority
of the time and the most likely culprit for unexpected behavior is running

View file

@ -40,7 +40,7 @@ Set up an initial profile at /etc/salt/cloud.profiles or in the /etc/salt/cloud.
.. code-block:: yaml
scalewa-ubuntu:
scaleway-ubuntu:
provider: my-scaleway-config
image: Ubuntu Trusty (14.04 LTS)

View file

@ -123,7 +123,7 @@ to the module being tested one should do:
}
Consider this more extensive example from
``tests/unit/modules/test_libcloud_dns.py``::
``tests/unit/modules/test_libcloud_dns.py``:
.. code-block:: python
@ -319,7 +319,7 @@ function into ``__salt__`` that's actually a MagicMock instance.
def show_patch(self):
with patch.dict(my_module.__salt__,
{'function.to_replace': MagicMock()}:
{'function.to_replace': MagicMock()}):
# From this scope, carry on with testing, with a modified __salt__!

View file

@ -218,6 +218,7 @@ Server configuration values and their defaults:
# Bind to LDAP anonymously to determine group membership
# Active Directory does not allow anonymous binds without special configuration
# In addition, if auth.ldap.anonymous is True, empty bind passwords are not permitted.
auth.ldap.anonymous: False
# FOR TESTING ONLY, this is a VERY insecure setting.
@ -257,7 +258,11 @@ and groups, it re-authenticates as the user running the Salt commands.
If you are already aware of the structure of your DNs and permissions in your LDAP store are set such that
users can look up their own group memberships, then the first and second users can be the same. To tell Salt this is
the case, omit the ``auth.ldap.bindpw`` parameter. You can template the ``binddn`` like this:
the case, omit the ``auth.ldap.bindpw`` parameter. Note this is not the same thing as using an anonymous bind.
Most LDAP servers will not permit anonymous bind, and as mentioned above, if `auth.ldap.anonymous` is False you
cannot use an empty password.
You can template the ``binddn`` like this:
.. code-block:: yaml

View file

@ -4,9 +4,21 @@ Salt 2016.11.8 Release Notes
Version 2016.11.8 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
Anonymous Binds and LDAP/Active Directory
-----------------------------------------
When auth.ldap.anonymous is set to False, the bind password can no longer be empty.
Changes for v2016.11.7..v2016.11.8
----------------------------------
Security Fix
============
CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2017-09-11T14:52:27Z*

View file

@ -0,0 +1,6 @@
============================
Salt 2016.11.9 Release Notes
============================
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.

View file

@ -7,23 +7,9 @@ Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Changes for v2016.3.7..v2016.3.8
--------------------------------
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
controls whether a minion can request that the master revoke its key. When True, a minion
can request a key revocation and the master will comply. If it is False, the key will not
be revoked by the msater.
Security Fix
============
New master configuration option `require_minion_sign_messages`
This requires that minions cryptographically sign the messages they
publish to the master. If minions are not signing, then log this information
at loglevel 'INFO' and drop the message without acting on it.
CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
New master configuration option `drop_messages_signature_fail`
Drop messages from minions when their signatures do not validate.
Note that when this option is False but `require_minion_sign_messages` is True
minions MUST sign their messages but the validity of their signatures
is ignored.
New minion configuration option `minion_sign_messages`
Causes the minion to cryptographically sign the payload of messages it places
on the event bus for the master. The payloads are signed with the minion's
private key so the master can verify the signature with its public key.
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)

View file

@ -0,0 +1,29 @@
===========================
Salt 2016.3.9 Release Notes
===========================
Version 2016.3.9 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Changes for v2016.3.7..v2016.3.9
--------------------------------
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
controls whether a minion can request that the master revoke its key. When True, a minion
can request a key revocation and the master will comply. If it is False, the key will not
be revoked by the master.
New master configuration option `require_minion_sign_messages`
This requires that minions cryptographically sign the messages they
publish to the master. If minions are not signing, then log this information
at loglevel 'INFO' and drop the message without acting on it.
New master configuration option `drop_messages_signature_fail`
Drop messages from minions when their signatures do not validate.
Note that when this option is False but `require_minion_sign_messages` is True
minions MUST sign their messages but the validity of their signatures
is ignored.
New minion configuration option `minion_sign_messages`
Causes the minion to cryptographically sign the payload of messages it places
on the event bus for the master. The payloads are signed with the minion's
private key so the master can verify the signature with its public key.

View file

@ -7,18 +7,40 @@ Version 2017.7.2 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Changes for v2017.7.1..v2017.7.2
--------------------------------
Security Fix
============
CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
Known Issues
============
On 2017.7.2 when using salt-api and cherrypy version 5.6.0, issue `#43581`_ will occur when starting the salt-api service. We have patched the cherry-py packages for python-cherrypy-5.6.0-2 from repo.saltstack.com. If you are using python-cherrypy-5.6.0-1 please ensure to run `yum install python-cherrypy` to install the new patched version.
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2017-09-26T21:06:19Z*
*Generated at: 2017-10-02T21:10:14Z*
Statistics:
Statistics
==========
- Total Merges: **326**
- Total Issue references: **133**
- Total PR references: **389**
- Total Merges: **328**
- Total Issue references: **134**
- Total PR references: **391**
Changes:
Changes
=======
- **PR** `#43868`_: (*rallytime*) Back-port `#43847`_ to 2017.7.2
* Fix to module.run
- **PR** `#43756`_: (*gtmanfred*) split build and install for pkg osx
@ *2017-09-26T20:51:28Z*
* 88414d5 Merge pull request `#43756`_ from gtmanfred/2017.7.2
* f7df41f split build and install for pkg osx
- **PR** `#43585`_: (*rallytime*) Back-port `#43330`_ to 2017.7.2
@ *2017-09-19T17:33:34Z*
@ -3097,6 +3119,13 @@ Changes:
.. _`#475`: https://github.com/saltstack/salt/issues/475
.. _`#480`: https://github.com/saltstack/salt/issues/480
.. _`#495`: https://github.com/saltstack/salt/issues/495
.. _`#43581`: https://github.com/saltstack/salt/issues/43581
.. _`#43756`: https://github.com/saltstack/salt/pull/43756
.. _`#43847`: https://github.com/saltstack/salt/pull/43847
.. _`#43868`: https://github.com/saltstack/salt/pull/43868
.. _`#475`: https://github.com/saltstack/salt/issues/475
.. _`#480`: https://github.com/saltstack/salt/issues/480
.. _`#495`: https://github.com/saltstack/salt/issues/495
.. _`bp-37424`: https://github.com/saltstack/salt/pull/37424
.. _`bp-39366`: https://github.com/saltstack/salt/pull/39366
.. _`bp-41543`: https://github.com/saltstack/salt/pull/41543

View file

@ -0,0 +1,6 @@
============================
Salt 2017.7.3 Release Notes
============================
Version 2017.7.3 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.

View file

@ -27,7 +27,7 @@ Installing Dependencies
=======================
Both pygit2_ and GitPython_ are supported Python interfaces to git. If
compatible versions of both are installed, pygit2_ will preferred. In these
compatible versions of both are installed, pygit2_ will be preferred. In these
cases, GitPython_ can be forced using the :conf_master:`gitfs_provider`
parameter in the master config file.

View file

@ -33,3 +33,5 @@ Tutorials Index
* :ref:`The macOS (Maverick) Developer Step By Step Guide To Salt Installation <tutorial-macos-walk-through>`
* :ref:`SaltStack Walk-through <tutorial-salt-walk-through>`
* :ref:`Writing Salt Tests <tutorial-salt-testing>`
* :ref:`Running Salt States and Commands in Docker Containers <docker-sls>`
* :ref:`Preseed Minion with Accepted Key <tutorial-preseed-key>`

View file

@ -23,7 +23,7 @@ Supported Operating Systems
.. note::
In the event you do not see your distribution or version available please
review the develop branch on GitHub as it main contain updates that are
review the develop branch on GitHub as it may contain updates that are
not present in the stable release:
https://github.com/saltstack/salt-bootstrap/tree/develop

View file

@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER
echo -n -e "\033]0;Build: Install Salt\007"
sudo rm -rf $SRCDIR/build
sudo rm -rf $SRCDIR/dist
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
sudo $PYTHON $SRCDIR/setup.py install
############################################################################
# Build Package

View file

@ -67,7 +67,7 @@ _su_cmd() {
_get_pid() {
netstat $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
netstat -n $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
| sed -r -e "\|\s${SOCK_DIR}/minion_event_${MINION_ID_HASH}_pub\.ipc$|"'!d; s|/.*||; s/.*\s//;' \
| uniq
}
@ -155,7 +155,7 @@ start() {
printf "\nPROCESSES:\n" >&2
ps wwwaxu | grep '[s]alt-minion' >&2
printf "\nSOCKETS:\n" >&2
netstat $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
netstat -n $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
printf "\nLOG_FILE:\n" >&2
tail -n 20 "$LOG_FILE" >&2
printf "\nENVIRONMENT:\n" >&2

View file

@ -110,6 +110,10 @@ class _LDAPConnection(object):
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
if not anonymous:
if self.bindpw is None or len(self.bindpw) < 1:
raise CommandExecutionError(
'LDAP bind password is not set: password cannot be empty if auth.ldap.anonymous is False'
)
self.ldap.simple_bind_s(self.binddn, self.bindpw)
except Exception as ldap_error:
raise CommandExecutionError(

View file

@ -234,7 +234,7 @@ class CloudClient(object):
if a.get('provider', '')]
if providers:
_providers = opts.get('providers', {})
for provider in list(_providers):
for provider in list(_providers).copy():
if provider not in providers:
_providers.pop(provider)
return opts

View file

@ -1990,7 +1990,7 @@ def request_instance(vm_=None, call=None):
params[termination_key] = str(set_del_root_vol_on_destroy).lower()
# Use default volume type if not specified
if ex_blockdevicemappings and 'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and 'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
type_key = '{0}BlockDeviceMapping.{1}.Ebs.VolumeType'.format(spot_prefix, dev_index)
params[type_key] = rd_type

View file

@ -2400,9 +2400,10 @@ def create_attach_volumes(name, kwargs, call=None):
'-a or --action.'
)
volumes = kwargs['volumes']
volumes = literal_eval(kwargs['volumes'])
node = kwargs['node']
node_data = _expand_node(node)
conn = get_conn()
node_data = _expand_node(conn.ex_get_node(node))
letter = ord('a') - 1
for idx, volume in enumerate(volumes):
@ -2412,9 +2413,9 @@ def create_attach_volumes(name, kwargs, call=None):
'disk_name': volume_name,
'location': node_data['extra']['zone']['name'],
'size': volume['size'],
'type': volume['type'],
'image': volume['image'],
'snapshot': volume['snapshot']
'type': volume.get('type', 'pd-standard'),
'image': volume.get('image', None),
'snapshot': volume.get('snapshot', None)
}
create_disk(volume_dict, 'function')
@ -2580,7 +2581,10 @@ def create(vm_=None, call=None):
ssh_user, ssh_key = __get_ssh_credentials(vm_)
vm_['ssh_host'] = __get_host(node_data, vm_)
vm_['key_filename'] = ssh_key
__utils__['cloud.bootstrap'](vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(node_dict)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.trace(
@ -2598,7 +2602,7 @@ def create(vm_=None, call=None):
transport=__opts__['transport']
)
return node_dict
return ret
def update_pricing(kwargs=None, call=None):

View file

@ -607,6 +607,9 @@ class AsyncAuth(object):
raise tornado.gen.Return('retry')
else:
raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error')
if not isinstance(payload, dict):
log.error('Sign-in attempt failed: %s', payload)
raise tornado.gen.Return(False)
if 'load' in payload:
if 'ret' in payload['load']:
if not payload['load']['ret']:

View file

@ -622,10 +622,14 @@ class Client(object):
def on_header(hdr):
if write_body[1] is not False and write_body[2] is None:
if not hdr.strip() and 'Content-Type' not in write_body[1]:
# We've reached the end of the headers and not yet
# found the Content-Type. Reset the values we're
# tracking so that we properly follow the redirect.
write_body[0] = None
# If write_body[0] is True, then we are not following a
# redirect (initial response was a 200 OK). So there is
# no need to reset write_body[0].
if write_body[0] is not True:
# We are following a redirect, so we need to reset
# write_body[0] so that we properly follow it.
write_body[0] = None
# We don't need the HTTPHeaders object anymore
write_body[1] = False
return
# Try to find out what content type encoding is used if
@ -648,9 +652,12 @@ class Client(object):
# If write_body[0] is False, this means that this
# header is a 30x redirect, so we need to reset
# write_body[0] to None so that we parse the HTTP
# status code from the redirect target.
# status code from the redirect target. Additionally,
# we need to reset write_body[2] so that we inspect the
# headers for the Content-Type of the URL we're
# following.
if write_body[0] is write_body[1] is False:
write_body[0] = None
write_body[0] = write_body[2] = None
# Check the status line of the HTTP request
if write_body[0] is None:

View file

@ -373,7 +373,7 @@ def _file_lists(load, form):
# join UNC and non-UNC paths, just assume the original
# path.
log.trace(
'roots: %s is a UNCH path, using %s instead',
'roots: %s is a UNC path, using %s instead',
link_dest, abs_path
)
link_dest = abs_path

View file

@ -128,12 +128,12 @@ def setup_handlers():
callable(transport_registry.compute_scope)):
conf_extras = transport_registry.compute_scope(url, dsn_config)
dsn_config.update(conf_extras)
options.update({
'project': dsn_config['SENTRY_PROJECT'],
'servers': dsn_config['SENTRY_SERVERS'],
'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
'secret_key': dsn_config['SENTRY_SECRET_KEY']
})
options.update({
'project': dsn_config['SENTRY_PROJECT'],
'servers': dsn_config['SENTRY_SERVERS'],
'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
'secret_key': dsn_config['SENTRY_SECRET_KEY']
})
except ValueError as exc:
log.info(
'Raven failed to parse the configuration provided '

View file

@ -935,7 +935,7 @@ class Minion(MinionBase):
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue
self.jid_queue = jid_queue or []
if io_loop is None:
if HAS_ZMQ:

View file

@ -396,10 +396,8 @@ def _run(cmd,
msg = 'missing salt/utils/win_runas.py'
raise CommandExecutionError(msg)
if not isinstance(cmd, list):
cmd = salt.utils.shlex_split(cmd, posix=False)
cmd = ' '.join(cmd)
if isinstance(cmd, (list, tuple)):
cmd = ' '.join(cmd)
return win_runas(cmd, runas, password, cwd)
@ -536,11 +534,11 @@ def _run(cmd,
.format(cwd)
)
if python_shell is not True and not isinstance(cmd, list):
posix = True
if salt.utils.is_windows():
posix = False
cmd = salt.utils.shlex_split(cmd, posix=posix)
if python_shell is not True \
and not salt.utils.is_windows() \
and not isinstance(cmd, list):
cmd = salt.utils.shlex_split(cmd)
if not use_vt:
# This is where the magic happens
try:

View file

@ -118,7 +118,7 @@ def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
def has_value(key):
'''
Determine whether a named value exists in the grains dictionary.
Determine whether a key exists in the grains dictionary.
Given a grains dictionary that contains the following structure::
@ -134,7 +134,10 @@ def has_value(key):
salt '*' grains.has_value pkg:apache
'''
return True if salt.utils.traverse_dict_and_list(__grains__, key, False) else False
return salt.utils.traverse_dict_and_list(
__grains__,
key,
KeyError) is not KeyError
def items(sanitize=False):

View file

@ -123,7 +123,16 @@ def available():
salt '*' kmod.available
'''
ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2])
built_in_file = os.path.join(mod_dir, 'modules.builtin')
if os.path.exists(built_in_file):
with salt.utils.fopen(built_in_file, 'r') as f:
for line in f:
# Strip .ko from the basename
ret.append(os.path.basename(line)[:-4])
for root, dirs, files in os.walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:

View file

@ -129,7 +129,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs)
def refresh_db():
def refresh_db(**kwargs): # pylint: disable=unused-argument
'''
Updates the opkg database to latest packages based upon repositories
@ -456,7 +456,7 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
return remove(name=name, pkgs=pkgs)
def upgrade(refresh=True):
def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument
'''
Upgrades all packages via ``opkg upgrade``
@ -739,7 +739,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret
def list_upgrades(refresh=True):
def list_upgrades(refresh=True, **kwargs): # pylint: disable=unused-argument
'''
List all available package upgrades.
@ -908,7 +908,7 @@ def info_installed(*names, **kwargs):
return ret
def upgrade_available(name):
def upgrade_available(name, **kwargs): # pylint: disable=unused-argument
'''
Check whether or not an upgrade is available for a given package
@ -921,7 +921,7 @@ def upgrade_available(name):
return latest_version(name) != ''
def version_cmp(pkg1, pkg2, ignore_epoch=False):
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): # pylint: disable=unused-argument
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
@ -969,7 +969,7 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
return None
def list_repos():
def list_repos(**kwargs): # pylint: disable=unused-argument
'''
Lists all repos on /etc/opkg/*.conf
@ -1006,7 +1006,7 @@ def list_repos():
return repos
def get_repo(alias):
def get_repo(alias, **kwargs): # pylint: disable=unused-argument
'''
Display a repo from the /etc/opkg/*.conf
@ -1077,7 +1077,7 @@ def _mod_repo_in_file(alias, repostr, filepath):
fhandle.writelines(output)
def del_repo(alias):
def del_repo(alias, **kwargs): # pylint: disable=unused-argument
'''
Delete a repo from /etc/opkg/*.conf
@ -1191,7 +1191,7 @@ def mod_repo(alias, **kwargs):
refresh_db()
def file_list(*packages):
def file_list(*packages, **kwargs): # pylint: disable=unused-argument
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
@ -1212,7 +1212,7 @@ def file_list(*packages):
return {'errors': output['errors'], 'files': files}
def file_dict(*packages):
def file_dict(*packages, **kwargs): # pylint: disable=unused-argument
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
@ -1254,7 +1254,7 @@ def file_dict(*packages):
return {'errors': errors, 'packages': ret}
def owner(*paths):
def owner(*paths, **kwargs): # pylint: disable=unused-argument
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version>`, if a single

View file

@ -1408,6 +1408,12 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Apply requisites to high data
high_, req_in_errors = st_.state.requisite_in(high_)
if req_in_errors:
# This if statement should not be necessary if there were no errors,
# but it is required to get the unit tests to pass.
errors.extend(req_in_errors)
if errors:
__context__['retcode'] = 1
return errors

View file

@ -490,11 +490,14 @@ logger = logging.getLogger(__name__)
import cherrypy
try:
from cherrypy.lib import cpstats
except ImportError:
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
import yaml
import salt.ext.six as six

View file

@ -566,8 +566,15 @@ def ext_pillar(minion_id, repo, pillar_dirs):
False
)
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
# Map env if env == '__env__' before checking the env value
if env == '__env__':
env = opts.get('pillarenv') \
or opts.get('environment') \
or opts.get('git_pillar_base')
log.debug('__env__ maps to %s', env)
# If pillarenv is set, only grab pillars with that match pillarenv
if opts['pillarenv'] and env != opts['pillarenv'] and env != '__env__':
if opts['pillarenv'] and env != opts['pillarenv']:
log.debug(
'env \'%s\' for pillar dir \'%s\' does not match '
'pillarenv \'%s\', skipping',
@ -586,12 +593,6 @@ def ext_pillar(minion_id, repo, pillar_dirs):
'env \'%s\'', pillar_dir, env
)
if env == '__env__':
env = opts.get('pillarenv') \
or opts.get('environment') \
or opts.get('git_pillar_base')
log.debug('__env__ maps to %s', env)
pillar_roots = [pillar_dir]
if __opts__['git_pillar_includes']:

View file

@ -363,7 +363,8 @@ def statelist(states_dict, sid_excludes=frozenset(['include', 'exclude'])):
REQUISITES = set([
'require', 'require_in', 'watch', 'watch_in', 'use', 'use_in', 'listen', 'listen_in'
'require', 'require_in', 'watch', 'watch_in', 'use', 'use_in', 'listen', 'listen_in',
'onchanges', 'onchanges_in', 'onfail', 'onfail_in'
])
@ -405,8 +406,8 @@ def rename_state_ids(data, sls, is_extend=False):
del data[sid]
REQUIRE = set(['require', 'watch', 'listen'])
REQUIRE_IN = set(['require_in', 'watch_in', 'listen_in'])
REQUIRE = set(['require', 'watch', 'listen', 'onchanges', 'onfail'])
REQUIRE_IN = set(['require_in', 'watch_in', 'listen_in', 'onchanges_in', 'onfail_in'])
EXTENDED_REQUIRE = {}
EXTENDED_REQUIRE_IN = {}
@ -414,8 +415,8 @@ from itertools import chain
# To avoid cycles among states when each state requires the one before it:
# explicit require/watch/listen can only contain states before it
# explicit require_in/watch_in/listen_in can only contain states after it
# explicit require/watch/listen/onchanges/onfail can only contain states before it
# explicit require_in/watch_in/listen_in/onchanges_in/onfail_in can only contain states after it
def add_implicit_requires(data):
def T(sid, state): # pylint: disable=C0103
@ -449,7 +450,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_after:
raise SaltRenderError(
'State({0}) can\'t require/watch/listen a state({1}) defined '
'State({0}) can\'t require/watch/listen/onchanges/onfail a state({1}) defined '
'after it!'.format(tag, T(rsid, rstate))
)
@ -459,7 +460,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_before:
raise SaltRenderError(
'State({0}) can\'t require_in/watch_in/listen_in a state({1}) '
'State({0}) can\'t require_in/watch_in/listen_in/onchanges_in/onfail_in a state({1}) '
'defined before it!'.format(tag, T(rsid, rstate))
)
@ -571,7 +572,7 @@ def extract_state_confs(data, is_extend=False):
if not is_extend and state_id in STATE_CONF_EXT:
extend = STATE_CONF_EXT[state_id]
for requisite in 'require', 'watch', 'listen':
for requisite in 'require', 'watch', 'listen', 'onchanges', 'onfail':
if requisite in extend:
extend[requisite] += to_dict[state_id].get(requisite, [])
to_dict[state_id].update(STATE_CONF_EXT[state_id])

View file

@ -309,7 +309,7 @@ def _format_job_instance(job):
'Arguments': list(job.get('arg', [])),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []),
'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')}
if 'metadata' in job:

View file

@ -4,29 +4,43 @@ Return salt data via mattermost
.. versionadded:: 2017.7.0
The following fields can be set in the minion conf file::
The following fields can be set in the minion conf file:
.. code-block:: yaml
mattermost.hook (required)
mattermost.username (optional)
mattermost.channel (optional)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
mattermost.channel
mattermost.hook
mattermost.username
mattermost settings may also be configured as:
.. code-block:: yaml
mattermost:
channel: RoomName
hook: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username: user
channel: RoomName
hook: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username: user
To use the mattermost returner, append '--return mattermost' to the salt command.
.. code-block:: bash
salt '*' test.ping --return mattermost
To override individual configuration items, append --return_kwargs '{'key:': 'value'}' to the salt command.
.. code-block:: bash
salt '*' test.ping --return mattermost --return_kwargs '{'channel': '#random'}'
'''
from __future__ import absolute_import
@ -53,6 +67,7 @@ __virtualname__ = 'mattermost'
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
@ -118,6 +133,7 @@ def returner(ret):
def event_return(events):
'''
Send the events to a mattermost room.
:param events: List of events
:return: Boolean if messages were sent successfully.
'''
@ -153,6 +169,7 @@ def post_message(channel,
hook):
'''
Send a message to a mattermost room.
:param channel: The room name.
:param message: The message to send to the mattermost room.
:param username: Specify who the message is from.

View file

@ -180,7 +180,7 @@ def _format_job_instance(job):
'Arguments': json.loads(job.get('arg', '[]')),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []),
'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')}
# TODO: Add Metadata support when it is merged from develop
return ret

View file

@ -98,6 +98,7 @@ from __future__ import absolute_import
# Python
import logging
import re
import copy
# Salt libs
import salt.utils.minions
@ -151,7 +152,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
except LookupError:
continue
minion_res = __opts__.get('roster_defaults', {}).copy()
minion_res = copy.deepcopy(__opts__.get('roster_defaults', {}))
for param, order in roster_order.items():
if not isinstance(order, (list, tuple)):
order = [order]

View file

@ -21,6 +21,7 @@ usually located at /etc/salt/cloud. For example, add the following:
# Import python libs
from __future__ import absolute_import
import os
import copy
# Import Salt libs
import salt.loader
@ -63,7 +64,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
))
preferred_ip = extract_ipv4(roster_order, ip_list)
ret[minion_id] = __opts__.get('roster_defaults', {})
ret[minion_id] = copy.deepcopy(__opts__.get('roster_defaults', {}))
ret[minion_id].update({'host': preferred_ip})
ssh_username = salt.utils.cloud.ssh_usernames(vm_, cloud_opts)

View file

@ -15,6 +15,7 @@ When you want to use host globs for target matching, use ``--roster clustershell
# Import python libs
from __future__ import absolute_import
import socket
import copy
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
REQ_ERROR = None
@ -43,7 +44,7 @@ def targets(tgt, tgt_type='glob', **kwargs):
for host, addr in host_addrs.items():
addr = str(addr)
ret[addr] = __opts__.get('roster_defaults', {}).copy()
ret[addr] = copy.deepcopy(__opts__.get('roster_defaults', {}))
for port in ports:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

View file

@ -7,6 +7,7 @@ from __future__ import absolute_import
# Import python libs
import fnmatch
import re
import copy
# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
@ -142,7 +143,7 @@ class RosterMatcher(object):
'''
Return the configured ip
'''
ret = __opts__.get('roster_defaults', {})
ret = copy.deepcopy(__opts__.get('roster_defaults', {}))
if isinstance(self.raw[minion], string_types):
ret.update({'host': self.raw[minion]})
return ret

View file

@ -13,6 +13,7 @@ When you want to use a range query for target matching, use ``--roster range``.
'''
from __future__ import absolute_import
import fnmatch
import copy
import logging
log = logging.getLogger(__name__)
@ -68,7 +69,7 @@ def targets(tgt, tgt_type='range', **kwargs):
def target_range(tgt, hosts):
ret = {}
for host in hosts:
ret[host] = __opts__.get('roster_defaults', {}).copy()
ret[host] = copy.deepcopy(__opts__.get('roster_defaults', {}))
ret[host].update({'host': host})
if __opts__.get('ssh_user'):
ret[host].update({'user': __opts__['ssh_user']})
@ -79,7 +80,7 @@ def target_glob(tgt, hosts):
ret = {}
for host in hosts:
if fnmatch.fnmatch(tgt, host):
ret[host] = __opts__.get('roster_defaults', {}).copy()
ret[host] = copy.deepcopy(__opts__.get('roster_defaults', {}))
ret[host].update({'host': host})
if __opts__.get('ssh_user'):
ret[host].update({'user': __opts__['ssh_user']})

View file

@ -7,6 +7,7 @@ Scan a netmask or ipaddr for open ssh ports
from __future__ import absolute_import
import socket
import logging
import copy
# Import salt libs
import salt.utils.network
@ -55,7 +56,7 @@ class RosterMatcher(object):
pass
for addr in addrs:
addr = str(addr)
ret[addr] = __opts__.get('roster_defaults', {}).copy()
ret[addr] = copy.deepcopy(__opts__.get('roster_defaults', {}))
log.trace('Scanning host: {0}'.format(addr))
for port in ports:
log.trace('Scanning port: {0}'.format(port))

View file

@ -542,7 +542,7 @@ def _format_job_instance(job):
'Arguments': list(job.get('arg', [])),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []),
'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')}
if 'metadata' in job:

View file

@ -772,7 +772,8 @@ def extracted(name,
# Get rid of "file://" from start of source_match
source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path))
if not os.path.isfile(source_match):
ret['comment'] = 'Source file \'{0}\' does not exist'.format(source_match)
ret['comment'] = 'Source file \'{0}\' does not exist'.format(
salt.utils.url.redact_http_basic_auth(source_match))
return ret
valid_archive_formats = ('tar', 'rar', 'zip')
@ -924,7 +925,7 @@ def extracted(name,
if __opts__['test']:
ret['result'] = None
ret['comment'] = (
'Archive {0} would be ached (if necessary) and checked to '
'Archive {0} would be cached (if necessary) and checked to '
'discover if extraction is needed'.format(
salt.utils.url.redact_http_basic_auth(source_match)
)
@ -938,7 +939,7 @@ def extracted(name,
# file states would be unavailable.
ret['comment'] = (
'Unable to cache {0}, file.cached state not available'.format(
source_match
salt.utils.url.redact_http_basic_auth(source_match)
)
)
return ret
@ -950,7 +951,9 @@ def extracted(name,
skip_verify=skip_verify,
saltenv=__env__)
except Exception as exc:
msg = 'Failed to cache {0}: {1}'.format(source_match, exc.__str__())
msg = 'Failed to cache {0}: {1}'.format(
salt.utils.url.redact_http_basic_auth(source_match),
exc.__str__())
log.exception(msg)
ret['comment'] = msg
return ret
@ -1181,7 +1184,8 @@ def extracted(name,
if not ret['result']:
ret['comment'] = \
'{0} does not match the desired source_hash {1}'.format(
source_match, source_sum['hsum']
salt.utils.url.redact_http_basic_auth(source_match),
source_sum['hsum']
)
return ret

View file

@ -159,7 +159,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs):
ret['result'] = None
return ret
__salt__['disk.format_'](name, fs_type, force=force, **kwargs)
__salt__['disk.format'](name, fs_type, force=force, **kwargs)
# Repeat fstype check up to 10 times with 3s sleeping between each
# to avoid detection failing although mkfs has succeeded

View file

@ -122,6 +122,7 @@ def present(
kernel_id=None,
ramdisk_id=None,
block_device_mappings=None,
delete_on_termination=None,
instance_monitoring=False,
spot_price=None,
instance_profile_name=None,
@ -186,8 +187,10 @@ def present(
Default is standard.
delete_on_termination
Indicates whether to delete the volume on instance termination (true) or
not (false).
Whether the volume should be explicitly marked for deletion when its instance is
terminated (True), or left around (False). If not provided, or None is explicitly passed,
the default AWS behaviour is used, which is True for ROOT volumes of instances, and
False for all others.
iops
For Provisioned IOPS (SSD) volumes only. The number of I/O operations per
@ -268,6 +271,7 @@ def present(
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
block_device_mappings=block_device_mappings,
delete_on_termination=delete_on_termination,
instance_monitoring=instance_monitoring,
spot_price=spot_price,
instance_profile_name=instance_profile_name,

View file

@ -6573,7 +6573,8 @@ def cached(name,
and parsed.scheme in salt.utils.files.REMOTE_PROTOS:
ret['comment'] = (
'Unable to verify upstream hash of source file {0}, please set '
'source_hash or set skip_verify to True'.format(name)
'source_hash or set skip_verify to True'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret
@ -6712,13 +6713,14 @@ def cached(name,
# for the 2017.7 release cycle.
#source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = exc.__str__()
ret['comment'] = salt.utils.url.redact_http_basic_auth(exc.__str__())
return ret
if not local_copy:
ret['comment'] = (
'Failed to cache {0}, check minion log for more '
'information'.format(name)
'information'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret

View file

@ -178,6 +178,7 @@ from __future__ import absolute_import
import salt.loader
import salt.utils
import salt.utils.jid
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
from salt.exceptions import SaltInvocationError
@ -252,7 +253,7 @@ def run(**kwargs):
if 'name' in kwargs:
kwargs.pop('name')
ret = {
'name': kwargs.keys(),
'name': list(kwargs),
'changes': {},
'comment': '',
'result': None,
@ -314,22 +315,31 @@ def _call_function(name, returner=None, **kwargs):
:return:
'''
argspec = salt.utils.args.get_function_argspec(__salt__[name])
# func_kw is initialized to a dictionary of keyword arguments the function to be run accepts
func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code
argspec.defaults or []))
# func_args is initialized to a list of positional arguments that the function to be run accepts
func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
arg_type, na_type, kw_type = [], {}, False
for funcset in reversed(kwargs.get('func_args') or []):
if not isinstance(funcset, dict):
kw_type = True
if kw_type:
if isinstance(funcset, dict):
arg_type += funcset.values()
na_type.update(funcset)
else:
arg_type.append(funcset)
# We are just receiving a list of args to the function to be run, so just append
# those to the arg list that we will pass to the func.
arg_type.append(funcset)
else:
func_kw.update(funcset)
for kwarg_key in six.iterkeys(funcset):
# We are going to pass in a keyword argument. The trick here is to make certain
# that if we find that in the *args* list that we pass it there and not as a kwarg
if kwarg_key in func_args:
arg_type.append(funcset[kwarg_key])
continue
else:
# Otherwise, we're good and just go ahead and pass the keyword/value pair into
# the kwargs list to be run.
func_kw.update(funcset)
arg_type.reverse()
_exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
_passed_prm = len(arg_type)
missing = []

View file

@ -33,7 +33,7 @@ Ensure an ACL does not exist
.. code-block:: yaml
removeAcl:
removeAcl:
win_dacl.absent:
- name: HKEY_LOCAL_MACHINE\\SOFTWARE\\mykey
- objectType: Registry
@ -50,11 +50,11 @@ Ensure an object is inheriting permissions
.. code-block:: yaml
eInherit:
win_dacl.enableinheritance:
- name: HKEY_LOCAL_MACHINE\\SOFTWARE\\mykey
- objectType: Registry
- clear_existing_acl: True
eInherit:
win_dacl.enableinheritance:
- name: HKEY_LOCAL_MACHINE\\SOFTWARE\\mykey
- objectType: Registry
- clear_existing_acl: True
Ensure an object is not inheriting permissions
parameters:
@ -62,13 +62,13 @@ Ensure an object is not inheriting permissions
objectType - Registry/File/Directory
copy_inherited_acl - True/False - if inheritance is enabled, should the inherited permissions be copied to the ACL when inheritance is disabled
.. code-block:: yaml
.. code-block:: yaml
dInherit:
win_dacl.disableinheritance:
- name: HKEY_LOCAL_MACHINE\\SOFTWARE\\mykey
- objectType: Registry
- copy_inherited_acl: False
dInherit:
win_dacl.disableinheritance:
- name: HKEY_LOCAL_MACHINE\\SOFTWARE\\mykey
- objectType: Registry
- copy_inherited_acl: False
'''
@ -119,7 +119,7 @@ def present(name, objectType, user, permission, acetype, propagation):
def absent(name, objectType, user, permission, acetype, propagation):
'''
Ensure a Linux ACL does not exist
Ensure an ACL does not exist
'''
ret = {'name': name,
'result': True,

View file

@ -6,7 +6,7 @@ State to manage monitoring in Zenoss.
This state module depends on the 'zenoss' Salt execution module.
Allows for setting a state of minions in Zenoss using the Zenoss API. Currently Zenoss 4.x is supported.
Allows for setting a state of minions in Zenoss using the Zenoss API. Currently Zenoss 4.x and 5.x are supported.
.. code-block:: yaml
@ -30,6 +30,8 @@ def __virtual__():
'''
if 'zenoss.add_device' in __salt__:
return 'zenoss'
else:
return False, "The zenoss execution module is not available"
def monitored(name, device_class=None, collector='localhost', prod_state=None):
@ -57,21 +59,28 @@ def monitored(name, device_class=None, collector='localhost', prod_state=None):
ret['comment'] = '{0} is already monitored'.format(name)
# if prod_state is set, ensure it matches with the current state
if prod_state:
if device['productionState'] != prod_state:
if prod_state is not None and device['productionState'] != prod_state:
if __opts__['test']:
ret['comment'] = '{0} is already monitored but prodState will be updated'.format(name)
ret['result'] = None
else:
__salt__['zenoss.set_prod_state'](prod_state, name)
ret['changes'] = {'old': 'prodState == {0}'.format(device['productionState']), 'new': 'prodState == {0}'.format(prod_state)}
ret['comment'] = '{0} is already monitored but prodState was incorrect, setting to Production'.format(name)
ret['comment'] = '{0} is already monitored but prodState was updated'.format(name)
ret['changes'] = {
'old': 'prodState == {0}'.format(device['productionState']),
'new': 'prodState == {0}'.format(prod_state)
}
return ret
# Device not yet in Zenoss
if __opts__['test']:
ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
ret['changes'] = {'old': 'monitored == False', 'new': 'monitored == True'}
ret['result'] = None
return ret
# Device not yet in Zenoss. Add and check result
# Add and check result
if __salt__['zenoss.add_device'](name, device_class, collector, prod_state):
ret['result'] = True
ret['changes'] = {'old': 'monitored == False', 'new': 'monitored == True'}

View file

@ -623,6 +623,17 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if '\0' in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':

View file

@ -596,6 +596,17 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if '\0' in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':

View file

@ -65,7 +65,7 @@ def format_job_instance(job):
'Arguments': list(job.get('arg', [])),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []),
'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')}
if 'metadata' in job:

View file

@ -186,7 +186,7 @@ def object_to_dict(obj):
ret = obj
else:
ret = {}
for item in dir(obj):
for item in obj.__dict__:
if item.startswith('_'):
continue
# This is ugly, but inspect.isclass() doesn't seem to work

View file

@ -168,8 +168,13 @@ def wrap_tmpl_func(render_str):
if six.PY2:
output = output.encode(SLS_ENCODING)
if salt.utils.is_windows():
newline = False
if output.endswith(('\n', os.linesep)):
newline = True
# Write out with Windows newlines
output = os.linesep.join(output.splitlines())
if newline:
output += os.linesep
except SaltRenderError as exc:
log.error("Rendering exception occurred: {0}".format(exc))
@ -293,7 +298,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
# http://jinja.pocoo.org/docs/api/#unicode
tmplstr = tmplstr.decode(SLS_ENCODING)
if tmplstr.endswith('\n'):
if tmplstr.endswith(os.linesep):
newline = True
if not saltenv:
@ -462,7 +467,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
# Workaround a bug in Jinja that removes the final newline
# (https://github.com/mitsuhiko/jinja2/issues/75)
if newline:
output += '\n'
output += os.linesep
return output

View file

@ -480,22 +480,15 @@ def clean_path(root, path, subdir=False):
return ''
def clean_id(id_):
'''
Returns if the passed id is clean.
'''
if re.search(r'\.\.\{sep}'.format(sep=os.sep), id_):
return False
return True
def valid_id(opts, id_):
'''
Returns if the passed id is valid
'''
try:
return bool(clean_path(opts['pki_dir'], id_)) and clean_id(id_)
except (AttributeError, KeyError, TypeError) as e:
if any(x in id_ for x in ('/', '\\', '\0')):
return False
return bool(clean_path(opts['pki_dir'], id_))
except (AttributeError, KeyError, TypeError):
return False

View file

@ -183,17 +183,23 @@ class WriteSaltVersion(Command):
'''
def run(self):
if not os.path.exists(SALT_VERSION_HARDCODED):
if not os.path.exists(SALT_VERSION_HARDCODED) or self.distribution.with_salt_version:
# Write the version file
if getattr(self.distribution, 'salt_version_hardcoded_path', None) is None:
print('This command is not meant to be called on it\'s own')
exit(1)
if not self.distribution.with_salt_version:
salt_version = __saltstack_version__ # pylint: disable=undefined-variable
else:
from salt.version import SaltStackVersion
salt_version = SaltStackVersion.parse(self.distribution.with_salt_version)
# pylint: disable=E0602
open(self.distribution.salt_version_hardcoded_path, 'w').write(
INSTALL_VERSION_TEMPLATE.format(
date=DATE,
full_version_info=__saltstack_version__.full_info
full_version_info=salt_version.full_info
)
)
# pylint: enable=E0602
@ -731,6 +737,13 @@ class Build(build):
def run(self):
# Run build.run function
build.run(self)
if getattr(self.distribution, 'with_salt_version', False):
# Write the hardcoded salt version module salt/_version.py
self.distribution.salt_version_hardcoded_path = os.path.join(
self.build_lib, 'salt', '_version.py'
)
self.run_command('write_salt_version')
if getattr(self.distribution, 'running_salt_install', False):
# If our install attribute is present and set to True, we'll go
# ahead and write our install time python modules.
@ -839,6 +852,7 @@ class SaltDistribution(distutils.dist.Distribution):
('ssh-packaging', None, 'Run in SSH packaging mode'),
('salt-transport=', None, 'The transport to prepare salt for. Choices are \'zeromq\' '
'\'raet\' or \'both\'. Defaults to \'zeromq\'', 'zeromq')] + [
('with-salt-version=', None, 'Set a fixed version for Salt instead calculating it'),
# Salt's Paths Configuration Settings
('salt-root-dir=', None,
'Salt\'s pre-configured root directory'),
@ -893,6 +907,9 @@ class SaltDistribution(distutils.dist.Distribution):
self.salt_spm_pillar_dir = None
self.salt_spm_reactor_dir = None
# Salt version
self.with_salt_version = None
self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
self.salt_version = __version__ # pylint: disable=undefined-variable
self.description = 'Portable, distributed, remote execution and configuration management system'

View file

@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ShellCase
class SPMTest(ShellCase):
'''
Test spm script
'''
def test_spm_help(self):
'''
test --help argument for spm
'''
expected_args = ['--version', '--assume-yes', '--help']
output = self.run_spm('--help')
for arg in expected_args:
self.assertIn(arg, ''.join(output))
def test_spm_bad_arg(self):
'''
test correct output when bad argument passed
'''
expected_args = ['--version', '--assume-yes', '--help']
output = self.run_spm('doesnotexist')
for arg in expected_args:
self.assertIn(arg, ''.join(output))

View file

@ -442,6 +442,16 @@ class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixi
catch_stderr=catch_stderr,
timeout=timeout)
def run_spm(self, arg_str, with_retcode=False, catch_stderr=False, timeout=60): # pylint: disable=W0221
'''
Execute spm
'''
return self.run_script('spm',
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, timeout=60): # pylint: disable=W0221
'''
Execute salt-ssh

View file

@ -132,6 +132,9 @@ class MockState(object):
ret = ret
return True
def requisite_in(self, data): # pylint: disable=unused-argument
return data, []
class HighState(object):
'''
Mock HighState class

View file

@ -140,7 +140,8 @@ class AugeasTestCase(TestCase, LoaderModuleMockMixin):
'augeas.method_map': self.mock_method_map}
with patch.dict(augeas.__salt__, mock_dict_):
mock_filename = MagicMock(return_value='/etc/services')
with patch.object(augeas, '_workout_filename', mock_filename):
with patch.object(augeas, '_workout_filename', mock_filename), \
patch('os.path.isfile', MagicMock(return_value=True)):
with patch('salt.utils.fopen', MagicMock(mock_open)):
mock_diff = MagicMock(return_value=['+ zabbix-agent'])
with patch('difflib.unified_diff', mock_diff):

View file

@ -100,7 +100,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin):
# Test state return when block device format fails
with patch.dict(blockdev.__salt__, {'cmd.run': MagicMock(return_value=mock_ext4),
'disk.format_': MagicMock(return_value=True)}):
'disk.format': MagicMock(return_value=True)}):
comt = ('Failed to format {0}'.format(name))
ret.update({'comment': comt, 'result': False})
with patch.object(salt.utils, 'which',

View file

@ -6,6 +6,7 @@
# Import Python Libs
from __future__ import absolute_import
from inspect import ArgSpec
import logging
# Import Salt Libs
import salt.states.module as module
@ -20,6 +21,8 @@ from tests.support.mock import (
patch
)
log = logging.getLogger(__name__)
CMD = 'foo.bar'
@ -91,8 +94,9 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
with patch.dict(module.__salt__, {}, clear=True):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
ret = module.run(**{CMD: None})
assert ret['comment'] == "Unavailable function: {0}.".format(CMD)
assert not ret['result']
if ret['comment'] != "Unavailable function: {0}.".format(CMD) \
or ret['result']:
self.fail('module.run did not fail as expected: {0}'.format(ret))
def test_module_run_hidden_varargs(self):
'''
@ -111,8 +115,9 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
'''
with patch.dict(module.__opts__, {'test': True, 'use_superseded': ['module.run']}):
ret = module.run(**{CMD: None})
assert ret['comment'] == "Function {0} to be executed.".format(CMD)
assert ret['result']
if ret['comment'] != "Function {0} to be executed.".format(CMD) \
or not ret['result']:
self.fail('module.run failed: {0}'.format(ret))
def test_run_missing_arg(self):
'''
@ -122,7 +127,10 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
with patch.dict(module.__salt__, {CMD: _mocked_func_named}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
ret = module.run(**{CMD: None})
assert ret['comment'] == "'{0}' failed: Function expects 1 parameters, got only 0".format(CMD)
expected_comment = \
"'{0}' failed: Function expects 1 parameters, got only 0".format(CMD)
if ret['comment'] != expected_comment:
self.fail('module.run did not fail as expected: {0}'.format(ret))
def test_run_correct_arg(self):
'''
@ -132,16 +140,17 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
with patch.dict(module.__salt__, {CMD: _mocked_func_named}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
ret = module.run(**{CMD: ['Fred']})
assert ret['comment'] == '{0}: Success'.format(CMD)
assert ret['result']
if ret['comment'] != '{0}: Success'.format(CMD) or not ret['result']:
self.fail('module.run failed: {0}'.format(ret))
def test_run_unexpected_keywords(self):
with patch.dict(module.__salt__, {CMD: _mocked_func_args}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
ret = module.run(**{CMD: [{'foo': 'bar'}]})
assert ret['comment'] == "'{0}' failed: {1}() got an unexpected keyword argument " \
"'foo'".format(CMD, module.__salt__[CMD].__name__)
assert not ret['result']
expected_comment = "'{0}' failed: {1}() got an unexpected keyword argument " \
"'foo'".format(CMD, module.__salt__[CMD].__name__)
if ret['comment'] != expected_comment or ret['result']:
self.fail('module.run did not fail as expected: {0}'.format(ret))
def test_run_args(self):
'''
@ -150,7 +159,17 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
'''
with patch.dict(module.__salt__, {CMD: _mocked_func_args}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
assert module.run(**{CMD: ['foo', 'bar']})['result']
try:
ret = module.run(**{CMD: ['foo', 'bar']})
except Exception as exc:
log.exception('test_run_none_return: raised exception')
self.fail('module.run raised exception: {0}'.format(exc))
if not ret['result']:
log.exception(
'test_run_none_return: test failed, result: %s',
ret
)
self.fail('module.run failed: {0}'.format(ret))
def test_run_none_return(self):
'''
@ -159,7 +178,17 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
'''
with patch.dict(module.__salt__, {CMD: _mocked_none_return}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
assert module.run(**{CMD: None})['result']
try:
ret = module.run(**{CMD: None})
except Exception as exc:
log.exception('test_run_none_return: raised exception')
self.fail('module.run raised exception: {0}'.format(exc))
if not ret['result']:
log.exception(
'test_run_none_return: test failed, result: %s',
ret
)
self.fail('module.run failed: {0}'.format(ret))
def test_run_typed_return(self):
'''
@ -169,7 +198,18 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
for val in [1, 0, 'a', '', (1, 2,), (), [1, 2], [], {'a': 'b'}, {}, True, False]:
with patch.dict(module.__salt__, {CMD: _mocked_none_return}):
with patch.dict(module.__opts__, {'use_superseded': ['module.run']}):
assert module.run(**{CMD: [{'ret': val}]})['result']
log.debug('test_run_typed_return: trying %s', val)
try:
ret = module.run(**{CMD: [{'ret': val}]})
except Exception as exc:
log.exception('test_run_typed_return: raised exception')
self.fail('module.run raised exception: {0}'.format(exc))
if not ret['result']:
log.exception(
'test_run_typed_return: test failed, result: %s',
ret
)
self.fail('module.run failed: {0}'.format(ret))
def test_run_batch_call(self):
'''
@ -182,7 +222,18 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
'second': _mocked_none_return,
'third': _mocked_none_return}, clear=True):
for f_name in module.__salt__:
assert module.run(**{f_name: None})['result']
log.debug('test_run_batch_call: trying %s', f_name)
try:
ret = module.run(**{f_name: None})
except Exception as exc:
log.exception('test_run_batch_call: raised exception')
self.fail('module.run raised exception: {0}'.format(exc))
if not ret['result']:
log.exception(
'test_run_batch_call: test failed, result: %s',
ret
)
self.fail('module.run failed: {0}'.format(ret))
def test_module_run_module_not_available(self):
'''

View file

@ -177,7 +177,7 @@ class TestGetTemplate(TestCase):
out = render_jinja_tmpl(
fp_.read(),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(out, 'world\n')
self.assertEqual(out, 'world' + os.linesep)
def test_fallback_noloader(self):
'''
@ -189,7 +189,7 @@ class TestGetTemplate(TestCase):
out = render_jinja_tmpl(
fp_.read(),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(out, 'Hey world !a b !\n')
self.assertEqual(out, 'Hey world !a b !' + os.linesep)
def test_saltenv(self):
'''
@ -208,7 +208,7 @@ class TestGetTemplate(TestCase):
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Salt', saltenv='test', salt=self.local_salt))
self.assertEqual(out, 'Hey world !Hi Salt !\n')
self.assertEqual(out, 'Hey world !Hi Salt !' + os.linesep)
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
def test_macro_additional_log_for_generalexc(self):
@ -217,7 +217,7 @@ class TestGetTemplate(TestCase):
more output from trace.
'''
expected = r'''Jinja error:.*division.*
.*/macrogeneral\(2\):
.*macrogeneral\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{ 1/0 \}\} <======================
@ -241,7 +241,7 @@ class TestGetTemplate(TestCase):
more output from trace.
'''
expected = r'''Jinja variable 'b' is undefined
.*/macroundefined\(2\):
.*macroundefined\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{b.greetee\}\} <-- error is here <======================
@ -264,7 +264,7 @@ class TestGetTemplate(TestCase):
If we failed in a macro, get more output from trace.
'''
expected = r'''Jinja syntax error: expected token .*end.*got '-'.*
.*/macroerror\(2\):
.*macroerror\(2\):
---
# macro
\{% macro mymacro\(greeting, greetee='world'\) -\} <-- error is here <======================
@ -294,7 +294,7 @@ class TestGetTemplate(TestCase):
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
self.assertEqual(out, u'Hey world !Hi Sàlt !\n')
self.assertEqual(out, salt.utils.to_unicode('Hey world !Hi Sàlt !' + os.linesep))
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii')
@ -305,7 +305,7 @@ class TestGetTemplate(TestCase):
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
self.assertEqual(u'Assunção\n', out)
self.assertEqual(u'Assunção' + os.linesep, out)
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
@skipIf(HAS_TIMELIB is False, 'The `timelib` library is not installed.')
@ -340,8 +340,8 @@ class TestGetTemplate(TestCase):
with salt.utils.fopen(out['data']) as fp:
result = fp.read()
if six.PY2:
result = result.decode('utf-8')
self.assertEqual(u'Assunção\n', result)
result = salt.utils.to_unicode(result)
self.assertEqual(salt.utils.to_unicode('Assunção' + os.linesep), result)
def test_get_context_has_enough_context(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'

View file

@ -192,7 +192,7 @@ class PillarTestCase(TestCase):
def _setup_test_topfile_mocks(self, Matcher, get_file_client,
nodegroup_order, glob_order):
# Write a simple topfile and two pillar state files
self.top_file = tempfile.NamedTemporaryFile(dir=TMP)
self.top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
s = '''
base:
group:
@ -209,19 +209,19 @@ base:
'''.format(nodegroup_order=nodegroup_order, glob_order=glob_order)
self.top_file.write(salt.utils.to_bytes(s))
self.top_file.flush()
self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP)
self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
self.ssh_file.write(b'''
ssh:
foo
''')
self.ssh_file.flush()
self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP)
self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
self.ssh_minion_file.write(b'''
ssh:
bar
''')
self.ssh_minion_file.flush()
self.generic_file = tempfile.NamedTemporaryFile(dir=TMP)
self.generic_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
self.generic_file.write(b'''
generic:
key1:
@ -231,7 +231,7 @@ generic:
sub_key1: []
''')
self.generic_file.flush()
self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP)
self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
self.generic_minion_file.write(b'''
generic:
key1:
@ -260,7 +260,7 @@ generic:
client.get_state.side_effect = get_state
def _setup_test_include_mocks(self, Matcher, get_file_client):
self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP)
self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
top_file.write(b'''
base:
'*':
@ -271,21 +271,21 @@ base:
- test
''')
top_file.flush()
self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP)
self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
init_sls.write(b'''
include:
- test.sub1
- test.sub2
''')
init_sls.flush()
self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP)
self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
sub1_sls.write(b'''
p1:
- value1_1
- value1_2
''')
sub1_sls.flush()
self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP)
self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
sub2_sls.write(b'''
p1:
- value1_3

View file

@ -21,6 +21,7 @@ import salt.utils.parsers
import salt.log.setup as log
import salt.config
import salt.syspaths
import salt.utils
class ErrorMock(object): # pylint: disable=too-few-public-methods
@ -78,7 +79,8 @@ class LogSetupMock(object):
'''
Mock
'''
return None
import multiprocessing
return multiprocessing.Queue()
def setup_multiprocessing_logging_listener(self, opts, *args): # pylint: disable=invalid-name,unused-argument
'''
@ -488,6 +490,7 @@ class LogSettingsParserTests(TestCase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.is_windows(), 'Windows uses a logging listener')
class MasterOptionParserTestCase(LogSettingsParserTests):
'''
Tests parsing Salt Master options
@ -514,6 +517,7 @@ class MasterOptionParserTestCase(LogSettingsParserTests):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.is_windows(), 'Windows uses a logging listener')
class MinionOptionParserTestCase(LogSettingsParserTests):
'''
Tests parsing Salt Minion options
@ -567,6 +571,7 @@ class ProxyMinionOptionParserTestCase(LogSettingsParserTests):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.is_windows(), 'Windows uses a logging listener')
class SyndicOptionParserTestCase(LogSettingsParserTests):
'''
Tests parsing Salt Syndic options

View file

@ -99,7 +99,7 @@ class UtilsTestCase(TestCase):
def test_path_join(self):
with patch('salt.utils.is_windows', return_value=False) as is_windows_mock:
self.assertFalse(is_windows_mock.return_value)
expected_path = '/a/b/c/d'
expected_path = os.path.join(os.sep + 'a', 'b', 'c', 'd')
ret = utils.path_join('/a/b/c', 'd')
self.assertEqual(ret, expected_path)
@ -985,7 +985,8 @@ class UtilsTestCase(TestCase):
ret = utils.daemonize_if({})
self.assertEqual(None, ret)
with patch('salt.utils.daemonize'):
with patch('salt.utils.daemonize'), \
patch('sys.platform', 'not windows'):
utils.daemonize_if({})
self.assertTrue(utils.daemonize.called)
# pylint: enable=assignment-from-none

View file

@ -63,6 +63,16 @@ class TestVerify(TestCase):
opts = {'pki_dir': '/tmp/whatever'}
self.assertFalse(valid_id(opts, None))
def test_valid_id_pathsep(self):
'''
Path separators in id should make it invalid
'''
opts = {'pki_dir': '/tmp/whatever'}
# We have to test both path separators because os.path.normpath will
# convert forward slashes to backslashes on Windows.
for pathsep in ('/', '\\'):
self.assertFalse(valid_id(opts, pathsep.join(('..', 'foobar'))))
def test_zmq_verify(self):
self.assertTrue(zmq_version())

16
tox.ini Normal file
View file

@ -0,0 +1,16 @@
[tox]
envlist = py27,py34,py35,py36
[testenv]
sitepackages = True
deps =
py27,pylint: -r{toxinidir}/requirements/dev_python27.txt
py34,py35,py36: -r{toxinidir}/requirements/dev_python34.txt
commands =
py27: python2 {toxinidir}/tests/runtests.py {posargs:-v --run-destructive}
py34,py35,py36: python3 {toxinidir}/tests/runtests.py {posargs:-v --run-destructive}
[testenv:pylint]
basepython = python2.7
commands = pylint --rcfile={toxinidir}/.testing.pylintrc --disable=W1307 {posargs:setup.py salt/}
pylint --rcfile={toxinidir}/.testing.pylintrc --disable=W0232,E1002,W1307 {posargs:tests/}