Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)

Merge pull request #32870 from rallytime/merge-2016.3
[2016.3] Merge forward from 2015.8 to 2016.3

Commit 395b7ad747
24 changed files with 372 additions and 91 deletions
.gitignore (vendored): 3 changes

@@ -65,6 +65,9 @@ tags
 # Allow a user to set their own _version.py for testing
 _version.py

 # Ignore auto generated _syspaths.py file
 _syspaths.py

+# Ignore grains file written out during tests
+tests/integration/files/conf/grains
+
 /salt/_syspaths.py
conf/master: 95 changes

@@ -549,10 +549,37 @@
 # Git File Server Backend Configuration
 #
-# Gitfs can be provided by one of two python modules: GitPython or pygit2. If
-# using pygit2, both libgit2 and git must also be installed.
-#gitfs_provider: gitpython
+# Optional parameter used to specify the provider to be used for gitfs. Must
+# be one of the following: pygit2, gitpython, or dulwich. If unset, then each
+# will be tried in that same order, and the first one with a compatible
+# version installed will be the provider that is used.
+#gitfs_provider: pygit2

+# Along with gitfs_password, is used to authenticate to HTTPS remotes.
+# gitfs_user: ''

+# Along with gitfs_user, is used to authenticate to HTTPS remotes.
+# This parameter is not required if the repository does not use authentication.
+#gitfs_password: ''

+# By default, Salt will not authenticate to an HTTP (non-HTTPS) remote.
+# This parameter enables authentication over HTTP. Enable this at your own risk.
+#gitfs_insecure_auth: False

+# Along with gitfs_privkey (and optionally gitfs_passphrase), is used to
+# authenticate to SSH remotes. This parameter (or its per-remote counterpart)
+# is required for SSH remotes.
+#gitfs_pubkey: ''

+# Along with gitfs_pubkey (and optionally gitfs_passphrase), is used to
+# authenticate to SSH remotes. This parameter (or its per-remote counterpart)
+# is required for SSH remotes.
+#gitfs_privkey: ''

+# This parameter is optional, required only when the SSH key being used to
+# authenticate is protected by a passphrase.
+#gitfs_passphrase: ''

 # When using the git fileserver backend at least one git remote needs to be
 # defined. The user running the salt master will need read access to the repo.
 #
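The HTTPS and SSH options above mirror the per-remote parameters documented in the gitfs walkthrough. A minimal sketch of how such a master configuration block parses, assuming PyYAML is available; the remote URL and credentials are placeholders, not defaults:

.. code-block:: python

    # Illustrative only: sanity-check the shape of the new gitfs
    # authentication options described above.
    import textwrap
    import yaml

    master_config = yaml.safe_load(textwrap.dedent("""\
        gitfs_provider: pygit2
        gitfs_remotes:
          - https://example.com/states.git
        gitfs_user: deploy
        gitfs_password: hunter2
        gitfs_insecure_auth: False
    """))

    assert master_config['gitfs_provider'] == 'pygit2'
    assert master_config['gitfs_insecure_auth'] is False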
@@ -630,6 +657,66 @@
 # Recursively merge lists by aggregating them instead of replacing them.
 #pillar_merge_lists: False

 # Git External Pillar (git_pillar) Configuration Options
 #
 # Specify the provider to be used for git_pillar. Must be either pygit2 or
 # gitpython. If unset, then both will be tried in that same order, and the
 # first one with a compatible version installed will be the provider that
 # is used.
 #git_pillar_provider: pygit2

 # If the desired branch matches this value, and the environment is omitted
 # from the git_pillar configuration, then the environment for that git_pillar
 # remote will be base.
 #git_pillar_base: master

 # If the branch is omitted from a git_pillar remote, then this branch will
 # be used instead
 #git_pillar_branch: master

 # Environment to use for git_pillar remotes. This is normally derived from
 # the branch/tag (or from a per-remote env parameter), but if set this will
 # override the process of deriving the env from the branch/tag name.
 #git_pillar_env: ''

 # Path relative to the root of the repository where the git_pillar top file
 # and SLS files are located.
 #git_pillar_root: ''

 # Specifies whether or not to ignore SSL certificate errors when contacting
 # the remote repository.
 #git_pillar_ssl_verify: False

 # When set to False, if there is an update/checkout lock for a git_pillar
 # remote and the pid written to it is not running on the master, the lock
 # file will be automatically cleared and a new lock will be obtained.
 #git_pillar_global_lock: True

 # Git External Pillar Authentication Options
 #
 # Along with git_pillar_password, is used to authenticate to HTTPS remotes.
 #git_pillar_user: ''

 # Along with git_pillar_user, is used to authenticate to HTTPS remotes.
 # This parameter is not required if the repository does not use authentication.
 #git_pillar_password: ''

 # By default, Salt will not authenticate to an HTTP (non-HTTPS) remote.
 # This parameter enables authentication over HTTP.
 #git_pillar_insecure_auth: False

 # Along with git_pillar_privkey (and optionally git_pillar_passphrase),
 # is used to authenticate to SSH remotes.
 #git_pillar_pubkey: ''

 # Along with git_pillar_pubkey (and optionally git_pillar_passphrase),
 # is used to authenticate to SSH remotes.
 #git_pillar_privkey: ''

 # This parameter is optional, required only when the SSH key being used
 # to authenticate is protected by a passphrase.
 #git_pillar_passphrase: ''

 # A master can cache pillars locally to bypass the expense of having to render them
 # for each minion on every request. This feature should only be enabled in cases
 # where pillar rendering time is known to be unsatisfactory and any attendant security
@@ -89,7 +89,7 @@ Execution Options

 .. option:: -u, --update-bootstrap

-    Update salt-bootstrap to the latest develop version on GitHub.
+    Update salt-bootstrap to the latest stable bootstrap release.

 .. option:: -y, --assume-yes
@@ -1,5 +1,5 @@
 ========================
-OS Support for Cloud VMs
+Cloud deployment scripts
 ========================

 Salt Cloud works primarily by executing a script on the virtual machines as
@@ -14,31 +14,39 @@ script. A stable version is included with each release tarball starting with

 https://github.com/saltstack/salt-bootstrap

-If you do not specify a script argument, this script will be used at the
-default.
+Note that, somewhat counter-intuitively, this script is referenced as
+``bootstrap-salt`` in the configuration.

-If the Salt Bootstrap script does not meet your needs, you may write your own.
-The script should be written in bash and is a Jinja template. Deploy scripts
-need to execute a number of functions to do a complete salt setup. These
-functions include:
+You can specify a deploy script in the cloud configuration file
+(``/etc/salt/cloud`` by default):

-1. Install the salt minion. If this can be done via system packages this method
-   is HIGHLY preferred.
-2. Add the salt minion keys before the minion is started for the first time.
-   The minion keys are available as strings that can be copied into place in
-   the Jinja template under the dict named "vm".
-3. Start the salt-minion daemon and enable it at startup time.
-4. Set up the minion configuration file from the "minion" data available in
-   the Jinja template.
+.. code-block:: yaml

-A good, well commented, example of this process is the Fedora deployment
-script:
+    script: bootstrap-salt

-https://github.com/saltstack/salt-cloud/blob/master/saltcloud/deploy/Fedora.sh

-A number of legacy deploy scripts are included with the release tarball. None
-of them are as functional or complete as Salt Bootstrap, and are still included
-for academic purposes.
+Or in a provider:
+
+.. code-block:: yaml
+
+    my-provider:
+      # snip...
+      script: bootstrap-salt
+
+
+Or in a profile:
+
+.. code-block:: yaml
+
+    my-profile:
+      provider: my-provider
+      # snip...
+      script: bootstrap-salt
+
+
+If you do not specify a script argument in your cloud configuration file,
+provider configuration or profile configuration, the "bootstrap-salt" script
+will be used by default.


 Other Generic Deploy Scripts
@@ -61,6 +69,54 @@ refit to meet your needs. One important use of them is to pass options to
 the salt-bootstrap script, such as updating to specific git tags.


 Custom Deploy Scripts
 =====================

 If the Salt Bootstrap script does not meet your needs, you may write your own.
 The script should be written in shell and is a Jinja template. Deploy scripts
 need to execute a number of functions to do a complete salt setup. These
 functions include:

 1. Install the salt minion. If this can be done via system packages this method
    is HIGHLY preferred.
 2. Add the salt minion keys before the minion is started for the first time.
    The minion keys are available as strings that can be copied into place in
    the Jinja template under the dict named "vm".
 3. Start the salt-minion daemon and enable it at startup time.
 4. Set up the minion configuration file from the "minion" data available in
    the Jinja template.

 A good, well commented example of this process is the Fedora deployment
 script:

 https://github.com/saltstack/salt-cloud/blob/master/saltcloud/deploy/Fedora.sh

 A number of legacy deploy scripts are included with the release tarball. None
 of them are as functional or complete as Salt Bootstrap, and are still included
 for academic purposes.

 Custom deploy scripts are picked up from ``/etc/salt/cloud.deploy.d`` by
 default, but you can change the location of deploy scripts with the cloud
 configuration ``deploy_scripts_search_path``. Additionally, if your deploy
 script has the extension ``.sh``, you can leave out the extension in your
 configuration.

 For example, if your custom deploy script is located in
 ``/etc/salt/cloud.deploy.d/my_deploy.sh``, you could specify it in a cloud
 profile like this:

 .. code-block:: yaml

     my-profile:
       provider: my-provider
       # snip...
       script: my_deploy

 You're also free to use the full path to the script if you like. Using full
 paths, your script doesn't have to live inside ``/etc/salt/cloud.deploy.d`` or
 whatever you've configured with ``deploy_scripts_search_path``.


 Post-Deploy Commands
 ====================
@@ -24,6 +24,18 @@ This package can be installed using `pip` or `easy_install`:
     pip install pyvmomi
     easy_install pyvmomi

 .. note::

     Version 6.0 of pyVmomi has some problems with SSL error handling on certain
     versions of Python. If using version 6.0 of pyVmomi, the machine that you
     are running the proxy minion process from must have Python 2.7.9 or
     newer. This is due to an upstream dependency in pyVmomi 6.0 that is not supported
     in Python versions 2.6 to 2.7.8. If the version of Python running the salt-cloud
     command is not in the supported range, you will need to install an earlier version
     of pyVmomi. See `Issue #29537`_ for more information.

 .. _Issue #29537: https://github.com/saltstack/salt/issues/29537
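A minimal, standalone sketch of the compatibility check the note above describes; this helper is not part of Salt, and the pip command in the message is only a suggestion:

.. code-block:: python

    # pyVmomi 6.0 relies on SSL handling that only exists in Python 2.7.9+
    # (and Python 3), so older interpreters should pin an earlier release.
    import sys

    def pyvmomi_60_supported():
        """Return True if this interpreter can use pyVmomi 6.0 safely."""
        return sys.version_info >= (2, 7, 9)

    if not pyvmomi_60_supported():
        print('Install an earlier pyVmomi release, e.g. pip install "pyvmomi<6.0"')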
Configuration
=============
@@ -274,9 +274,9 @@ with labels.
 ``Awesome``
     The pull request implements an especially well crafted solution, or a very difficult but necessary change.

-``Low Hanging Fruit``
-    The issue is trivial or almost trivial to implement or fix. Issues having this label should be a good starting
-    place for new contributors to Salt.
+``Help Wanted``
+    The issue appears to have a simple solution. Issues having this label
+    should be a good starting place for new contributors to Salt.

 ``Needs Testcase``
     The issue or pull request relates to a feature that needs test coverage. The pull request containing the tests
@@ -88,13 +88,13 @@ the following configuration:

 .. code-block:: yaml

-    'node_type:web':
+    'node_type:webserver':
       - match: grain
       - webserver

     'node_type:postgres':
       - match: grain
       - database
       - postgres

     'node_type:redis':
       - match: grain
@@ -325,7 +325,12 @@ tremendous amount of customization. Here's some example usage:

       - https://foo.com/foo.git
       - https://foo.com/bar.git:
         - root: salt
-        - mountpoint: salt://foo/bar/baz
+        - mountpoint: salt://bar
         - base: salt-base
+      - https://foo.com/bar.git:
+        - name: second_bar_repo
+        - root: other/salt
+        - mountpoint: salt://other/bar
+        - base: salt-base
       - http://foo.com/baz.git:
         - root: salt/states
@@ -342,26 +347,32 @@ tremendous amount of customization. Here's some example usage:
    with a colon.

 2. Per-remote configuration parameters are named like the global versions,
-   with the ``gitfs_`` removed from the beginning.
+   with the ``gitfs_`` removed from the beginning. The exception is the
+   ``name`` parameter, which is only available to per-remote configurations.

 In the example configuration above, the following is true:

-1. The first and third gitfs remotes will use the ``develop`` branch/tag as the
-   ``base`` environment, while the second one will use the ``salt-base``
+1. The first and fourth gitfs remotes will use the ``develop`` branch/tag as the
+   ``base`` environment, while the second and third will use the ``salt-base``
    branch/tag as the ``base`` environment.

 2. The first remote will serve all files in the repository. The second
    remote will only serve files from the ``salt`` directory (and its
-   subdirectories), while the third remote will only serve files from the
-   ``salt/states`` directory (and its subdirectories).
+   subdirectories). The third remote will only serve files from the
+   ``other/salt`` directory (and its subdirectories), while the fourth remote
+   will only serve files from the ``salt/states`` directory (and its
+   subdirectories).

-3. The files from the second remote will be located under
-   ``salt://foo/bar/baz``, while the files from the first and third remotes
-   will be located under the root of the Salt fileserver namespace
-   (``salt://``).
+3. The first and fourth remotes will have files located under the root of the
+   Salt fileserver namespace (``salt://``). The files from the second remote
+   will be located under ``salt://bar``, while the files from the third remote
+   will be located under ``salt://other/bar``.

-4. The third remote overrides the default behavior of :ref:`not authenticating to
-   insecure (non-HTTPS) remotes <gitfs-insecure-auth>`.
+4. The second and third remotes reference the same repository, and unique names
+   need to be declared for duplicate gitfs remotes.
+
+5. The fourth remote overrides the default behavior of :ref:`not authenticating
+   to insecure (non-HTTPS) remotes <gitfs-insecure-auth>`.

 Serving from a Subdirectory
 ===========================
@@ -129,12 +129,24 @@ modules.
 The Salt module functions are also made available in the template context as
 ``salt:``

 The following example illustrates calling the ``group_to_gid`` function in the
 ``file`` execution module with a single positional argument called
 ``some_group_that_exists``.

 .. code-block:: jinja

     moe:
       user.present:
         - gid: {{ salt['file.group_to_gid']('some_group_that_exists') }}

 One way to think about this might be that the ``gid`` key is being assigned
 a value equivalent to the following Python pseudo-code:

 .. code-block:: python

     import salt.modules.file
     file.group_to_gid('some_group_that_exists')

 Note that for the above example to work, ``some_group_that_exists`` must exist
 before the state file is processed by the templating engine.
@@ -145,6 +157,9 @@ MAC address for eth0:

     salt['network.hw_addr']('eth0')

+To examine the possible arguments to each execution module function,
+one can examine the `module reference documentation </ref/modules/all>`:

 Advanced SLS module syntax
 ==========================
@@ -191,7 +191,8 @@ class Batch(object):
                     else:
                         parts.update(part)
                         for id in part.keys():
-                            minion_tracker[queue]['minions'].remove(id)
+                            if id in minion_tracker[queue]['minions']:
+                                minion_tracker[queue]['minions'].remove(id)
                 except StopIteration:
                     # if a iterator is done:
                     #  - set it to inactive
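A standalone illustration (not the Salt source) of why the guard added above matters: calling list.remove() for an id that is no longer tracked raises ValueError, while the membership check keeps the bookkeeping idempotent when a minion returns more than once.

.. code-block:: python

    tracked = ['minion1', 'minion2']
    returned = ['minion1', 'minion1']   # duplicate return for the same minion

    for minion_id in returned:
        if minion_id in tracked:        # the guard added in this change
            tracked.remove(minion_id)   # without it, the second pass raises ValueError

    assert tracked == ['minion2']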
@@ -1679,6 +1679,13 @@ def mod_repo(repo, saltenv='base', **kwargs):
    consolidate
        if ``True``, will attempt to de-dup and consolidate sources

    comments
        Sometimes you want to supply additional information, but not as
        enabled configuration. Anything supplied for this list will be saved
        in the repo configuration with a comment marker (#) in front.

        .. versionadded:: 2015.8.9

    .. note:: Due to the way keys are stored for APT, there is a known issue
            where the key won't be updated unless another change is made
            at the same time. Keys should be properly added on initial

@@ -1905,6 +1912,8 @@ def mod_repo(repo, saltenv='base', **kwargs):
            if 'comments' in kwargs:
                mod_source.comment = " ".join(str(c) for c in kwargs['comments'])
            sources.list.append(mod_source)
        elif 'comments' in kwargs:
            mod_source.comment = " ".join(str(c) for c in kwargs['comments'])

        for key in kwargs:
            if key in _MODIFY_OK and hasattr(mod_source, key):
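A quick standalone check of how the new ``comments`` argument is folded into a single comment string (this mirrors the join used above; the repo line itself is illustrative, and the actual comment placement in sources.list is handled by python-apt):

.. code-block:: python

    comments = ['added by salt', 'ticket 1234']
    comment = " ".join(str(c) for c in comments)

    # Roughly how the entry would end up looking in the repo configuration:
    source_line = 'deb http://archive.example.com/ubuntu trusty main  # ' + comment
    print(source_line)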
@@ -17,6 +17,7 @@ import salt.utils.url
 import salt.crypt
 import salt.transport
 from salt.exceptions import CommandExecutionError
 from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=import-error,no-name-in-module

 # Import 3rd-party libs
 import salt.ext.six as six

@@ -358,6 +359,25 @@ def cache_file(path, saltenv='base', env=None):
        # Backwards compatibility
        saltenv = env

+    contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv)
+    path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp')
+    try:
+        if path_is_remote and contextkey in __context__:
+            # Prevent multiple caches in the same salt run. Affects remote URLs
+            # since the master won't know their hash, so the fileclient
+            # wouldn't be able to prevent multiple caches if we try to cache
+            # the remote URL more than once.
+            if os.path.isfile(__context__[contextkey]):
+                return __context__[contextkey]
+            else:
+                # File is in __context__ but no longer exists in the minion
+                # cache, get rid of the context key and re-cache below.
+                # Accounts for corner case where file is removed from minion
+                # cache between cp.cache_file calls in the same salt-run.
+                __context__.pop(contextkey)
+    except AttributeError:
+        pass
+
    _mk_client()

    path, senv = salt.utils.url.split_env(path)

@@ -371,6 +391,10 @@ def cache_file(path, saltenv='base', env=None):
                path, saltenv
            )
        )
+    if path_is_remote:
+        # Cache was successful, store the result in __context__ to prevent
+        # multiple caches (see above).
+        __context__[contextkey] = result
    return result
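A standalone sketch of the per-run caching idea introduced above (names are illustrative, not the Salt API): remote URLs get one entry per function/path/saltenv triple in a run-scoped context dict, so a second cp.cache_file call in the same run returns the already cached path instead of downloading again.

.. code-block:: python

    import os

    context = {}   # stands in for Salt's run-scoped __context__

    def cache_file(path, saltenv, fetch):
        contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv)
        cached = context.get(contextkey)
        if cached and os.path.isfile(cached):
            return cached                      # already cached in this run
        result = fetch(path)                   # would download the remote URL here
        context[contextkey] = result
        return result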
@@ -569,7 +569,7 @@ def create_container(image,
                      detach=True,
                      stdin_open=False,
                      tty=False,
-                     mem_limit=0,
+                     mem_limit=None,
                      ports=None,
                      environment=None,
                      dns=None,
@@ -3305,6 +3305,10 @@ def source_list(source, source_hash, saltenv):

        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    '''
+    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
+    if contextkey in __context__:
+        return __context__[contextkey]
+
    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]

@@ -3338,10 +3342,7 @@ def source_list(source, source_hash, saltenv):
                    ret = (single_src, single_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
-                    dest = salt.utils.mkstemp()
-                    fn_ = __salt__['cp.get_url'](single_src, dest)
-                    os.remove(fn_)
-                    if fn_:
+                    if __salt__['cp.cache_file'](single_src):
                        ret = (single_src, single_hash)
                        break
                elif proto == 'file' and os.path.exists(urlparsed_single_src.path):

@@ -3357,11 +3358,16 @@ def source_list(source, source_hash, saltenv):
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
-                urlparsed_source = _urlparse(single)
-                if urlparsed_source.scheme == 'file' and os.path.exists(urlparsed_source.path):
+                urlparsed_src = _urlparse(single)
+                proto = urlparsed_src.scheme
+                if proto == 'file' and os.path.exists(urlparsed_src.path):
                    ret = (single, source_hash)
                    break
-                if single.startswith('/') and os.path.exists(single):
+                elif proto.startswith('http') or proto == 'ftp':
+                    if __salt__['cp.cache_file'](single):
+                        ret = (single, source_hash)
+                        break
+                elif single.startswith('/') and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:

@@ -3369,10 +3375,11 @@ def source_list(source, source_hash, saltenv):
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
-        else:
-            return ret
    else:
-        return source, source_hash
+        ret = (source, source_hash)
+
+    __context__[contextkey] = ret
+    return ret


def apply_template_on_contents(
@@ -119,9 +119,9 @@ def _get_config_file(user, config):
    if not uinfo:
        raise CommandExecutionError('User \'{0}\' does not exist'.format(user))
    home = uinfo['home']
-    config = _expand_authorized_keys_path(config, user, home)
    if not os.path.isabs(config):
        config = os.path.join(home, config)
+    config = _expand_authorized_keys_path(config, user, home)
    return config
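A minimal sketch of the token expansion exercised by the tests added later in this commit; this is an illustrative re-implementation, not the code in salt/modules/ssh.py, which differs in detail. ``%u`` expands to the user, ``%h`` to the home directory, and ``%%`` to a literal ``%``.

.. code-block:: python

    def expand_authorized_keys_path(path, user, home):
        # Protect literal %% before substituting the other tokens.
        expanded = path.replace('%%', '\0')
        expanded = expanded.replace('%u', user).replace('%h', home)
        return expanded.replace('\0', '%')

    assert expand_authorized_keys_path('%h/foo', 'user', '/home/user') == '/home/user/foo'
    assert expand_authorized_keys_path('/srv/%h/aaa/%u%%', 'user',
                                       '/home/user') == '/srv//home/user/aaa/user%'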
@@ -254,7 +254,7 @@ class SaltClientsMixIn(object):
                # not the actual client we'll use.. but its what we'll use to get args
                'local_batch': local_client.cmd_batch,
                'local_async': local_client.run_job,
-                'runner': salt.runner.RunnerClient(opts=self.application.opts).async,
+                'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
                'runner_async': None,  # empty, since we use the same client as `runner`
            }
        return SaltClientsMixIn.__saltclients

@@ -891,8 +891,6 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W
    def disbatch(self):
        '''
        Disbatch all lowstates to the appropriate clients
-
-        Auth must have been verified before this point
        '''
        ret = []

@@ -901,16 +899,23 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W
            client = low.get('client')
            self._verify_client(client)

        for low in self.lowstate:
            # make sure that the chunk has a token, if not we can't do auth per-request
            # Note: this means that you can send different tokens per lowstate
            # as long as the base token (to auth with the API) is valid
            if 'token' not in low:
            # Make sure we have 'token' or 'username'/'password' in each low chunk.
            # Salt will verify the credentials are correct.
            if self.token is not None and 'token' not in low:
                low['token'] = self.token

            if not (('token' in low)
                    or ('username' in low and 'password' in low and 'eauth' in low)):
                ret.append('Failed to authenticate')
                break

            # disbatch to the correct handler
            try:
                chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low)
                ret.append(chunk_ret)
            except EauthAuthenticationError as exc:
                ret.append('Failed to authenticate')
                break
            except Exception as ex:
                ret.append('Unexpected exception while handling request: {0}'.format(ex))
                logger.error('Unexpected exception while handling request:', exc_info=True)

@@ -1108,8 +1113,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W
        '''
        Disbatch runner client commands
        '''
-        f_call = {'args': [chunk['fun'], chunk]}
-        pub_data = self.saltclients['runner'](chunk['fun'], chunk)
+        pub_data = self.saltclients['runner'](chunk)
        tag = pub_data['tag'] + '/ret'
        try:
            event = yield self.application.event_listener.get_event(self, tag=tag)

@@ -1124,8 +1128,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W
        '''
        Disbatch runner client_async commands
        '''
-        f_call = {'args': [chunk['fun'], chunk]}
-        pub_data = self.saltclients['runner'](chunk['fun'], chunk)
+        pub_data = self.saltclients['runner'](chunk)
        raise tornado.gen.Return(pub_data)
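A minimal standalone sketch of the per-chunk check added to disbatch() above (argument names are illustrative): inherit the session token when the chunk carries none, then require either a token or a full username/password/eauth set before dispatching.

.. code-block:: python

    def chunk_has_credentials(low, session_token=None):
        # Mirror of the logic above, outside of the Tornado handler.
        if session_token is not None and 'token' not in low:
            low['token'] = session_token
        return ('token' in low) or all(
            key in low for key in ('username', 'password', 'eauth'))

    assert chunk_has_credentials({'client': 'local'}, session_token='abc123')
    assert not chunk_has_credentials({'client': 'local', 'username': 'u', 'password': 'p'})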
@@ -16,9 +16,9 @@ from contextlib import closing
 # Import 3rd-party libs
 import salt.ext.six as six

-# # Use salt.utils.fopen
 # Import salt libs
+from salt.exceptions import CommandExecutionError
 import salt.utils

 # remove after archive_user deprecation.
 from salt.utils import warn_until
@@ -249,14 +249,31 @@ def extracted(name,
                            __env__,
                            '{0}.{1}'.format(re.sub('[:/\\\\]', '_', if_missing),
                                             archive_format))

+    if __opts__['test']:
+        source_match = source
+    else:
+        try:
+            source_match = __salt__['file.source_list'](source,
+                                                        source_hash,
+                                                        __env__)[0]
+        except CommandExecutionError as exc:
+            ret['result'] = False
+            ret['comment'] = exc.strerror
+            return ret
+
    if not os.path.exists(filename):
        if __opts__['test']:
            ret['result'] = None
-            ret['comment'] = \
-                'Archive {0} would have been downloaded in cache'.format(source)
+            ret['comment'] = '{0} {1} would be downloaded to cache'.format(
+                'One of' if not isinstance(source_match, six.string_types)
+                else 'Archive',
+                source_match
+            )
            return ret

-        log.debug('Archive file {0} is not in cache, download it'.format(source))
+        log.debug('%s is not in cache, downloading it', source_match)
        file_result = __salt__['state.single']('file.managed',
                                               filename,
                                               source=source,
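A standalone sketch (not the Salt source) of the resolution step added above: in test mode the raw ``source`` is used as-is, otherwise the first reachable source wins, and failures surface as a state error rather than an exception. ``source_list`` stands in for ``__salt__['file.source_list']``.

.. code-block:: python

    def resolve_source(source, source_hash, saltenv, test_mode, source_list):
        if test_mode:
            return source, None              # don't hit the fileserver in test runs
        try:
            return source_list(source, source_hash, saltenv)[0], None
        except Exception as exc:             # Salt raises CommandExecutionError here
            return None, str(exc)            # reported back as ret['comment']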
@@ -279,17 +296,21 @@ def extracted(name,
            log.debug('failed to download {0}'.format(source))
            return file_result
    else:
-        log.debug('Archive file {0} is already in cache'.format(name))
+        log.debug('Archive %s is already in cache', name)

    if __opts__['test']:
        ret['result'] = None
-        ret['comment'] = 'Archive {0} would have been extracted in {1}'.format(
-            source, name)
+        ret['comment'] = '{0} {1} would be extracted to {2}'.format(
+            'One of' if not isinstance(source_match, six.string_types)
+            else 'Archive',
+            source_match,
+            name
+        )
        return ret

    __salt__['file.makedirs'](name, user=user, group=group)

-    log.debug('Extract {0} in {1}'.format(filename, name))
+    log.debug('Extracting {0} to {1}'.format(filename, name))
    if archive_format == 'zip':
        files = __salt__['archive.unzip'](filename, name, trim_output=trim_output, password=password)
    elif archive_format == 'rar':
@@ -347,7 +368,7 @@ def extracted(name,
            ret['result'] = True
            ret['changes']['directories_created'] = [name]
        ret['changes']['extracted_files'] = files
-        ret['comment'] = '{0} extracted in {1}'.format(source, name)
+        ret['comment'] = '{0} extracted to {1}'.format(source_match, name)
        if not keep:
            os.unlink(filename)
        if source_hash and source_hash_update:
@@ -356,5 +377,5 @@ def extracted(name,
        else:
            __salt__['file.remove'](if_missing)
        ret['result'] = False
-        ret['comment'] = 'Can\'t extract content of {0}'.format(source)
+        ret['comment'] = 'Can\'t extract content of {0}'.format(source_match)
    return ret
@@ -364,6 +364,10 @@ def managed(name, ppa=None, **kwargs):
                reposplit[3:] = sorted(reposplit[3:])
            if sanitizedsplit != reposplit:
                needs_update = True
            if 'comments' in kwargs:
                _line = pre[kwarg].split('#')
                if str(kwargs['comments']) not in _line:
                    needs_update = True
        else:
            if str(sanitizedkwargs[kwarg]) != str(pre[kwarg]):
                needs_update = True
@@ -2670,13 +2670,11 @@ def update_bootstrap(config, url=None):
    '''
    Update the salt-bootstrap script

-    url can be either:
-
-    - The URL to fetch the bootstrap script from
-    - The absolute path to the bootstrap
-    - The content of the bootstrap script
-
+    url can be one of:
+
+    - The URL to fetch the bootstrap script from
+    - The absolute path to the bootstrap
+    - The content of the bootstrap script
    '''
    default_url = config.get('bootstrap_script_url',
                             'https://bootstrap.saltstack.com')
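An illustrative helper (not part of Salt) showing the three forms of ``url`` the docstring above allows; update_bootstrap itself decides this internally, so the names and thresholds here are assumptions for the sake of the example.

.. code-block:: python

    import os

    def classify_bootstrap_source(url):
        if url.startswith(('http://', 'https://')):
            return 'url'        # fetch the script from the network
        if os.path.isabs(url):
            return 'path'       # copy a local script into place
        return 'content'        # treat the string as the script itself

    assert classify_bootstrap_source('https://bootstrap.saltstack.com') == 'url'
    assert classify_bootstrap_source('/usr/local/bin/bootstrap-salt.sh') == 'path'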
@@ -1392,8 +1392,7 @@ class ExecutionOptionsMixIn(six.with_metaclass(MixInMeta, object)):
            '-u', '--update-bootstrap',
            default=False,
            action='store_true',
-            help='Update salt-bootstrap to the latest develop version on '
-                 'GitHub.'
+            help='Update salt-bootstrap to the latest stable bootstrap release.'
        )
        group.add_option(
            '-y', '--assume-yes',
@@ -174,6 +174,7 @@ class FileModuleTest(integration.ModuleCase):
                return_value=['http/httpd.conf.fallback']),
            'cp.list_master_dirs': MagicMock(return_value=[]),
        }
+        filemod.__context__ = {}

        ret = filemod.source_list(['salt://http/httpd.conf',
                                   'salt://http/httpd.conf.fallback'],

@@ -189,6 +190,8 @@ class FileModuleTest(integration.ModuleCase):
            'cp.list_master': MagicMock(side_effect=list_master),
            'cp.list_master_dirs': MagicMock(return_value=[]),
        }
+        filemod.__context__ = {}
+
        ret = filemod.source_list(['salt://http/httpd.conf?saltenv=dev',
                                   'salt://http/httpd.conf.fallback'],
                                  'filehash', 'base')

@@ -200,6 +203,8 @@ class FileModuleTest(integration.ModuleCase):
            'cp.list_master': MagicMock(return_value=['http/httpd.conf']),
            'cp.list_master_dirs': MagicMock(return_value=[]),
        }
+        filemod.__context__ = {}
+
        ret = filemod.source_list(
            [{'salt://http/httpd.conf': ''}], 'filehash', 'base')
        self.assertItemsEqual(ret, ['salt://http/httpd.conf', 'filehash'])

@@ -210,8 +215,10 @@ class FileModuleTest(integration.ModuleCase):
        filemod.__salt__ = {
            'cp.list_master': MagicMock(return_value=[]),
            'cp.list_master_dirs': MagicMock(return_value=[]),
-            'cp.get_url': MagicMock(return_value='/tmp/http.conf'),
+            'cp.cache_file': MagicMock(return_value='/tmp/http.conf'),
        }
+        filemod.__context__ = {}
+
        ret = filemod.source_list(
            [{'http://t.est.com/http/httpd.conf': 'filehash'}], '', 'base')
        self.assertItemsEqual(ret, ['http://t.est.com/http/httpd.conf',
@@ -34,6 +34,10 @@ class SSHAuthKeyTestCase(TestCase):
                                                   '/home/user')
        self.assertEqual(output, '/home//home/user')

+        output = ssh._expand_authorized_keys_path('%h/foo', 'user',
+                                                  '/home/user')
+        self.assertEqual(output, '/home/user/foo')
+
        output = ssh._expand_authorized_keys_path('/srv/%h/aaa/%u%%', 'user',
                                                  '/home/user')
        self.assertEqual(output, '/srv//home/user/aaa/user%')
@@ -69,7 +69,7 @@ class SaltnadoTestCase(integration.ModuleCase, AsyncHTTPTestCase):

    @property
    def opts(self):
-        return self.get_config('master', from_scratch=True)
+        return self.get_config('client_config', from_scratch=True)

    @property
    def mod_opts(self):
@@ -65,6 +65,7 @@ class ArchiveTestCase(TestCase):
        mock_false = MagicMock(return_value=False)
        ret = {'stdout': ['saltines', 'cheese'], 'stderr': 'biscuits', 'retcode': '31337', 'pid': '1337'}
        mock_run = MagicMock(return_value=ret)
+        mock_source_list = MagicMock(return_value=source)

        with patch('os.path.exists', mock_true):
            with patch.dict(archive.__opts__, {'test': False,

@@ -72,7 +73,8 @@ class ArchiveTestCase(TestCase):
                with patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
                                                   'file.file_exists': mock_false,
                                                   'file.makedirs': mock_true,
-                                                   'cmd.run_all': mock_run}):
+                                                   'cmd.run_all': mock_run,
+                                                   'file.source_list': mock_source_list}):
                    filename = os.path.join(
                        tmp_dir,
                        'files/test/_tmp_test_archive_.tar'

@@ -90,10 +92,19 @@ class ArchiveTestCase(TestCase):
        Tests the call of extraction with gnutar
        '''
        gnutar = MagicMock(return_value='tar (GNU tar)')
+        source = 'GNU tar'
        missing = MagicMock(return_value=False)
        nop = MagicMock(return_value=True)
        run_all = MagicMock(return_value={'retcode': 0, 'stdout': 'stdout', 'stderr': 'stderr'})
-        with patch.dict(archive.__salt__, {'cmd.run': gnutar, 'file.directory_exists': missing, 'file.file_exists': missing, 'state.single': nop, 'file.makedirs': nop, 'cmd.run_all': run_all}):
+        mock_source_list = MagicMock(return_value=source)
+
+        with patch.dict(archive.__salt__, {'cmd.run': gnutar,
+                                           'file.directory_exists': missing,
+                                           'file.file_exists': missing,
+                                           'state.single': nop,
+                                           'file.makedirs': nop,
+                                           'cmd.run_all': run_all,
+                                           'file.source_list': mock_source_list}):
            ret = archive.extracted('/tmp/out', '/tmp/foo.tar.gz', 'tar', tar_options='xvzf', keep=True)
            self.assertEqual(ret['changes']['extracted_files'], 'stdout')

@@ -102,10 +113,19 @@ class ArchiveTestCase(TestCase):
        Tests the call of extraction with bsdtar
        '''
        bsdtar = MagicMock(return_value='tar (bsdtar)')
+        source = 'bsdtar'
        missing = MagicMock(return_value=False)
        nop = MagicMock(return_value=True)
        run_all = MagicMock(return_value={'retcode': 0, 'stdout': 'stdout', 'stderr': 'stderr'})
-        with patch.dict(archive.__salt__, {'cmd.run': bsdtar, 'file.directory_exists': missing, 'file.file_exists': missing, 'state.single': nop, 'file.makedirs': nop, 'cmd.run_all': run_all}):
+        mock_source_list = MagicMock(return_value=source)
+
+        with patch.dict(archive.__salt__, {'cmd.run': bsdtar,
+                                           'file.directory_exists': missing,
+                                           'file.file_exists': missing,
+                                           'state.single': nop,
+                                           'file.makedirs': nop,
+                                           'cmd.run_all': run_all,
+                                           'file.source_list': mock_source_list}):
            ret = archive.extracted('/tmp/out', '/tmp/foo.tar.gz', 'tar', tar_options='xvzf', keep=True)
            self.assertEqual(ret['changes']['extracted_files'], 'stderr')