Merge branch '2016.3' into 'develop'

Conflicts:
  - doc/man/salt.7
  - doc/ref/executors/all/salt.executors.direct_call.rst
  - doc/ref/executors/all/salt.executors.splay.rst
  - doc/ref/executors/all/salt.executors.sudo.rst
  - salt/cloud/clouds/vultrpy.py
This commit is contained in:
rallytime 2016-05-18 09:40:48 -06:00
commit 7bf9f5d0f5
47 changed files with 63044 additions and 60889 deletions

View file

@ -39,6 +39,9 @@
# key_logfile, pidfile:
#root_dir: /
# The path to the master's configuration file.
#conf_file: /etc/salt/master
# Directory used to store public key data:
#pki_dir: /etc/salt/pki/master
@ -65,6 +68,10 @@
# Set the number of hours to keep old job information in the job cache:
#keep_jobs: 24
# The number of seconds to wait when the client is requesting information
# about running jobs.
#gather_job_timeout: 10
# Set the default timeout for the salt command and api. The default is 5
# seconds.
#timeout: 5
@ -785,6 +792,12 @@
# LOG file of the syndic daemon:
#syndic_log_file: syndic.log
# The behaviour of the multi-syndic when connection to a master of masters failed.
# Can specify ``random`` (default) or ``ordered``. If set to ``random``, masters
# will be iterated in random order. If ``ordered`` is specified, the configured
# order will be used.
#syndic_failover: random
##### Peer Publish settings #####
##########################################

View file

@ -48,6 +48,16 @@
# of TCP connections, such as load balancers.)
# master_alive_interval: 30
# If the minion is in multi-master mode and the master_type configuration option
# is set to "failover", this setting can be set to "True" to force the minion
# to fail back to the first master in the list if the first master is back online.
#master_failback: False
# If the minion is in multi-master mode, the "master_type" configuration is set to
# "failover", and the "master_failback" option is enabled, the master failback
# interval can be set to ping the top master with this interval, in seconds.
#master_failback_interval: 0
# Set whether the minion should connect to the master via IPv6:
#ipv6: False
@ -75,6 +85,9 @@
# sock_dir, pidfile.
#root_dir: /
# The path to the minion's configuration file.
#conf_file: /etc/salt/minion
# The directory to store the pki information in
#pki_dir: /etc/salt/pki/minion
@ -85,6 +98,13 @@
# clusters.
#id:
# Cache the minion id to a file when the minion's id is not statically defined
# in the minion config. Defaults to "True". This setting prevents potential
# problems when automatic minion id resolution changes, which can cause the
# minion to lose connection with the master. To turn off minion id caching,
# set this config to ``False``.
#minion_id_caching: True
# Append a domain to a hostname in the event that it does not exist. This is
# useful for systems where socket.getfqdn() does not actually result in a
# FQDN (for instance, Solaris).
@ -309,6 +329,15 @@
# is not enabled.
# grains_cache_expiration: 300
# Determines whether or not the salt minion should run scheduled mine updates.
# Defaults to "True". Set to "False" to disable the scheduled mine updates.
#mine_enabled: True
# Determines whether or not scheduled mine updates should be accompanied by a job
# return for the job cache. Defaults to "False". Set to "True" to include job
# returns in the job cache for mine updates.
#mine_return_job: False
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
# process communications. Set ipc_mode to 'tcp' on such systems
#ipc_mode: ipc

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-API" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-API" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-api \- salt-api Command
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CALL" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-CALL" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CLOUD" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-CLOUD" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-cloud \- Salt Cloud Command
.
@ -142,7 +142,7 @@ cloud provider carefully.
.INDENT 0.0
.TP
.B \-u, \-\-update\-bootstrap
Update salt\-bootstrap to the latest develop version on GitHub.
Update salt\-bootstrap to the latest stable bootstrap release.
.UNINDENT
.INDENT 0.0
.TP

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CP" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-CP" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.
@ -109,6 +109,9 @@ Logfile logging log level. One of \fBall\fP, \fBgarbage\fP, \fBtrace\fP,
\fBwarning\fP\&.
.UNINDENT
.SS Target Selection
.sp
The default matching that Salt utilizes is shell\-style globbing around the
minion id. See \fI\%https://docs.python.org/2/library/fnmatch.html#module\-fnmatch\fP\&.
.INDENT 0.0
.TP
.B \-E, \-\-pcre

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-KEY" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-KEY" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.
@ -294,10 +294,10 @@ default is 2048.
.INDENT 0.0
.TP
.B \-\-gen\-signature
Create a signature file of the masters public\-key named
master_pubkey_signature. The signature can be send to a minion in the
masters auth\-reply and enables the minion to verify the masters public\-key
cryptographically. This requires a new signing\-key\- pair which can be
Create a signature file of the master\(aqs public\-key named
master_pubkey_signature. The signature can be sent to a minion in the
master\(aqs auth\-reply and enables the minion to verify the master\(aqs public\-key
cryptographically. This requires a new signing\-key\-pair which can be
auto\-created with the \-\-auto\-create parameter.
.UNINDENT
.INDENT 0.0

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MASTER" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-MASTER" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MINION" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-MINION" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-PROXY" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-PROXY" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-proxy \- salt-proxy Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-RUN" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-RUN" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SSH" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-SSH" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-ssh \- salt-ssh Documentation
.
@ -136,52 +136,15 @@ the configuration files for Salt master and minions. The default location
on most systems is \fB/etc/salt\fP\&.
.UNINDENT
.SS Target Selection
.sp
The default matching that Salt utilizes is shell\-style globbing around the
minion id. See \fI\%https://docs.python.org/2/library/fnmatch.html#module\-fnmatch\fP\&.
.INDENT 0.0
.TP
.B \-E, \-\-pcre
The target expression will be interpreted as a PCRE regular expression
rather than a shell glob.
.UNINDENT
.INDENT 0.0
.TP
.B \-L, \-\-list
The target expression will be interpreted as a comma\-delimited list;
example: server1.foo.bar,server2.foo.bar,example7.quo.qux
.UNINDENT
.INDENT 0.0
.TP
.B \-G, \-\-grain
The target expression matches values returned by the Salt grains system on
the minions. The target expression is in the format of \(aq<grain value>:<glob
expression>\(aq; example: \(aqos:Arch*\(aq
.sp
This was changed in version 0.9.8 to accept glob expressions instead of
regular expression. To use regular expression matching with grains, use
the \-\-grain\-pcre option.
.UNINDENT
.INDENT 0.0
.TP
.B \-\-grain\-pcre
The target expression matches values returned by the Salt grains system on
the minions. The target expression is in the format of \(aq<grain value>:<
regular expression>\(aq; example: \(aqos:Arch.*\(aq
.UNINDENT
.INDENT 0.0
.TP
.B \-N, \-\-nodegroup
Use a predefined compound target defined in the Salt master configuration
file.
.UNINDENT
.INDENT 0.0
.TP
.B \-R, \-\-range
Instead of using shell globs to evaluate the target, use a range expression
to identify targets. Range expressions look like %cluster.
.sp
Using the Range option requires that a range server is set up and the
location of the range server is referenced in the master configuration
file.
.UNINDENT
.SS Logging Options
.sp
Logging options which override any settings defined on the configuration files.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SYNDIC" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-SYNDIC" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-UNITY" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT-UNITY" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt-unity \- salt-unity Command
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SALT" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
salt \- salt
.
@ -191,6 +191,9 @@ Logfile logging log level. One of \fBall\fP, \fBgarbage\fP, \fBtrace\fP,
\fBwarning\fP\&.
.UNINDENT
.SS Target Selection
.sp
The default matching that Salt utilizes is shell\-style globbing around the
minion id. See \fI\%https://docs.python.org/2/library/fnmatch.html#module\-fnmatch\fP\&.
.INDENT 0.0
.TP
.B \-E, \-\-pcre

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SPM" "1" "February 01, 2016" "2015.8.0" "Salt"
.TH "SPM" "1" "May 17, 2016" "2016.3.0" "Salt"
.SH NAME
spm \- Salt Package Manager Command
.

View file

@ -197,6 +197,19 @@ an alternative root.
:conf_master:`log_file`, :conf_master:`autosign_file`,
:conf_master:`autoreject_file`, :conf_master:`pidfile`.
.. conf_master:: conf_file
``conf_file``
-------------
Default: ``/etc/salt/master``
The path to the master's configuration file.
.. code-block:: yaml
conf_file: /etc/salt/master
.. conf_master:: pki_dir
``pki_dir``
@ -288,6 +301,22 @@ Set the number of hours to keep old job information.
keep_jobs: 24
.. conf_master:: gather_job_timeout
``gather_job_timeout``
----------------------
.. versionadded:: 2014.7.0
Default: ``10``
The number of seconds to wait when the client is requesting information
about running jobs.
.. code-block:: yaml
gather_job_timeout: 10
.. conf_master:: timeout
``timeout``
@ -490,6 +519,23 @@ Store all event returns _except_ the tags in a blacklist.
- salt/master/not_this_tag
- salt/master/or_this_one
.. conf_master:: max_event_size
``max_event_size``
------------------
.. versionadded:: 2014.7.0
Default: ``1048576``
Passing very large events can cause the minion to consume large amounts of
memory. This value tunes the maximum size of a message allowed onto the
master event bus. The value is expressed in bytes.
.. code-block:: yaml
max_event_size: 1048576
.. conf_master:: master_job_cache
``master_job_cache``
@ -591,7 +637,7 @@ what you are doing! Transports are explained in :ref:`Salt Transports
transport: zeromq
``transport_opts``
-------------
------------------
Default: ``{}``
@ -1009,6 +1055,40 @@ The renderer to use on the minions to render the state data.
renderer: yaml_jinja
.. conf_master:: jinja_trim_blocks
``jinja_trim_blocks``
---------------------
.. versionadded:: 2014.1.0
Default: ``False``
If this is set to ``True``, the first newline after a Jinja block is
removed (block, not variable tag!). Defaults to ``False`` and corresponds
to the Jinja environment init variable ``trim_blocks``.
.. code-block:: yaml
jinja_trim_blocks: False
.. conf_master:: jinja_lstrip_blocks
``jinja_lstrip_blocks``
-----------------------
.. versionadded:: 2014.1.0
Default: ``False``
If this is set to ``True``, leading spaces and tabs are stripped from the
start of a line to a block. Defaults to ``False`` and corresponds to the
Jinja environment init variable ``lstrip_blocks``.
.. code-block:: yaml
jinja_lstrip_blocks: False
.. conf_master:: failhard
``failhard``
@ -2788,9 +2868,7 @@ master, specify the higher level master port with this configuration value.
syndic_master_port: 4506
.. conf_master:: syndic_log_file
.. conf_master:: syndic_master_log_file
.. conf_master:: syndic_pidfile
``syndic_pidfile``
------------------
@ -2804,6 +2882,8 @@ master, specify the pidfile of the syndic daemon.
syndic_pidfile: syndic.pid
.. conf_master:: syndic_log_file
``syndic_log_file``
-------------------
@ -2816,6 +2896,24 @@ master, specify the log_file of the syndic daemon.
syndic_log_file: salt-syndic.log
.. conf_master:: syndic_failover
``syndic_failover``
-------------------
.. versionadded:: 2016.3.0
Default: ``random``
The behaviour of the multi-syndic when connection to a master of masters failed.
Can specify ``random`` (default) or ``ordered``. If set to ``random``, masters
will be iterated in random order. If ``ordered`` is specified, the configured
order will be used.
.. code-block:: yaml
syndic_failover: random
Peer Publish Settings
=====================

View file

@ -107,6 +107,58 @@ to manage the minion's master setting from an execution module. By simply
changing the algorithm in the module to return a new master ip/fqdn, restart
the minion and it will connect to the new master.
.. conf_minion:: max_event_size
``max_event_size``
------------------
.. versionadded:: 2014.7.0
Default: ``1048576``
Passing very large events can cause the minion to consume large amounts of
memory. This value tunes the maximum size of a message allowed onto the
minion event bus. The value is expressed in bytes.
.. code-block:: yaml
max_event_size: 1048576
.. conf_minion:: master_failback
``master_failback``
-------------------
.. versionadded:: 2016.3.0
Default: ``False``
If the minion is in multi-master mode and the :conf_minion:`master_type`
configuration option is set to ``failover``, this setting can be set to ``True``
to force the minion to fail back to the first master in the list if the first
master is back online.
.. code-block:: yaml
master_failback: False
.. conf_minion:: master_failback_interval
``master_failback_interval``
----------------------------
.. versionadded:: 2016.3.0
Default: ``0``
If the minion is in multi-master mode, the :conf_minion:`master_type` configuration
is set to ``failover``, and the ``master_failback`` option is enabled, the master
failback interval can be set to ping the top master with this interval, in seconds.
.. code-block:: yaml
master_failback_interval: 0
.. conf_minion:: master_alive_interval
``master_alive_interval``
@ -261,6 +313,19 @@ This directory is prepended to the following options: :conf_minion:`pki_dir`,
root_dir: /
.. conf_minion:: conf_file
``conf_file``
-------------
Default: ``/etc/salt/minion``
The path to the minion's configuration file.
.. code-block:: yaml
conf_file: /etc/salt/minion
.. conf_minion:: pki_dir
``pki_dir``
@ -294,6 +359,30 @@ ids.
id: foo.bar.com
.. conf_minion:: minion_id_caching
``minion_id_caching``
---------------------
.. versionadded:: 0.17.2
Default: ``True``
Caches the minion id to a file when the minion's :conf_minion:`id` is not
statically defined in the minion config. This setting prevents potential
problems when automatic minion id resolution changes, which can cause the
minion to lose connection with the master. To turn off minion id caching,
set this config to ``False``.
For more information, please see `Issue #7558`_ and `Pull Request #8488`_.
.. code-block:: yaml
minion_id_caching: True
.. _Issue #7558: https://github.com/saltstack/salt/issues/7558
.. _Pull Request #8488: https://github.com/saltstack/salt/pull/8488
.. conf_minion:: append_domain
``append_domain``
@ -461,6 +550,36 @@ With ``grains_deep_merge``, the result will be:
k1: v1
k2: v2
.. conf_minion:: mine_enabled
``mine_enabled``
----------------
.. versionadded:: 2016.3.0
Default: ``True``
Determines whether or not the salt minion should run scheduled mine updates.
.. code-block:: yaml
mine_enabled: True
.. conf_minion:: mine_return_job
``mine_return_job``
-------------------
.. versionadded:: 2016.3.0
Default: ``False``
Determines whether or not scheduled mine updates should be accompanied by a job
return for the job cache.
.. code-block:: yaml
mine_return_job: False
.. conf_minion:: sock_dir
@ -502,6 +621,21 @@ master.
acceptance_wait_time: 10
.. conf_minion:: acceptance_wait_time_max
``acceptance_wait_time_max``
----------------------------
Default: ``0``
The maximum number of seconds to wait until attempting to re-authenticate
with the master. If set, the wait will increase by :conf_minion:`acceptance_wait_time`
seconds each iteration.
.. code-block:: yaml
acceptance_wait_time_max: 0
.. conf_minion:: random_reauth_delay
``random_reauth_delay``
@ -520,20 +654,102 @@ parameter. The wait-time will be a random number of seconds between
random_reauth_delay: 60
.. conf_minion:: acceptance_wait_time_max
.. conf_minion:: auth_tries
``acceptance_wait_time_max``
----------------------------
``auth_tries``
--------------
Default: ``0``
.. versionadded:: 2014.7.0
The maximum number of seconds to wait until attempting to re\-authenticate
with the master. If set, the wait will increase by acceptance_wait_time
seconds each iteration.
Default: ``7``
The number of attempts to authenticate to a master before giving up. Or, more
technically, the number of consecutive SaltReqTimeoutErrors that are acceptable
when trying to authenticate to the master.
.. code-block:: yaml
acceptance_wait_time_max: 0
auth_tries: 7
.. conf_minion:: master_tries
``master_tries``
----------------
.. versionadded:: 2016.3.0
Default: ``1``
The number of attempts to connect to a master before giving up. Set this to
``-1`` for unlimited attempts. This allows for a master to have downtime and the
minion to reconnect to it later when it comes back up. In 'failover' mode, which
is set in the :conf_minion:`master_type` configuration, this value is the number
of attempts for each set of masters. In this mode, it will cycle through the list
of masters for each attempt.
``master_tries`` is different than :conf_minion:`auth_tries` because ``auth_tries``
attempts to retry auth attempts with a single master. ``auth_tries`` is under the
assumption that you can connect to the master but not gain authorization from it.
``master_tries`` will still cycle through all of the masters in a given try, so it
is appropriate if you expect occasional downtime from the master(s).
.. code-block:: yaml
master_tries: 1
.. conf_minion:: acceptance_wait_time_max
``auth_tries``
--------------
.. versionadded:: 2014.7.0
Default: ``7``
The number of attempts to authenticate to a master before giving up. Or, more
technically, the number of consecutive SaltReqTimeoutErrors that are acceptable
when trying to authenticate to the master.
.. code-block:: yaml
auth_tries: 7
.. conf_minion:: auth_timeout
``auth_timeout``
----------------
.. versionadded:: 2014.7.0
Default: ``60``
When waiting for a master to accept the minion's public key, salt will
continuously attempt to reconnect until successful. This is the timeout value,
in seconds, for each individual attempt. After this timeout expires, the minion
will wait for :conf_minion:`acceptance_wait_time` seconds before trying again.
Unless your master is under unusually heavy load, this should be left at the
default.
.. code-block:: yaml
auth_timeout: 60
.. conf_minion:: auth_safemode
``auth_safemode``
-----------------
.. versionadded:: 2014.7.0
Default: ``False``
If authentication fails due to SaltReqTimeoutError during a ping_interval,
this setting, when set to ``True``, will cause a sub-minion process to
restart.
.. code-block:: yaml
auth_safemode: False
.. conf_minion:: recon_default
@ -974,7 +1190,6 @@ environments is to isolate via the top file.
environment: None
File Directory Settings
=======================
@ -1103,6 +1318,7 @@ sha512 are also supported.
hash_type: md5
Pillar Settings
===============
@ -1249,6 +1465,7 @@ this can be set to ``True``.
always_verify_signature: True
Thread Settings
===============
@ -1267,8 +1484,6 @@ executed in a thread.
multiprocessing: True
.. _minion-logging-settings:
Minion Logging Settings

View file

@ -3,3 +3,4 @@ salt.executors.direct_call module
.. automodule:: salt.executors.direct_call
:members:

View file

@ -3,3 +3,4 @@ salt.executors.splay module
.. automodule:: salt.executors.splay
:members:

View file

@ -3,3 +3,4 @@ salt.executors.sudo module
.. automodule:: salt.executors.sudo
:members:

View file

@ -26,5 +26,6 @@ This section contains a list of the Python modules that are used to extend the v
../ref/sdb/all/index
../ref/serializers/all/index
../ref/states/all/index
../ref/thorium/all/index
../ref/tops/all/index
../ref/wheel/all/index

View file

@ -0,0 +1,17 @@
.. _all-salt.thorium:
====================================
Full list of builtin thorium modules
====================================
.. currentmodule:: salt.thorium
.. autosummary::
:toctree:
:template: autosummary.rst.tmpl
check
file
local
reg
timer

View file

@ -0,0 +1,6 @@
salt.thorium.check module
=========================
.. automodule:: salt.thorium.check
:members:

View file

@ -0,0 +1,6 @@
salt.thorium.file module
========================
.. automodule:: salt.thorium.file
:members:

View file

@ -0,0 +1,6 @@
salt.thorium.local module
=========================
.. automodule:: salt.thorium.local
:members:

View file

@ -0,0 +1,6 @@
salt.thorium.reg module
=======================
.. automodule:: salt.thorium.reg
:members:

View file

@ -0,0 +1,6 @@
salt.thorium.timer module
=========================
.. automodule:: salt.thorium.timer
:members:

View file

@ -10,12 +10,34 @@ Changes for v2015.8.8..v2015.8.9
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2016-05-13T19:31:51Z*
*Generated at: 2016-05-17T17:09:39Z*
Total Merges: **142**
Total Merges: **145**
Changes:
- **PR** `#33293`_: (*twangboy*) Fix minion start retry on Windows (2015.8)
* 22c4331 linux_acl: Allow '-' as a separation character in ACL permissions. Fixes `#31270`_ (`#33172`_) (`#33305`_)
* 7a181f2 Handle more ipv6 error as an exception `#33299`_ (`#33300`_)
* eb47a15 Ignore retcode when checking service's status (`#33294`_)
- **PR** `#33274`_: (*rallytime*) [2015.8] Merge forward from 2015.5 to 2015.8
* 4f3596a Add comment for test=true w/o changes ret and add changes dict example (`#33254`_)
* 2a30c48 Update Git Policy docs to match Contribution guide (`#33252`_)
* 056c273 Fix `#33238`_ (`#33239`_)
* 1cd34ab Properly report on invalid gitfs/git_pillar/winrepo repos (`#33245`_)
- **PR** `#33253`_: (*rallytime*) Update the release process docs
* 8c2c5b1 update 2015.8.9 release notes (`#33251`_)
* 8ee8ee3 Handle ipv6 error as an exception (`#33246`_)
* 855bed3 Check rendered YAML for invalid keys (`#33213`_)
@ -524,6 +546,7 @@ Changes:
.. _`#32223`: https://github.com/saltstack/salt/pull/32223
.. _`#32227`: https://github.com/saltstack/salt/pull/32227
.. _`#32230`: https://github.com/saltstack/salt/pull/32230
.. _`#32238`: https://github.com/saltstack/salt/pull/32238
.. _`#32239`: https://github.com/saltstack/salt/pull/32239
.. _`#32243`: https://github.com/saltstack/salt/pull/32243
.. _`#32248`: https://github.com/saltstack/salt/pull/32248
@ -709,6 +732,7 @@ Changes:
.. _`#33129`: https://github.com/saltstack/salt/pull/33129
.. _`#33132`: https://github.com/saltstack/salt/pull/33132
.. _`#33135`: https://github.com/saltstack/salt/pull/33135
.. _`#33137`: https://github.com/saltstack/salt/pull/33137
.. _`#33139`: https://github.com/saltstack/salt/pull/33139
.. _`#33140`: https://github.com/saltstack/salt/pull/33140
.. _`#33141`: https://github.com/saltstack/salt/pull/33141
@ -722,6 +746,7 @@ Changes:
.. _`#33160`: https://github.com/saltstack/salt/pull/33160
.. _`#33161`: https://github.com/saltstack/salt/pull/33161
.. _`#33164`: https://github.com/saltstack/salt/pull/33164
.. _`#33172`: https://github.com/saltstack/salt/pull/33172
.. _`#33178`: https://github.com/saltstack/salt/pull/33178
.. _`#33180`: https://github.com/saltstack/salt/pull/33180
.. _`#33181`: https://github.com/saltstack/salt/pull/33181
@ -740,5 +765,18 @@ Changes:
.. _`#33219`: https://github.com/saltstack/salt/pull/33219
.. _`#33222`: https://github.com/saltstack/salt/pull/33222
.. _`#33224`: https://github.com/saltstack/salt/pull/33224
.. _`#33236`: https://github.com/saltstack/salt/pull/33236
.. _`#33237`: https://github.com/saltstack/salt/pull/33237
.. _`#33239`: https://github.com/saltstack/salt/pull/33239
.. _`#33244`: https://github.com/saltstack/salt/pull/33244
.. _`#33245`: https://github.com/saltstack/salt/pull/33245
.. _`#33246`: https://github.com/saltstack/salt/pull/33246
.. _`#33251`: https://github.com/saltstack/salt/pull/33251
.. _`#33252`: https://github.com/saltstack/salt/pull/33252
.. _`#33253`: https://github.com/saltstack/salt/pull/33253
.. _`#33254`: https://github.com/saltstack/salt/pull/33254
.. _`#33274`: https://github.com/saltstack/salt/pull/33274
.. _`#33293`: https://github.com/saltstack/salt/pull/33293
.. _`#33294`: https://github.com/saltstack/salt/pull/33294
.. _`#33300`: https://github.com/saltstack/salt/pull/33300
.. _`#33305`: https://github.com/saltstack/salt/pull/33305

View file

@ -327,10 +327,12 @@ def create(vm_):
def create_lb(kwargs=None, call=None):
'''
r'''
Create a load-balancer configuration.
CLI Example:
.. code-block:: bash
salt-cloud -f create_lb dimensiondata \
name=dev-lb port=80 protocol=http \
members=w1,w2,w3 algorithm=ROUND_ROBIN
@ -455,10 +457,14 @@ def ssh_interface(vm_):
def stop(name, call=None):
'''
Stop a VM in DimensionData.
name
name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
conn = get_conn()
@ -474,10 +480,14 @@ def stop(name, call=None):
def start(name, call=None):
'''
Start a VM in DimensionData.
name
:param str name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''

View file

@ -53,6 +53,8 @@ log = logging.getLogger(__name__)
__virtualname__ = 'vultr'
DETAILS = {}
def __virtual__():
'''
@ -75,6 +77,31 @@ def get_configured_provider():
)
def _cache_provider_details(conn=None):
'''
Provide a place to hang onto results of --list-[locations|sizes|images]
so we don't have to go out to the API and get them every time.
'''
DETAILS['avail_locations'] = {}
DETAILS['avail_sizes'] = {}
DETAILS['avail_images'] = {}
locations = avail_locations(conn)
images = avail_images(conn)
sizes = avail_sizes(conn)
for key, location in locations.iteritems():
DETAILS['avail_locations'][location['name']] = location
DETAILS['avail_locations'][key] = location
for key, image in images.iteritems():
DETAILS['avail_images'][image['name']] = image
DETAILS['avail_images'][key] = image
for key, vm_size in sizes.iteritems():
DETAILS['avail_sizes'][vm_size['name']] = vm_size
DETAILS['avail_sizes'][key] = vm_size
def avail_locations(conn=None):
'''
return available datacenter locations
@ -89,7 +116,7 @@ def avail_sizes(conn=None):
return _query('plans/list')
def avail_images():
def avail_images(conn=None):
'''
Return available images
'''
@ -146,7 +173,10 @@ def destroy(name):
'''
node = show_instance(name, call='action')
params = {'SUBID': node['SUBID']}
return _query('server/destroy', method='POST', decode=False, data=urllib.urlencode(params))
result = _query('server/destroy', method='POST', decode=False, data=urllib.urlencode(params))
if result['body'] == '' and result['text'] == '':
return True
return result
def stop(*args, **kwargs):
@ -180,6 +210,17 @@ def show_instance(name, call=None):
return nodes[name]
def _lookup_vultrid(which_key, availkey, keyname):
if DETAILS == {}:
_cache_provider_details()
which_key = str(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False
def create(vm_):
'''
Create a single VM from a data dict
@ -211,11 +252,26 @@ def create(vm_):
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name {0}'.format(vm_['image']))
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name {0}'.format(vm_['size']))
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name {0}'.format(vm_['location']))
return False
kwargs = {
'label': vm_['name'],
'OSID': vm_['image'],
'VPSPLANID': vm_['size'],
'DCID': vm_['location'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
@ -232,6 +288,20 @@ def create(vm_):
try:
data = _query('server/create', method='POST', data=urllib.urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error('Error creating {0} on Vultr\n\n'
'Vultr API returned {1}\n'.format(vm_['name'], data))
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
salt.utils.cloud.fire_event(
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
{'kwargs': kwargs},
transport=__opts__['transport'],
)
return False
except Exception as exc:
log.error(
'Error creating {0} on Vultr\n\n'
@ -242,6 +312,13 @@ def create(vm_):
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
salt.utils.cloud.fire_event(
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
{'kwargs': kwargs},
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
@ -353,8 +430,7 @@ def _query(path, method='GET', data=None, params=None, header_dict=None, decode=
hide_fields=['api_key'],
opts=__opts__,
)
if 'dict' in result:
return result['dict']
return
return result

View file

@ -55,8 +55,10 @@ if salt.utils.is_windows():
# support in ZeroMQ, we want the default to be something that has a
# chance of working.
_DFLT_IPC_MODE = 'tcp'
_MASTER_TRIES = -1
else:
_DFLT_IPC_MODE = 'ipc'
_MASTER_TRIES = 1
FLO_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
@ -1019,7 +1021,7 @@ DEFAULT_MINION_OPTS = {
'transport': 'zeromq',
'auth_timeout': 60,
'auth_tries': 7,
'master_tries': 1,
'master_tries': _MASTER_TRIES,
'auth_safemode': False,
'random_master': False,
'minion_floscript': os.path.join(FLO_DIR, 'minion.flo'),

View file

@ -2,14 +2,15 @@
'''
An engine that reads messages from Slack and sends them to the Salt
event bus. Alternatively Salt commands can be sent to the Salt master
via Slack by setting the control paramter to True and using command
prefaced with a !.
via Slack by setting the control parameter to ``True`` and using command
prefaced with a ``!``.
.. versionadded: 2016.3.0
:configuration:
:configuration: Example configuration
.. code-block:: yaml
Example configuration
engines:
slack:
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'

View file

@ -66,20 +66,24 @@ class SplayExecutor(ModuleExecutorBase):
Splay a salt function call execution time across minions over
a number of seconds (default: 600)
NOTE: You *probably* want to use --async here and look up the job results later.
If you're dead set on getting the output from the CLI command, then make
sure to set the timeout (with the -t flag) to something greater than the
splaytime (max splaytime + time to execute job).
Otherwise, it's very likely that the cli will time out before the job returns.
.. note::
You *probably* want to use --async here and look up the job results later.
If you're dead set on getting the output from the CLI command, then make
sure to set the timeout (with the -t flag) to something greater than the
splaytime (max splaytime + time to execute job).
Otherwise, it's very likely that the cli will time out before the job returns.
CLI Example:
# With default splaytime
salt --async '*' splay.splay pkg.install cowsay version=3.03-8.el6
# With specified splaytime (5 minutes) and timeout with 10 second buffer
salt -t 310 '*' splay.splay 300 pkg.version cowsay
.. code-block:: bash
# With default splaytime
salt --async '*' splay.splay pkg.install cowsay version=3.03-8.el6
.. code-block:: bash
# With specified splaytime (5 minutes) and timeout with 10 second buffer
salt -t 310 '*' splay.splay 300 pkg.version cowsay
'''
my_delay = self._calc_splay(__grains__['id'], splaytime=self.splaytime)
log.debug("Splay is sleeping {0} secs on {1}".format(my_delay, self.fun_name))

View file

@ -282,10 +282,11 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
salt '*' bcache.cache_make sdb reserved=10% block_size=4096
:param reserved: if dev is a full device, create a partitition table with this size empty.
.. note::
this increases the amount of reserved space available to SSD garbage collectors,
potentially (vastly) increasing performance
:param reserved: if dev is a full device, create a partition table with this size empty.
.. note::
this increases the amount of reserved space available to SSD garbage collectors,
potentially (vastly) increasing performance
:param block_size: Block size of the cache; defaults to devices' logical block size
:param force: Overwrite existing BCache sets
:param attach: Attach all existing backend devices immediately

View file

@ -54,7 +54,9 @@ def jobs():
Return a list of the currently installed job names.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.jobs
'''
job_names = _jobs().keys()
@ -67,7 +69,9 @@ def has_job(name):
Return whether the given job is currently configured.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.has_job my-job
'''
return name in _jobs()
@ -78,7 +82,9 @@ def job(name):
Return the current server configuration for the specified job.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.job my-job
'''
jobs = _jobs()
@ -92,7 +98,9 @@ def update_job(name, config):
Update the specified job with the given configuration.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.update_job my-job '<config yaml>'
'''
if 'name' not in config:
@ -123,7 +131,9 @@ def rm_job(name):
Remove the specified job from the server.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.rm_job my-job
'''
response = salt.utils.http.query(

View file

@ -692,10 +692,10 @@ def run(cmd,
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
:param str cmd: The command to run. ex: 'ls -lart /home'
:param str cmd: The command to run. ex: ``ls -lart /home``
:param str cwd: The current working directory to execute the command in,
defaults to `/root` (`C:\` in windows)
defaults to ``/root`` (``C:\`` in windows)
:param str stdin: A string of standard input can be specified for the
command to be run using the ``stdin`` parameter. This can be useful in cases

View file

@ -236,7 +236,7 @@ def status(name, sig=None):
if sig:
return bool(__salt__['status.pid'](sig))
cmd = _service_cmd(name, 'status')
return not __salt__['cmd.retcode'](cmd)
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
def _osrel():

View file

@ -16,10 +16,11 @@ This module allows to deal with docker-compose file in a directory.
This is a first version only, the following commands are missing at the moment
but will be built later on if the community is interested in this module:
- run
- logs
- port
- scale
- run
- logs
- port
- scale
Installation Prerequisites
--------------------------
@ -43,6 +44,7 @@ can issue the command create, it takes two arguments the path where the
docker-compose.yml will be stored and the content of this latter:
.. code-block:: bash
# salt-call -l debug dockercompose.create /tmp/toto '
database:
image: mongo:3.0
@ -54,25 +56,25 @@ argument (the path where the docker-compose.yml will be read) and an optional
python list which corresponds to the services names:
.. code-block:: bash
# salt-call -l debug dockercompose.up /tmp/toto
# salt-call -l debug dockercompose.restart /tmp/toto '[database]'
# salt-call -l debug dockercompose.stop /tmp/toto
# salt-call -l debug dockercompose.rm /tmp/toto
Docker-compose method supported
-------------------------------
- up
- restart
- stop
- start
- pause
- unpause
- kill
- rm
- ps
- pull
- build
- up
- restart
- stop
- start
- pause
- unpause
- kill
- rm
- ps
- pull
- build
Functions
---------

View file

@ -46,7 +46,7 @@ def _cmd():
'''
service = salt.utils.which('service')
if not service:
raise CommandNotFoundError
raise CommandNotFoundError('\'service\' command not found')
return service
@ -376,4 +376,6 @@ def status(name, sig=None):
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '{0} {1} onestatus'.format(_cmd(), name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True)

View file

@ -115,7 +115,7 @@ def status(name, sig=None):
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '/etc/rc.d/{0} onestatus'.format(name)
return not __salt__['cmd.retcode'](cmd)
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
def _get_svc(rcd, service_status):

View file

@ -105,7 +105,7 @@ def status(name, sig=None):
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
def reload_(name):

View file

@ -49,6 +49,7 @@ Example:
# Import python libs
from __future__ import absolute_import
import json
import logging
# Import 3rd-party libs

View file

@ -1869,9 +1869,12 @@ def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
)
)
if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
ipaddr = '[{0}]'.format(kwargs['hostname'])
else:
try:
if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
ipaddr = '[{0}]'.format(kwargs['hostname'])
else:
ipaddr = kwargs['hostname']
except socket.error:
ipaddr = kwargs['hostname']
cmd = (
@ -1978,9 +1981,12 @@ def sftp_file(dest_path, contents=None, kwargs=None, local_file=None):
)
)
if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
ipaddr = '[{0}]'.format(kwargs['hostname'])
else:
try:
if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
ipaddr = '[{0}]'.format(kwargs['hostname'])
else:
ipaddr = kwargs['hostname']
except socket.error:
ipaddr = kwargs['hostname']
cmd = 'echo "put {0} {1} {2}" | sftp {3} {4[username]}@{5}'.format(

View file

@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-
'''
Integration tests for Vultr
'''
# Import Python Libs
from __future__ import absolute_import
import os
import random
import string
import time
# Import Salt Testing Libs
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
def __random_name(size=6):
    '''
    Build a pseudo-random cloud instance name of the form
    ``CLOUD-TEST-XXXXXX``, where each ``X`` is drawn uniformly from
    uppercase ASCII letters and digits.

    size
        Number of random characters in the suffix (default: 6).
    '''
    alphabet = string.ascii_uppercase + string.digits
    suffix = []
    for _ in range(size):
        suffix.append(random.choice(alphabet))
    return 'CLOUD-TEST-' + ''.join(suffix)
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = __random_name()
# Name of the salt-cloud provider configuration section exercised by these tests
PROVIDER_NAME = 'vultr'
class VultrTest(integration.ShellCase):
    '''
    Integration tests for the Vultr cloud provider in Salt-Cloud.

    These tests are expensive: they talk to the live Vultr API and, in
    ``test_instance``, create and destroy a real cloud instance. They are
    skipped unless a ``vultr-config`` provider configuration with real
    credentials is present (see ``setUp``).
    '''

    @expensiveTest
    def setUp(self):
        '''
        Sets up the test requirements.

        Skips the test when the Vultr provider configuration is missing,
        or when any of ``api_key``, ``ssh_key_file``, or ``ssh_key_name``
        is left blank in the provider conf file.
        '''
        super(VultrTest, self).setUp()

        # check if appropriate cloud provider and profile files are present
        profile_str = 'vultr-config'
        providers = self.run_cloud('--list-providers')

        if profile_str + ':' not in providers:
            self.skipTest(
                'Configuration file for {0} was not found. Check {0}.conf files '
                'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
                .format(PROVIDER_NAME)
            )

        # check if api_key, ssh_key_file, and ssh_key_names are present
        config = cloud_providers_config(
            os.path.join(
                integration.FILES,
                'conf',
                'cloud.providers.d',
                PROVIDER_NAME + '.conf'
            )
        )

        api_key = config[profile_str][PROVIDER_NAME]['api_key']
        ssh_file = config[profile_str][PROVIDER_NAME]['ssh_key_file']
        ssh_name = config[profile_str][PROVIDER_NAME]['ssh_key_name']

        if api_key == '' or ssh_file == '' or ssh_name == '':
            self.skipTest(
                'An API key, an ssh key file, and an ssh key name '
                'must be provided to run these tests. Check '
                'tests/integration/files/conf/cloud.providers.d/{0}.conf'
                .format(PROVIDER_NAME)
            )

    def test_list_images(self):
        '''
        Tests the return of running the --list-images command for Vultr
        '''
        image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
        self.assertIn(
            'Debian 8 x64 (jessie)',
            [i.strip() for i in image_list]
        )

    def test_list_locations(self):
        '''
        Tests the return of running the --list-locations command for Vultr
        '''
        location_list = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME))
        self.assertIn(
            'New Jersey',
            [i.strip() for i in location_list]
        )

    def test_list_sizes(self):
        '''
        Tests the return of running the --list-sizes command for Vultr
        '''
        size_list = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME))
        self.assertIn(
            '32768 MB RAM,110 GB SSD,40.00 TB BW',
            [i.strip() for i in size_list]
        )

    # Commented for now, Vultr driver does not yet support key management
    # def test_key_management(self):
    #     '''
    #     Test key management
    #     '''
    #     pub = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example'
    #     finger_print = '3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa'
    #
    #     _key = self.run_cloud('-f create_key {0} name="MyPubKey" public_key="{1}"'.format(PROVIDER_NAME, pub))
    #
    #     # Upload public key
    #     self.assertIn(
    #         finger_print,
    #         [i.strip() for i in _key]
    #     )
    #
    #     try:
    #         # List all keys
    #         list_keypairs = self.run_cloud('-f list_keypairs {0}'.format(PROVIDER_NAME))
    #
    #         self.assertIn(
    #             finger_print,
    #             [i.strip() for i in list_keypairs]
    #         )
    #
    #         # List key
    #         show_keypair = self.run_cloud('-f show_keypair {0} keyname={1}'.format(PROVIDER_NAME, 'MyPubKey'))
    #
    #         self.assertIn(
    #             finger_print,
    #             [i.strip() for i in show_keypair]
    #         )
    #     except AssertionError:
    #         # Delete the public key if the above assertions fail
    #         self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print))
    #         raise
    #
    #     # Delete public key
    #     self.assertTrue(self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print)))

    def test_instance(self):
        '''
        Test creating and deleting an instance on Vultr
        '''
        # check if instance with salt installed returned
        try:
            self.assertIn(
                INSTANCE_NAME,
                [i.strip() for i in self.run_cloud('-p vultr-test {0}'.format(INSTANCE_NAME))]
            )
        except AssertionError:
            # Creation failed part-way: make sure nothing is left behind
            # before propagating the failure.
            self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
            raise

        # Vultr won't let us delete an instance less than 5 minutes old;
        # sleep 7 minutes to leave a comfortable buffer.
        time.sleep(420)

        # delete the instance; a failing assertIn propagates on its own,
        # so no try/except wrapper is needed here.
        results = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
        self.assertIn(
            'True',
            [i.strip() for i in results]
        )

        # Final clean-up of created instance, in case something went wrong.
        # This was originally in a tearDown function, but that didn't make sense
        # To run this for each test when not all tests create instances.
        # Also, Vultr won't let instances be deleted unless they have been alive for 5 minutes.
        # If we exceed 6 minutes and the instance is still there, quit
        ct = 0
        while ct < 12 and INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]:
            self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
            time.sleep(30)
            ct += 1
# Allow running this test module directly (outside the full test suite).
if __name__ == '__main__':
    from integration import run_tests
    run_tests(VultrTest)

View file

@ -0,0 +1,6 @@
# Test fixture: salt-cloud provider configuration for the Vultr driver.
# The integration tests skip themselves unless api_key, ssh_key_file,
# and ssh_key_name all contain non-empty (real) values.
vultr-config:
  provider: vultr
  # NOTE(review): placeholder credential — replace with a real API key to run the tests
  api_key: asdfhlkasdjfklsdfj;slkdfjas;dlkj
  ssh_key_file: '/root/.ssh/vultr_test.pub'
  ssh_key_name: 'vultr_test'
  location: New Jersey