mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Merge branch '2015.8' into '2016.3'
Conflicts:
- conf/minion
- doc/ref/configuration/minion.rst
- salt/modules/boto_elb.py
- salt/modules/sudo.py
- salt/states/boto_elb.py
- salt/transport/zeromq.py
commit 35ad788470
35 changed files with 924 additions and 56 deletions
conf/minion (16 lines changed)
@@ -70,11 +70,15 @@
 # The user to run salt.
 #user: root

-# Setting sudo_user will cause salt to run all execution modules under an sudo
-# to the user given in sudo_user. The user under which the salt minion process
-# itself runs will still be that provided in the user config above, but all
-# execution modules run by the minion will be rerouted through sudo.
-#sudo_user: saltdev
+# The user to run salt remote execution commands as via sudo. If this option is
+# enabled then sudo will be used to change the active user executing the remote
+# command. If enabled the user will need to be allowed access via the sudoers
+# file for the user that the salt minion is configured to run as. The most
+# common option would be to use the root user. If this option is set the user
+# option should also be set to a non-root user. If migrating from a root minion
+# to a non root minion the minion cache should be cleared and the minion pki
+# directory will need to be changed to the ownership of the new user.
+#sudo_user: root

 # Specify the location of the daemon process ID file.
 #pidfile: /var/run/salt-minion.pid
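The sudo_user option described in this hunk amounts to wrapping execution-module calls in sudo. A minimal sketch of the idea, with assumed helper names (this is not the actual salt/modules/sudo.py):

.. code-block:: python

    # Hedged sketch: reroute an execution function through sudo by invoking
    # salt-call as the configured sudo_user. Names here are illustrative.
    import json
    import subprocess

    def run_via_sudo(fun, args, sudo_user):
        cmd = ['sudo', '-u', sudo_user, 'salt-call', '--out', 'json', fun]
        cmd.extend(str(a) for a in args)
        out = subprocess.check_output(cmd)
        # salt-call's JSON output nests the return under the 'local' key
        return json.loads(out)['local']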
@@ -323,7 +327,7 @@
 # Determines whether or not the salt minion should run scheduled mine updates.
 # Defaults to "True". Set to "False" to disable the scheduled mine updates
 # (this essentially just does not add the mine update function to the minion's
-# scheduler)
+# scheduler).
 #mine_enabled: True

 # Determines whether or not scheduled mine updates should be accompanied by a job
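As the comment above says, mine_enabled only controls whether a mine.update entry is added to the minion's scheduler. A hedged sketch of that wiring (the '__mine_interval' key and option names are assumptions, not a quote of the minion code):

.. code-block:: python

    # Hedged sketch: build the scheduler entry that drives scheduled mine
    # updates. When mine_enabled is False no entry is produced, which is all
    # that "disabling" the mine updates means.
    def build_mine_schedule(opts):
        if not opts.get('mine_enabled', True):
            return {}
        return {
            '__mine_interval': {
                'function': 'mine.update',
                'minutes': opts.get('mine_interval', 60),
                'jid_include': True,
                'return_job': opts.get('mine_return_job', False),
            }
        }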
doc/conf.py

@@ -195,7 +195,7 @@ project = 'Salt'
 copyright = '2016 SaltStack, Inc.'

 version = salt.version.__version__
-latest_release = '2015.8.8'  # latest release
+latest_release = '2015.8.9'  # latest release
 previous_release = '2015.5.10'  # latest release from previous branch
 previous_release_dir = '2015.5'  # path on web server for previous branch
 next_release = '2016.3.0'  # latest release from previous branch
doc/faq.rst (19 lines changed)
@@ -358,3 +358,22 @@ When possible, you should target sensitive operations and data using the Minion
 ID. If the Minion ID of a system changes, the Salt Minion's public key must be
 re-accepted by an administrator on the Salt Master, making it less vulnerable
 to impersonation attacks.
+
+Why Did the Value for a Grain Change on Its Own?
+------------------------------------------------
+
+This is usually the result of an upstream change in an OS distribution that
+replaces or removes something that Salt was using to detect the grain.
+Fortunately, when this occurs, you can use Salt to fix it with a command
+similar to the following:
+
+.. code-block:: bash
+
+    salt -G 'grain:ChangedValue' grains.setvals "{'grain': 'OldValue'}"
+
+(Replacing *grain*, *ChangedValue*, and *OldValue* with
+the grain and values that you want to change / set.)
+
+You should also `file an issue <https://github.com/saltstack/salt/issues>`_
+describing the change so it can be fixed in Salt.
+
doc/ref/configuration/master.rst

@@ -295,7 +295,8 @@ Verify and set permissions on configuration directories at startup.

 Default: ``24``

-Set the number of hours to keep old job information.
+Set the number of hours to keep old job information. Note that setting this option
+to ``0`` disables the cache cleaner.

 .. code-block:: yaml
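A hedged sketch of how these two behaviors fit together (assumed function names; the real logic lives in the master's maintenance process and in local_cache.clean_old_jobs, shown later in this diff):

.. code-block:: python

    import os
    import time

    def job_is_expired(jid_file, keep_jobs_hours):
        # A job is purged once its age in hours exceeds keep_jobs.
        hours_old = (time.time() - os.stat(jid_file).st_ctime) / 3600.0
        return hours_old > keep_jobs_hours

    def run_cache_cleaner(opts, jid_files):
        # keep_jobs: 0 disables the cleaner outright; it does not mean
        # "expire immediately".
        if opts.get('keep_jobs', 24) == 0:
            return []
        return [f for f in jid_files if job_is_expired(f, opts['keep_jobs'])]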
@@ -407,13 +408,33 @@ grains for the master.

 Default: ``True``

-The master maintains a job cache. While this is a great addition, it can be
-a burden on the master for larger deployments (over 5000 minions).
+The master maintains a temporary job cache. While this is a great addition, it
+can be a burden on the master for larger deployments (over 5000 minions).
+Disabling the job cache will make previously executed jobs unavailable to
+the jobs system and is not generally recommended. Normally it is wise to make
+sure the master has access to a faster IO system or a tmpfs is mounted to the
+jobs dir.

 .. code-block:: yaml

     job_cache: True

+.. note::
+
+    Setting the ``job_cache`` to ``False`` will not cache minion returns, but
+    the JID directory for each job is still created. The creation of the JID
+    directories is necessary because Salt uses those directories to check for
+    JID collisions. By setting this option to ``False``, the job cache
+    directory, which is ``/var/cache/salt/master/jobs/`` by default, will be
+    smaller, but the JID directories will still be present.
+
+    Note that the :conf_master:`keep_jobs` option can be set to a lower value,
+    such as ``1``, to limit the number of hours jobs are stored in the job
+    cache. (The default is 24 hours.)
+
+    Please see the :ref:`Managing the Job Cache <managing_the_job_cache>`
+    documentation for more information.
+
 .. conf_master:: minion_data_cache

 ``minion_data_cache``
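The JID-collision point in the note can be made concrete with a short, hedged sketch (the JID generator is passed in because its exact signature varies across Salt releases; treat this as illustration, not the master's code):

.. code-block:: python

    import os

    def prep_unique_jid(jid_root, gen_jid):
        # Regenerate the JID until its directory does not already exist;
        # this is why JID directories are created even with job_cache: False.
        jid = gen_jid()
        while os.path.exists(os.path.join(jid_root, jid)):
            jid = gen_jid()
        os.makedirs(os.path.join(jid_root, jid))
        return jid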
doc/ref/configuration/minion.rst

@@ -248,12 +248,12 @@ The user to run the Salt processes

     user: root

-.. conf_minion:: sudo_runas
+.. conf_minion:: sudo_user

-``sudo_runas``
+``sudo_user``
 --------------

-Default: None
+Default: ``''``

 The user to run salt remote execution commands as via sudo. If this option is
 enabled then sudo will be used to change the active user executing the remote

@@ -268,24 +268,6 @@ need to be changed to the ownership of the new user.

     sudo_user: root

-.. conf_minion:: sudo_user
-
-``sudo_user``
--------------
-
-Default: ``''``
-
-Setting ``sudo_user`` will cause salt to run all execution modules under a
-sudo to the user given in ``sudo_user``. The user under which the salt minion
-process itself runs will still be that provided in :conf_minion:`user` above,
-but all execution modules run by the minion will be rerouted through sudo.
-
-.. code-block:: yaml
-
-    sudo_user: saltadm
-
-
 .. conf_minion:: pidfile

 ``pidfile``
 -----------

@@ -506,7 +488,6 @@ to enable set grains_cache to ``True``.

     grains_cache: False

-
 .. conf_minion:: grains_deep_merge

 ``grains_deep_merge``

@@ -555,7 +536,7 @@ With ``grains_deep_merge``, the result will be:
 ``mine_enabled``
 ----------------

-.. versionadded:: 2016.3.0
+.. versionadded:: 2015.8.10

 Default: ``True``

@@ -571,7 +552,7 @@ False then the mine update function will not get added to the scheduler for the
 ``mine_return_job``
 -------------------

-.. versionadded:: 2016.3.0
+.. versionadded:: 2015.8.10

 Default: ``False``
doc/topics/jobs/job_cache.rst

@@ -1,3 +1,5 @@
+.. _managing_the_job_cache:
+
 ======================
 Managing the Job Cache
 ======================

@@ -27,6 +29,37 @@ Salt Master configuration file. The value passed in is measured via hours:

     keep_jobs: 24

+Reducing the Size of the Default Job Cache
+------------------------------------------
+
+The Default Job Cache can sometimes be a burden on larger deployments (over 5000
+minions). Disabling the job cache will make previously executed jobs unavailable
+to the jobs system and is not generally recommended. Normally it is wise to make
+sure the master has access to a faster IO system or a tmpfs is mounted to the
+jobs dir.
+
+However, you can disable the :conf_master:`job_cache` by setting it to ``False``
+in the Salt Master configuration file. Setting this value to ``False`` means that
+the Salt Master will no longer cache minion returns, but a JID directory and ``jid``
+file for each job will still be created. This JID directory is necessary for
+checking for and preventing JID collisions.
+
+The default location for the job cache is the ``/var/cache/salt/master/jobs/``
+directory.
+
+Setting the :conf_master:`job_cache` to ``False`` in addition to setting
+the :conf_master:`keep_jobs` option to a smaller value, such as ``1``, in the Salt
+Master configuration file will reduce the size of the Default Job Cache, and thus
+the burden on the Salt Master.
+
+.. note::
+
+    Changing the ``keep_jobs`` option sets the number of hours to keep old job
+    information and defaults to ``24`` hours. Do not set this value to ``0`` when
+    trying to make the cache cleaner run more frequently, as this means the cache
+    cleaner will never run.
+
+
 Additional Job Cache Options
 ============================
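To judge whether lowering keep_jobs is worthwhile, it helps to measure the cache first. A small, hedged helper (not part of Salt):

.. code-block:: python

    import os

    def job_cache_size(jobs_dir='/var/cache/salt/master/jobs/'):
        # Walk the Default Job Cache and total up file sizes in bytes.
        total = 0
        for root, _, files in os.walk(jobs_dir):
            for name in files:
                total += os.path.getsize(os.path.join(root, name))
        return total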
doc/topics/releases/2015.8.9.rst

@@ -5,6 +5,18 @@ Salt 2015.8.9 Release Notes
 Version 2015.8.9 is a bugfix release for :doc:`2015.8.0
 </topics/releases/2015.8.0>`.

+.. admonition:: Mint Linux: Important Post-Upgrade Instructions
+
+    As a result of some upstream changes, the ``os`` grain on Mint Linux is now
+    being detected as ``LinuxMint`` (:issue:`33295`). Run the following command
+    **after you upgrade to 2015.8.9** to reset the ``os`` grain to ``Mint`` and
+    the ``os_family`` grain to ``Debian``:
+
+    .. code-block:: bash
+
+        salt -G 'os:LinuxMint' grains.setvals "{'os': 'Mint', 'os_family': 'Debian'}"
+
+
 Changes for v2015.8.8..v2015.8.9
 --------------------------------
doc/topics/tutorials/writing_tests.rst

@@ -21,12 +21,14 @@ allows for the fully automated run of integration and/or unit tests from a
 single interface.

 Salt's test suite is located under the ``tests`` directory in the root of Salt's
-code base and is divided into two main types of tests: `unit tests and integration
-tests <Integration vs. Unit>`_. The ``unit`` and ``integration`` sub test suites
-are located in the ``tests`` directory, which is where the majority of Salt's test
-cases are housed.
+code base and is divided into two main types of tests:
+:ref:`unit tests and integration tests <integration-vs-unit>`. The ``unit`` and
+``integration`` sub-test-suites are located in the ``tests`` directory, which is
+where the majority of Salt's test cases are housed.


+.. _getting_set_up_for_tests:
+
 Getting Set Up For Tests
 ========================

@@ -94,7 +96,7 @@ testing the logic contained inside Proxy Minion functions.
 Running the Test Suite
 ======================

-Once all of the `requirements<Getting Set Up For Tests>`_ are installed, the
+Once all of the :ref:`requirements <getting_set_up_for_tests>` are installed, the
 ``runtests.py`` file in the ``salt/tests`` directory is used to instantiate
 Salt's test suite:
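In practice, once the requirements above are installed, running ``runtests.py`` with its ``--help`` flag prints the selectors for running only the unit suite or only the integration suite.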
pkg/deb/salt-api.environment (new file, 4 lines)
@@ -0,0 +1,4 @@
# Controls whether or not service is restarted automatically when it exits.
# See the manpage for systemd.service(5) for possible values for the "Restart="
# option.
RESTART='no'
pkg/deb/salt-api.init (new executable file, 99 lines)
@@ -0,0 +1,99 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          salt-api
# Required-Start:    $remote_fs $network
# Required-Stop:     $remote_fs $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: REST API for Salt
# Description:       salt-api provides a REST interface to the Salt master
### END INIT INFO

# Author: Michael Prokop <mika@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="REST API for Salt"
NAME=salt-api
DAEMON=/usr/bin/salt-api
DAEMON_ARGS="-d"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

. /lib/init/vars.sh
. /lib/lsb/init-functions

do_start() {
    pid=$(pidofproc -p $PIDFILE $DAEMON)
    if [ -n "$pid" ] ; then
        log_begin_msg "$DESC already running."
        log_end_msg 0
        exit 0
    fi

    log_daemon_msg "Starting salt-api daemon: "
    start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- $DAEMON_ARGS
    log_end_msg $?
}

do_stop() {
    log_begin_msg "Stopping $DESC ..."
    start-stop-daemon --stop --retry TERM/5 --quiet --oknodo --pidfile $PIDFILE
    RC=$?
    [ $RC -eq 0 ] && rm -f $PIDFILE
    log_end_msg $RC
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    #reload)
        # not implemented
        #;;
    restart|force-reload)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

exit 0
pkg/deb/salt-api.service (new file, 15 lines)
@@ -0,0 +1,15 @@
[Unit]
Description=REST API for Salt
After=network.target

[Service]
EnvironmentFile=/etc/default/salt-api
LimitNOFILE=8192
Type=simple
NotifyAccess=all
ExecStart=/usr/bin/salt-api
KillMode=process
Restart=$RESTART

[Install]
WantedBy=multi-user.target
pkg/deb/salt-master.environment (new file, 4 lines)
@@ -0,0 +1,4 @@
# Controls whether or not service is restarted automatically when it exits.
# See the manpage for systemd.service(5) for possible values for the "Restart="
# option.
RESTART='no'
pkg/deb/salt-master.init (new executable file, 112 lines)
@@ -0,0 +1,112 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          salt-master
# Required-Start:    $remote_fs $network
# Required-Stop:     $remote_fs $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Salt Master daemon
# Description:       The Salt Master is the central server (management
#                    component) to which all Salt Minions connect
### END INIT INFO

# Author: Michael Prokop <mika@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Salt Master daemon"
NAME=salt-master
DAEMON=/usr/bin/salt-master
DAEMON_ARGS="-d"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

. /lib/lsb/init-functions

do_start() {
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    pid=$(pidofproc -p $PIDFILE $DAEMON)
    if [ -n "$pid" ] ; then
        return 1
    fi

    start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

do_stop() {
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    pids=$(pidof -x $DAEMON)
    if [ $? -eq 0 ] ; then
        echo $pids | xargs kill > /dev/null 2>&1
        RETVAL=0
    else
        RETVAL=1
    fi

    [ "$RETVAL" = 2 ] && return 2
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    #reload)
        # not implemented
        #;;
    restart|force-reload)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

exit 0
pkg/deb/salt-master.service (new file, 15 lines)
@@ -0,0 +1,15 @@
[Unit]
Description=The Salt Master daemon
After=network.target

[Service]
EnvironmentFile=/etc/default/salt-master
LimitNOFILE=16384
Type=simple
NotifyAccess=all
ExecStart=/usr/bin/salt-master
KillMode=process
Restart=$RESTART

[Install]
WantedBy=multi-user.target
pkg/deb/salt-minion.environment (new file, 4 lines)
@@ -0,0 +1,4 @@
# Controls whether or not service is restarted automatically when it exits.
# See the manpage for systemd.service(5) for possible values for the "Restart="
# option.
RESTART='no'
pkg/deb/salt-minion.init (new executable file, 107 lines)
@@ -0,0 +1,107 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          salt-minion
# Required-Start:    $remote_fs $network
# Required-Stop:     $remote_fs $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Salt Minion daemon
# Description:       The Salt Minion is the agent component of Salt. It listens
#                    for instructions from the Master, runs jobs, and returns
#                    results back to the Salt Master
### END INIT INFO

# Author: Michael Prokop <mika@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Salt Minion daemon"
NAME=salt-minion
DAEMON=/usr/bin/salt-minion
DAEMON_ARGS="-d"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

. /lib/lsb/init-functions

do_start() {
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    pid=$(pidofproc -p $PIDFILE $DAEMON)
    if [ -n "$pid" ] ; then
        return 1
    fi

    start-stop-daemon --start --quiet --background --pidfile $PIDFILE --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

do_stop() {
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    #reload)
        # not implemented
        #;;
    restart|force-reload)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

exit 0
pkg/deb/salt-minion.service (new file, 14 lines)
@@ -0,0 +1,14 @@
[Unit]
Description=The Salt Minion daemon
After=network.target

[Service]
EnvironmentFile=/etc/default/salt-minion
Type=simple
LimitNOFILE=8192
ExecStart=/usr/bin/salt-minion
KillMode=process
Restart=$RESTART

[Install]
WantedBy=multi-user.target
pkg/deb/salt-syndic.environment (new file, 4 lines)
@@ -0,0 +1,4 @@
# Controls whether or not service is restarted automatically when it exits.
# See the manpage for systemd.service(5) for possible values for the "Restart="
# option.
RESTART='no'
pkg/deb/salt-syndic.init (new executable file, 107 lines)
@@ -0,0 +1,107 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          salt-syndic
# Required-Start:    $remote_fs $network
# Required-Stop:     $remote_fs $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Salt Syndic daemon
# Description:       The Salt Syndic is a master daemon which can receive
#                    instructions from a higher-level Salt Master, allowing
#                    for tiered organization of your Salt infrastructure
### END INIT INFO

# Author: Michael Prokop <mika@debian.org>

PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Salt Syndic daemon"
NAME=salt-syndic
DAEMON=/usr/bin/salt-syndic
DAEMON_ARGS="-d"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

. /lib/lsb/init-functions

do_start() {
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    pid=$(pidofproc -p $PIDFILE $DAEMON)
    if [ -n "$pid" ] ; then
        return 1
    fi

    start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
}

do_stop() {
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    rm -f $PIDFILE
    return "$RETVAL"
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
        ;;
    #reload)
        # not implemented
        #;;
    restart|force-reload)
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

exit 0
pkg/deb/salt-syndic.service (new file, 13 lines)
@@ -0,0 +1,13 @@
[Unit]
Description=The Salt Syndic daemon
After=network.target

[Service]
EnvironmentFile=/etc/default/salt-syndic
Type=simple
LimitNOFILE=8192
ExecStart=/usr/bin/salt-syndic
Restart=$RESTART

[Install]
WantedBy=multi-user.target
salt/beacons/network_info.py

@@ -8,7 +8,15 @@ Beacon to monitor statistics from ethernet adapters
 # Import Python libs
 from __future__ import absolute_import
 import logging
-import psutil
+
+# Import third party libs
+# pylint: disable=import-error
+try:
+    import salt.utils.psutil_compat as psutil
+    HAS_PSUTIL = True
+except ImportError:
+    HAS_PSUTIL = False
+# pylint: enable=import-error

 log = logging.getLogger(__name__)

@@ -32,6 +40,8 @@ def _to_list(obj):


 def __virtual__():
+    if not HAS_PSUTIL:
+        return (False, 'cannot load network_info beacon: psutil not available')
     return __virtualname__
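The switch from a bare ``import psutil`` to ``salt.utils.psutil_compat`` is what lets the beacon tolerate both missing and old psutil installs. A hedged illustration of the pattern (the renamed-API example, ``get_pid_list`` versus ``pids``, is psutil history, not a quote of the shim):

.. code-block:: python

    # pylint: disable=import-error
    try:
        import salt.utils.psutil_compat as psutil  # exposes psutil 2.x names
        HAS_PSUTIL = True
    except ImportError:
        HAS_PSUTIL = False
    # pylint: enable=import-error

    def list_pids():
        # Gate on availability instead of crashing at import time, mirroring
        # the __virtual__ check in the hunk above.
        if not HAS_PSUTIL:
            return []
        return psutil.pids()  # psutil 1.x called this get_pid_list()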
salt/beacons/ps.py

@@ -5,12 +5,27 @@ Send events covering service status

 # Import Python Libs
 from __future__ import absolute_import

 import logging
-import psutil
+
+# Import third party libs
+# pylint: disable=import-error
+try:
+    import salt.utils.psutil_compat as psutil
+    HAS_PSUTIL = True
+except ImportError:
+    HAS_PSUTIL = False
+# pylint: enable=import-error

 log = logging.getLogger(__name__)  # pylint: disable=invalid-name

 __virtualname__ = 'ps'


+def __virtual__():
+    if not HAS_PSUTIL:
+        return (False, 'cannot load ps beacon: psutil not available')
+    return __virtualname__
+
+
 def validate(config):
     '''
salt/grains/core.py

@@ -1004,6 +1004,7 @@ _OS_NAME_MAP = {
     'manjaro': 'Manjaro',
     'antergos': 'Antergos',
     'sles': 'SUSE',
+    'linuxmint': 'Mint',
 }

 # Map the 'os' grain to the 'os_family' grain
salt/modules/aptpkg.py

@@ -85,7 +85,7 @@ def __virtual__():
     '''
     Confirm this module is on a Debian based system
     '''
-    if __grains__.get('os_family') in ('Kali', 'Debian', 'LinuxMint'):
+    if __grains__.get('os_family') in ('Kali', 'Debian'):
         return __virtualname__
     return (False, 'The pkg module could not be loaded: unsupported OS family')
salt/modules/boto_elb.py

@@ -500,6 +500,37 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
     '''
     Set attributes on an ELB.

+    name (string)
+        Name of the ELB instance to set attributes for
+
+    attributes
+        A dict of attributes to set.
+
+        Valid attributes are:
+
+        access_log (dict)
+            enabled (bool)
+                Enable storage of access logs.
+            s3_bucket_name (string)
+                The name of the S3 bucket to place logs.
+            s3_bucket_prefix (string)
+                Prefix for the log file name.
+            emit_interval (int)
+                Interval for storing logs in S3 in minutes. Valid values are
+                5 and 60.
+
+        connection_draining (dict)
+            enabled (bool)
+                Enable connection draining.
+            timeout (int)
+                Maximum allowed time in seconds for sending existing
+                connections to an instance that is deregistering or unhealthy.
+                Default is 300.
+
+        cross_zone_load_balancing (dict)
+            enabled (bool)
+                Enable cross-zone load balancing.
+
     CLI example to set attributes on an ELB:

     .. code-block:: bash
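For concreteness, a hedged example of the ``attributes`` dict this docstring describes (bucket name and prefix are placeholders):

.. code-block:: python

    attributes = {
        'access_log': {
            'enabled': True,
            's3_bucket_name': 'my-elb-logs',  # placeholder bucket
            's3_bucket_prefix': 'prod/elb',   # placeholder prefix
            'emit_interval': 5,               # must be 5 or 60 (minutes)
        },
        'connection_draining': {
            'enabled': True,
            'timeout': 300,                   # seconds; 300 is the default
        },
        'cross_zone_load_balancing': {
            'enabled': True,
        },
    }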
salt/modules/systemd.py

@@ -279,7 +279,11 @@ def _sysv_enabled(name):
     (starts with "S") to its script is found in /etc/init.d in the current
     runlevel.
     '''
-    return bool(glob.glob('/etc/rc%s.d/S*%s' % (_runlevel(), name)))
+    # Find exact match (disambiguate matches like "S01anacron" for cron)
+    for match in glob.glob('/etc/rc%s.d/S*%s' % (_runlevel(), name)):
+        if re.match(r'S\d{,2}%s' % name, os.path.basename(match)):
+            return True
+    return False


 def _untracked_custom_unit_found(name):
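The anchored pattern is easy to sanity-check in isolation; note that Python's re module accepts ``{,2}`` as shorthand for ``{0,2}``:

.. code-block:: python

    import re

    # 'S' plus at most two digits must sit directly before the service name,
    # so looking up 'cron' no longer matches anacron's rc symlink.
    print(bool(re.match(r'S\d{,2}cron', 'S01cron')))     # True
    print(bool(re.match(r'S\d{,2}cron', 'S01anacron')))  # False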
salt/pillar/git_pillar.py

@@ -186,6 +186,12 @@ with the global authentication parameter names prefixed with ``git_pillar``
 instead of ``gitfs`` (e.g. :conf_master:`git_pillar_pubkey`,
 :conf_master:`git_pillar_privkey`, :conf_master:`git_pillar_passphrase`, etc.).

+.. note::
+
+    The ``name`` parameter can be used to further differentiate between two
+    remotes with the same URL. If you're using two remotes with the same URL,
+    the ``name`` option is required.
+
 .. _GitPython: https://github.com/gitpython-developers/GitPython
 .. _pygit2: https://github.com/libgit2/pygit2
 .. _Dulwich: https://www.samba.org/~jelmer/dulwich/
salt/modules/hipchat.py

@@ -15,6 +15,13 @@ The following fields can be set in the minion conf file::
     hipchat.profile (optional)
     hipchat.url (optional)

+.. note::
+
+    When using Hipchat's API v2, ``api_key`` needs to be assigned to the room, with
+    the "Label" set to what you would otherwise have set in the ``hipchat.from_name``
+    field. The v2 API disregards the ``from_name`` in the data sent for the room
+    notification and uses the Label assigned through the Hipchat control panel.
+
 Alternative configuration values can be used by prefacing the configuration.
 Any values not found in the alternative configuration will be pulled from
 the default location::
salt/returners/local_cache.py

@@ -14,7 +14,6 @@ import shutil
 import time
 import hashlib
 import bisect
-import time

 # Import salt libs
 import salt.payload

@@ -390,19 +389,44 @@ def clean_old_jobs():
     if not os.path.exists(jid_root):
         return

+    # Keep track of any empty t_path dirs that need to be removed later
+    dirs_to_remove = set()
+
     for top in os.listdir(jid_root):
         t_path = os.path.join(jid_root, top)
-        for final in os.listdir(t_path):
+
+        # Check if there are any stray/empty JID t_path dirs
+        t_path_dirs = os.listdir(t_path)
+        if not t_path_dirs and t_path not in dirs_to_remove:
+            dirs_to_remove.add(t_path)
+            continue
+
+        for final in t_path_dirs:
             f_path = os.path.join(t_path, final)
             jid_file = os.path.join(f_path, 'jid')
             if not os.path.isfile(jid_file):
                 # No jid file means corrupted cache entry, scrub it
-                shutil.rmtree(f_path)
+                # by removing the entire t_path directory
+                shutil.rmtree(t_path)
             else:
                 jid_ctime = os.stat(jid_file).st_ctime
                 hours_difference = (cur - jid_ctime) / 3600.0
                 if hours_difference > __opts__['keep_jobs']:
-                    shutil.rmtree(f_path)
+                    # Remove the entire t_path from the original JID dir
+                    shutil.rmtree(t_path)
+
+    # Remove empty JID dirs from the job cache, if they're old enough.
+    # JID dirs may be empty either from a previous cache clean with the bug
+    # listed in #29286 still present, or because the JID dir was only recently
+    # made and the jid file hasn't been created yet.
+    if dirs_to_remove:
+        for t_path in dirs_to_remove:
+            # Checking the time again prevents a possible race condition where
+            # t_path JID dirs were created, but not yet populated by a jid file.
+            t_path_ctime = os.stat(t_path).st_ctime
+            hours_difference = (cur - t_path_ctime) / 3600.0
+            if hours_difference > __opts__['keep_jobs']:
+                shutil.rmtree(t_path)


 def update_endtime(jid, time):
salt/states/boto_elb.py

@@ -320,6 +320,10 @@ def present(

     attributes
         A dict defining the attributes to set on this ELB.
+        Unknown keys will be silently ignored.
+
+        See the :mod:`salt.modules.boto_elb.set_attributes` function for
+        recognized attributes.

     attributes_from_pillar
         name of pillar dict that contains attributes. Attributes defined for this specific

@@ -471,7 +475,17 @@ def present(
 def register_instances(name, instances, region=None, key=None, keyid=None,
                        profile=None):
     '''
-    Add instance/s to load balancer
+    Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from
+    the ``instances`` list does not remove it from the ELB.
+
+    name
+        The name of the Elastic Load Balancer to add EC2 instances to.
+
+    instances
+        A list of EC2 instance IDs that this Elastic Load Balancer should
+        distribute traffic to. This state will only ever append new instances
+        to the ELB. EC2 instances already associated with this ELB will not be
+        removed if they are not in the ``instances`` list.

     .. versionadded:: 2015.8.0
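As a hedged usage sketch, the state can be exercised ad hoc through the ``state.single`` execution function; the minion ID, ELB name, instance ID, and region below are placeholders:

.. code-block:: python

    import salt.client

    client = salt.client.LocalClient()
    ret = client.cmd(
        'my-minion',                     # placeholder minion ID
        'state.single',
        ['boto_elb.register_instances'],
        kwarg={
            'name': 'myelb',
            'instances': ['i-0123456789abcdef0'],
            'region': 'us-east-1',
        },
    )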
salt/states/git.py

@@ -591,6 +591,7 @@ def latest(name,
             'Failed to check remote refs: {0}'.format(_strip_exc(exc))
         )

+    desired_upstream = False
     if bare:
         remote_rev = None
         remote_rev_type = None

@@ -612,12 +613,10 @@ def latest(name,
         elif 'refs/tags/' + rev + '^{}' in all_remote_refs:
             # Annotated tag
             remote_rev = all_remote_refs['refs/tags/' + rev + '^{}']
-            desired_upstream = False
             remote_rev_type = 'tag'
         elif 'refs/tags/' + rev in all_remote_refs:
             # Non-annotated tag
             remote_rev = all_remote_refs['refs/tags/' + rev]
-            desired_upstream = False
             remote_rev_type = 'tag'
         else:
             if len(rev) <= 40 \

@@ -627,7 +626,6 @@ def latest(name,
                 # desired rev is a SHA1
                 rev = rev.lower()
                 remote_rev = rev
-                desired_upstream = False
                 remote_rev_type = 'sha1'
             else:
                 remote_rev = None
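The net effect of this git state change is that ``desired_upstream`` is initialized to ``False`` once, before the ref-type detection, instead of being re-assigned to the same value in each tag and SHA1 branch of the conditional.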
salt/states/pkg.py

@@ -629,6 +629,7 @@ def installed(
         for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`,
         :mod:`ebuild <salt.modules.ebuild>`,
         :mod:`pacman <salt.modules.pacman>`,
+        :mod:`win_pkg <salt.modules.win_pkg>`,
         :mod:`yumpkg <salt.modules.yumpkg>`, and
         :mod:`zypper <salt.modules.zypper>`. The version number includes the
         release designation where applicable, to allow Salt to target a

@@ -866,8 +867,7 @@ def installed(


-    **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in Windows or
-    pkgng)**
+    **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)**

     :param list pkgs:
         A list of packages to install from a software repository. All packages

@@ -1459,8 +1459,7 @@ def latest(

     Multiple Package Installation Options:

-    (Not yet supported for: Windows, FreeBSD, OpenBSD, MacOS, and Solaris
-    pkgutil)
+    (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)

     pkgs
         A list of packages to maintain at the latest available version.
salt/transport/mixins/auth.py

@@ -106,6 +106,9 @@ class AESReqServerMixin(object):
             pub = RSA.importKey(f.read())
         except (ValueError, IndexError, TypeError):
             return self.crypticle.dumps({})
+        except IOError:
+            log.error('AES key not found')
+            return 'AES key not found'

         pret = {}
         cipher = PKCS1_OAEP.new(pub)
salt/transport/zeromq.py

@@ -188,6 +188,14 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
         )
         key = self.auth.get_keys()
         cipher = PKCS1_OAEP.new(key)
+        if 'key' not in ret:
+            # Reauth in the case our key is deleted on the master side.
+            yield self.auth.authenticate()
+            ret = yield self.message_client.send(
+                self._package_load(self.auth.crypticle.dumps(load)),
+                timeout=timeout,
+                tries=tries,
+            )
         aes = cipher.decrypt(ret['key'])
         pcrypt = salt.crypt.Crypticle(self.opts, aes)
         raise tornado.gen.Return(pcrypt.loads(ret[dictkey]))

@@ -492,6 +500,9 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
         if hasattr(self, '_monitor') and self._monitor is not None:
             self._monitor.stop()
             self._monitor = None
+        if hasattr(self, '_w_monitor') and self._w_monitor is not None:
+            self._w_monitor.stop()
+            self._w_monitor = None
         if hasattr(self, 'clients') and self.clients.closed is False:
             self.clients.close()
         if hasattr(self, 'workers') and self.workers.closed is False:

@@ -524,6 +535,13 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.

         self.context = zmq.Context(1)
         self._socket = self.context.socket(zmq.REP)
+        if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
+            # The socket monitor should be used only for debug purposes, so using threading doesn't look too bad here
+            import threading
+            self._w_monitor = ZeroMQSocketMonitor(self._socket)
+            t = threading.Thread(target=self._w_monitor.start_poll)
+            t.start()
+
         if self.opts.get('ipc_mode', '') == 'tcp':
             self.w_uri = 'tcp://127.0.0.1:{0}'.format(
                 self.opts.get('tcp_master_workers', 4515)
tests/unit/returners/local_cache_test.py (new file, 168 lines)
@@ -0,0 +1,168 @@
# -*- coding: utf-8 -*-
'''
tests.unit.returners.local_cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Unit tests for the Default Job Cache (local_cache).
'''

# Import Python libs
from __future__ import absolute_import
import os
import shutil
import tempfile

# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import destructiveTest, ensure_in_syspath
from salttesting.mock import (
    MagicMock,
    NO_MOCK,
    NO_MOCK_REASON,
    patch
)

ensure_in_syspath('../../')

# Import Salt libs
import salt.utils
from salt.returners import local_cache

TMP_CACHE_DIR = '/tmp/salt_test_job_cache/'
TMP_JID_DIR = '/tmp/salt_test_job_cache/jobs/'

local_cache.__opts__ = {'cachedir': TMP_CACHE_DIR,
                        'keep_jobs': 1}


@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class LocalCacheCleanOldJobsTestCase(TestCase):
    '''
    Tests for the local_cache.clean_old_jobs function.
    '''

    def tearDown(self):
        '''
        Clean up after tests.

        Note that a setUp function is not used in this TestCase because the
        _make_tmp_jid_dirs replaces it.
        '''
        if os.path.exists(TMP_CACHE_DIR):
            shutil.rmtree(TMP_CACHE_DIR)

    @patch('os.path.exists', MagicMock(return_value=False))
    def test_clean_old_jobs_no_jid_root(self):
        '''
        Tests that the function returns None when no jid_root is found.
        '''
        self.assertEqual(local_cache.clean_old_jobs(), None)

    def test_clean_old_jobs_empty_jid_dir_removed(self):
        '''
        Tests that an empty JID dir is removed when it is old enough to be deleted.
        '''
        # Create temp job cache dir without files in it.
        jid_dir, jid_file = self._make_tmp_jid_dirs(create_files=False)

        # Make sure there are no files in the directory before continuing
        self.assertEqual(jid_file, None)

        # Call clean_old_jobs function, patching the keep_jobs value with a
        # very small value to force the call to clean the job.
        with patch.dict(local_cache.__opts__, {'keep_jobs': 0.00000001}):
            local_cache.clean_old_jobs()

        # Assert that the JID dir was removed
        self.assertEqual([], os.listdir(TMP_JID_DIR))

    def test_clean_old_jobs_empty_jid_dir_remains(self):
        '''
        Tests that an empty JID dir is NOT removed because it was created within
        the keep_jobs time frame.
        '''
        # Create temp job cache dir without files in it.
        jid_dir, jid_file = self._make_tmp_jid_dirs(create_files=False)

        # Make sure there are no files in the directory
        self.assertEqual(jid_file, None)

        # Call clean_old_jobs function
        local_cache.clean_old_jobs()

        # Get the name of the JID directory that was created to test against
        jid_dir_name = jid_dir.rpartition('/')[2]

        # Assert the JID directory is still present to be cleaned after keep_jobs interval
        self.assertEqual([jid_dir_name], os.listdir(TMP_JID_DIR))

    def test_clean_old_jobs_jid_file_corrupted(self):
        '''
        Tests that the entire JID dir is removed when the jid_file is not a file.
        This scenario indicates a corrupted cache entry, so the entire dir is scrubbed.
        '''
        # Create temp job cache dir and jid file
        jid_dir, jid_file = self._make_tmp_jid_dirs()

        # Make sure there is a jid file in a new job cache directory
        jid_dir_name = jid_file.rpartition('/')[2]
        self.assertEqual(jid_dir_name, 'jid')

        # Even though we created a valid jid file in the _make_tmp_jid_dirs call to get
        # into the correct loop, we need to mock the 'os.path.isfile' check to force the
        # "corrupted file" check in the clean_old_jobs call.
        with patch('os.path.isfile', MagicMock(return_value=False)) as mock:
            local_cache.clean_old_jobs()

        # Assert that the JID dir was removed
        self.assertEqual([], os.listdir(TMP_JID_DIR))

    def test_clean_old_jobs_jid_file_is_cleaned(self):
        '''
        Test that the entire JID dir is removed when a job is old enough to be removed.
        '''
        # Create temp job cache dir and jid file
        jid_dir, jid_file = self._make_tmp_jid_dirs()

        # Make sure there is a jid directory
        jid_dir_name = jid_file.rpartition('/')[2]
        self.assertEqual(jid_dir_name, 'jid')

        # Call clean_old_jobs function, patching the keep_jobs value with a
        # very small value to force the call to clean the job.
        with patch.dict(local_cache.__opts__, {'keep_jobs': 0.00000001}):
            local_cache.clean_old_jobs()

        # Assert that the JID dir was removed
        self.assertEqual([], os.listdir(TMP_JID_DIR))

    def _make_tmp_jid_dirs(self, create_files=True):
        '''
        Helper function to set up temporary directories and files used for
        testing the clean_old_jobs function.

        Returns a temp_dir name and a jid_file_path. If create_files is False,
        the jid_file_path will be None.
        '''
        # First, create the /tmp/salt_test_job_cache/jobs/ directory to hold jid dirs
        if not os.path.exists(TMP_JID_DIR):
            os.makedirs(TMP_JID_DIR)

        # Then create a JID temp file in "/tmp/salt_test_job_cache/"
        temp_dir = tempfile.mkdtemp(dir=TMP_JID_DIR)

        jid_file_path = None
        if create_files:
            dir_name = '/'.join([temp_dir, 'jid'])
            os.mkdir(dir_name)
            jid_file_path = '/'.join([dir_name, 'jid'])
            with salt.utils.fopen(jid_file_path, 'w') as jid_file:
                jid_file.write('this is a jid file')

        return temp_dir, jid_file_path


if __name__ == '__main__':
    from integration import run_tests
    run_tests(LocalCacheCleanOldJobsTestCase, needs_daemon=False)