Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17)
Merge remote-tracking branch 'upstream/2015.8' into merge-forward-develop
Conflicts: salt/cli/daemons.py salt/master.py salt/minion.py salt/modules/gpg.py salt/modules/pw_group.py salt/utils/http.py setup.py tests/unit/modules/schedule_test.py
commit 73282d51cc
80 changed files with 9333 additions and 2637 deletions
|
@ -1,4 +1,4 @@
|
|||
.. admonition:: Do not use dots in SLS file names
|
||||
.. admonition:: Do not use dots in SLS file names or their directories
|
||||
|
||||
The initial implementation of :conf_master:`top.sls <state_top>` and
|
||||
:ref:`include-declaration` followed the Python import model where a slash
|
||||
|
@ -6,4 +6,5 @@
|
|||
the name (besides the suffix period) cannot be referenced. For example,
|
||||
webserver_1.0.sls is not referenceable because webserver_1.0 would refer
|
||||
to the directory/file webserver_1/0.sls
|
||||
|
||||
The same applies to any subdirectories; this is especially tricky when git repos are created.
|
||||
Another command that typically can't render its output is ``state.show_sls`` of a file in a path that contains a dot.
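The translation is mechanical: every dot in an SLS reference becomes a path
separator. A rough sketch of the lookup Salt effectively performs (a
hypothetical helper, not Salt's actual code):

.. code-block:: python

    def sls_to_path(ref):
        # 'webserver_1.0' -> 'webserver_1/0.sls', which is why dots break lookups
        return ref.replace('.', '/') + '.sls'

    assert sls_to_path('webserver_1.0') == 'webserver_1/0.sls'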
|
||||
|
|
127
doc/man/salt-proxy.1
Normal file
|
@ -0,0 +1,127 @@
|
|||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SALT-PROXY" "1" "July 29, 2015" "2015.8.0rc2-13-g733b842" "Salt"
|
||||
.SH NAME
|
||||
salt-proxy \- salt-proxy Documentation
|
||||
.
|
||||
.nr rst2man-indent-level 0
|
||||
.
|
||||
.de1 rstReportMargin
|
||||
\\$1 \\n[an-margin]
|
||||
level \\n[rst2man-indent-level]
|
||||
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
|
||||
-
|
||||
\\n[rst2man-indent0]
|
||||
\\n[rst2man-indent1]
|
||||
\\n[rst2man-indent2]
|
||||
..
|
||||
.de1 INDENT
|
||||
.\" .rstReportMargin pre:
|
||||
. RS \\$1
|
||||
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
|
||||
. nr rst2man-indent-level +1
|
||||
.\" .rstReportMargin post:
|
||||
..
|
||||
.de UNINDENT
|
||||
. RE
|
||||
.\" indent \\n[an-margin]
|
||||
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
|
||||
.nr rst2man-indent-level -1
|
||||
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
|
||||
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
|
||||
..
|
||||
.sp
|
||||
The Salt Proxy minion daemon receives commands from a remote Salt master
|
||||
and communicates with devices unable to run a full minion.
|
||||
.SH SYNOPSIS
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
salt\-proxy [ options ]
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.SH DESCRIPTION
|
||||
.sp
|
||||
The Salt proxy minion receives commands from the central Salt master, transmits appropriate commands
|
||||
to devices unable to run a minion, and replies with the results of said commands.
|
||||
.SH OPTIONS
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-proxyid
|
||||
The minion id that this proxy will assume. This is required.
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-version
|
||||
Print the version of Salt that is running.
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-versions\-report
|
||||
Show program\(aqs dependencies and version number, and then exit
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-h, \-\-help
|
||||
Show the help message and exit
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-c CONFIG_DIR, \-\-config\-dir=CONFIG_DIR
|
||||
The location of the Salt configuration directory. This directory contains
|
||||
the configuration files for Salt master and minions. The default location
|
||||
on most systems is \fB/etc/salt\fP\&.
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-u USER, \-\-user=USER
|
||||
Specify user to run salt\-proxy
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-d, \-\-daemon
|
||||
Run salt\-proxy as a daemon
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-pid\-file PIDFILE
|
||||
Specify the location of the pidfile. Default: /var/run/salt\-proxy\-<id>\&.pid
|
||||
.UNINDENT
|
||||
.SS Logging Options
|
||||
.sp
|
||||
Logging options which override any settings defined in the configuration files.
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-l LOG_LEVEL, \-\-log\-level=LOG_LEVEL
|
||||
Console logging log level. One of \fBall\fP, \fBgarbage\fP, \fBtrace\fP,
|
||||
\fBdebug\fP, \fBinfo\fP, \fBwarning\fP, \fBerror\fP, \fBquiet\fP\&. Default:
|
||||
\fBwarning\fP\&.
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-log\-file=LOG_FILE
|
||||
Log file path. Default: /var/log/salt/minion\&.
|
||||
.UNINDENT
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \-\-log\-file\-level=LOG_LEVEL_LOGFILE
|
||||
Logfile logging log level. One of \fBall\fP, \fBgarbage\fP, \fBtrace\fP,
|
||||
\fBdebug\fP, \fBinfo\fP, \fBwarning\fP, \fBerror\fP, \fBquiet\fP\&. Default:
|
||||
\fBwarning\fP\&.
|
||||
.UNINDENT
|
||||
.SH SEE ALSO
|
||||
.sp
|
||||
\fIsalt(1)\fP
|
||||
\fIsalt(7)\fP
|
||||
\fIsalt\-master(1)\fP
|
||||
\fIsalt\-minion(1)\fP
|
||||
.SH AUTHOR
|
||||
Thomas S. Hatch <thatch45@gmail.com> and many others, please see the Authors file
|
||||
.SH COPYRIGHT
|
||||
2015 SaltStack, Inc.
|
||||
.\" Generated by docutils manpage writer.
|
||||
.
|
|
@ -3,4 +3,5 @@ salt.modules.git
|
|||
================
|
||||
|
||||
.. automodule:: salt.modules.git
|
||||
:members:
|
||||
:members:
|
||||
:exclude-members: config_get_regex
|
||||
|
|
|
@ -3,4 +3,5 @@ salt.states.git
|
|||
===============
|
||||
|
||||
.. automodule:: salt.states.git
|
||||
:members:
|
||||
:members:
|
||||
:exclude-members: config
|
||||
|
|
|
@ -219,6 +219,48 @@ If you would like to log to the console instead of to the log file, remove the
|
|||
</topics/installation/osx>` instructions.
|
||||
|
||||
|
||||
Changing Default Paths
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Instead of updating your configuration files to point to the new root directory
|
||||
and having to pass the new configuration directory path to all of Salt's CLI
|
||||
tools, you can explicitly tweak the default system paths that Salt expects:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
GENERATE_SALT_SYSPATHS=1 pip --global-option='--salt-root-dir=/path/to/your/virtualenv/' \
|
||||
install -e ./salt # the path to the salt git clone from above
|
||||
|
||||
|
||||
You can now call all of Salt's CLI tools without explicitly passing the configuration directory.
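To confirm the generated paths took effect, you can inspect ``salt.syspaths``
from within the virtualenv (a quick sanity check, assuming the install step
above completed successfully):

.. code-block:: python

    import salt.syspaths

    # Both should now point inside your virtualenv rather than /etc/salt
    print(salt.syspaths.ROOT_DIR)
    print(salt.syspaths.CONFIG_DIR)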
|
||||
|
||||
Additional Options
|
||||
..................
|
||||
|
||||
In case you want to distribute your virtualenv, you probably don't want to
|
||||
include the ``.git/`` directory from Salt's clone, and without it, Salt won't report
|
||||
the accurate version. You can tell ``setup.py`` to generate the hardcoded
|
||||
version information which is distributable:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
GENERATE_SALT_SYSPATHS=1 WRITE_SALT_VERSION=1 pip --global-option='--salt-root-dir=/path/to/your/virtualenv/' \
|
||||
install -e ./salt # the path to the salt git clone from above
|
||||
|
||||
|
||||
Instead of passing those two environment variables, you can just pass a
|
||||
single one which will trigger the other two:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
MIMIC_SALT_INSTALL=1 pip --global-option='--salt-root-dir=/path/to/your/virtualenv/' \
|
||||
install -e ./salt # the path to the salt git clone from above
|
||||
|
||||
|
||||
This last one will grant you an editable Salt installation with hardcoded
|
||||
system paths and version information.
|
||||
|
||||
|
||||
Installing Salt from the Python Package Index
|
||||
---------------------------------------------
|
||||
|
||||
|
|
|
@ -139,8 +139,8 @@ Install the following software:
|
|||
Download the Prerequisite zip file for your CPU architecture from the
|
||||
SaltStack download site:
|
||||
|
||||
* `Salt32.zip <http://docs.saltstack.com/downloads/windows-deps/Salt32.zip/>`_
|
||||
* `Salt64.zip <http://docs.saltstack.com/downloads/windows-deps/Salt64.zip/>`_
|
||||
* `Salt32.zip <http://repo.saltstack.com/windows/dependencies/Salt32.zip/>`_
|
||||
* `Salt64.zip <http://repo.saltstack.com/windows/dependencies/Salt64.zip/>`_
|
||||
|
||||
These files contain all software required to build and develop Salt. Unzip the
|
||||
contents of the file to ``C:\Salt-Dev\temp``.
|
||||
|
|
|
@ -19,6 +19,21 @@ your typical housecat would be excellent source material for a PhD thesis.
|
|||
Salt proxy-minions provide the 'plumbing' that allows device enumeration
|
||||
and discovery, control, status, remote execution, and state management.
|
||||
|
||||
|
||||
New in 2015.8
|
||||
-------------
|
||||
|
||||
Starting with the 2015.8 release of Salt, proxy processes are no longer forked off from a controlling minion.
|
||||
Instead, they have their own script ``salt-proxy`` which takes mostly the same arguments as the
|
||||
standard Salt minion does with the addition of ``--proxyid``. This is the id that the salt-proxy will
|
||||
use to identify itself to the master. Proxy configurations are still best kept in Pillar and their format
|
||||
has not changed.
|
||||
|
||||
This change allows for better process control and logging. Proxy processes can now be listed with standard
|
||||
process management utilities (``ps`` from the command line). Also, a full Salt minion is no longer
|
||||
required (though it is still strongly recommended) on machines hosting proxies.
|
||||
|
||||
|
||||
Getting Started
|
||||
---------------
|
||||
|
||||
|
@ -29,13 +44,11 @@ installation that includes proxy-minions:
|
|||
|
||||
The key thing to remember is the left-most section of the diagram. Salt's
|
||||
nature is to have a minion connect to a master, then the master may control
|
||||
the minion. However, for proxy minions, the target device cannot run a minion,
|
||||
and thus must rely on a separate minion to fire up the proxy-minion and make the
|
||||
initial and persistent connection.
|
||||
the minion. However, for proxy minions, the target device cannot run a minion.
|
||||
|
||||
After the proxy minion is started and initiates its connection to the 'dumb'
|
||||
device, it connects back to the salt-master and ceases to be affiliated in
|
||||
any way with the minion that started it.
|
||||
device, it connects back to the salt-master and for all intents and purposes
|
||||
looks like just another minion to the Salt master.
|
||||
|
||||
To create support for a proxied device, one needs to create four things:
|
||||
|
||||
|
@ -46,8 +59,8 @@ To create support for a proxied device one needs to create four things:
|
|||
4. :ref:`Salt states <all-salt.states>` specific to the controlled device.
|
||||
|
||||
|
||||
Configuration parameters on the master
|
||||
######################################
|
||||
Configuration parameters
|
||||
########################
|
||||
|
||||
Proxy minions require no configuration parameters in /etc/salt/master.
|
||||
|
||||
|
@ -64,58 +77,89 @@ based on the diagram above:
|
|||
.. code-block:: yaml
|
||||
|
||||
base:
|
||||
minioncontroller1:
|
||||
- networkswitches
|
||||
minioncontroller2:
|
||||
- reallydumbdevices
|
||||
minioncontroller3:
|
||||
- smsgateway
|
||||
|
||||
|
||||
``/srv/pillar/networkswitches.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
dumbdevice1:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.5
|
||||
username: root
|
||||
passwd: letmein
|
||||
- dumbdevice1
|
||||
dumbdevice2:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.6
|
||||
username: root
|
||||
passwd: letmein
|
||||
- dumbdevice2
|
||||
dumbdevice3:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.7
|
||||
username: root
|
||||
passwd: letmein
|
||||
- dumbdevice3
|
||||
dumbdevice4:
|
||||
- dumbdevice4
|
||||
dumbdevice5:
|
||||
- dumbdevice5
|
||||
dumbdevice6:
|
||||
- dumbdevice6
|
||||
dumbdevice7:
|
||||
- dumbdevice7
|
||||
|
||||
``/srv/pillar/reallydumbdevices.sls``
|
||||
|
||||
``/srv/pillar/dumbdevice1.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
dumbdevice4:
|
||||
proxytype: i2c_lightshow
|
||||
i2c_address: 1
|
||||
dumbdevice5:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.5
|
||||
username: root
|
||||
passwd: letmein
|
||||
|
||||
|
||||
``/srv/pillar/dumbdevice2.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.6
|
||||
username: root
|
||||
passwd: letmein
|
||||
|
||||
|
||||
``/srv/pillar/dumbdevice3.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
proxytype: networkswitch
|
||||
host: 172.23.23.7
|
||||
username: root
|
||||
passwd: letmein
|
||||
|
||||
|
||||
``/srv/pillar/dumbdevice4.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
proxytype: i2c_lightshow
|
||||
i2c_address: 1
|
||||
|
||||
|
||||
``/srv/pillar/dumbdevice5.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
proxytype: i2c_lightshow
|
||||
i2c_address: 2
|
||||
dumbdevice6:
|
||||
|
||||
|
||||
``/srv/pillar/dumbdevice6.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
proxytype: 433mhz_wireless
|
||||
|
||||
``/srv/pillar/smsgateway.sls``
|
||||
|
||||
``/srv/pillar/dumbdevice7.sls``
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy:
|
||||
minioncontroller3:
|
||||
dumbdevice7:
|
||||
proxytype: sms_serial
|
||||
deventry: /dev/tty04
|
||||
proxytype: sms_serial
|
||||
deventry: /dev/tty04
|
||||
|
||||
|
||||
Note that the contents of each minioncontroller key may differ widely based on
|
||||
the type of device that the proxy-minion is managing.
|
||||
|
@ -136,25 +180,20 @@ In the above example
|
|||
- dumbdevice7 is an SMS gateway connected to machine minioncontroller3 via a
|
||||
serial port.
|
||||
|
||||
Because of the way pillar works, each of the salt-minions that fork off the
|
||||
Because of the way pillar works, each of the salt-proxy processes that fork off the
|
||||
proxy minions will only see the keys specific to the proxies it will be
|
||||
handling. In other words, from the above example, only minioncontroller1 will
|
||||
see the connection information for dumbdevices 1, 2, and 3. Minioncontroller2
|
||||
will see configuration data for dumbdevices 4, 5, and 6, and minioncontroller3
|
||||
will be privy to dumbdevice7.
|
||||
handling.
|
||||
|
||||
Also, in general, proxy-minions are lightweight, so the machines that run them
|
||||
could conceivably control a large number of devices. The example above is just
|
||||
to illustrate that it is possible for the proxy services to be spread across
|
||||
could conceivably control a large number of devices. To run more than one proxy from
|
||||
a single machine, simply start an additional proxy process with ``--proxyid``
|
||||
set to the id to which you want the proxy to bind.
|
||||
It is possible for the proxy services to be spread across
|
||||
many machines if necessary, or intentionally run on machines that need to
|
||||
control devices because of some physical interface (e.g. i2c and serial above).
|
||||
Another reason to divide proxy services might be security. In more secure
|
||||
environments only certain machines may have a network path to certain devices.
|
||||
|
||||
Now our salt-minions know if they are supposed to spawn a proxy-minion process
|
||||
to control a particular device. That proxy-minion process will initiate
|
||||
a connection back to the master to enable control.
|
||||
|
||||
|
||||
.. _proxy_connection_module:
|
||||
|
||||
|
@ -167,17 +206,13 @@ a proxymodule object must implement the following functions:
|
|||
|
||||
``__virtual__()``: This function performs the same duty that it does for other
|
||||
types of Salt modules. Logic goes here to determine if the module can be
|
||||
loaded, checking for the presence of Python modules on which the proxy deepends.
|
||||
loaded, checking for the presence of Python modules on which the proxy depends.
|
||||
Returning ``False`` will prevent the module from loading.
|
||||
|
||||
``init(opts)``: Perform any initialization that the device needs. This is
|
||||
a good place to bring up a persistent connection to a device, or authenticate
|
||||
to create a persistent authorization token.
|
||||
|
||||
``id(opts)``: Returns a unique, unchanging id for the controlled device. This is
|
||||
the "name" of the device, and is used by the salt-master for targeting and key
|
||||
authentication.
|
||||
|
||||
``shutdown()``: Code to cleanly shut down or close a connection to
|
||||
a controlled device goes here. This function must exist, but can contain only
|
||||
the keyword ``pass`` if there is no shutdown logic required.
|
||||
|
@ -186,6 +221,13 @@ the keyword ``pass`` if there is no shutdown logic required.
|
|||
be defined in the proxymodule. The code for ``ping`` should contact the
|
||||
controlled device and make sure it is really available.
|
||||
|
||||
Prior to 2015.8, the proxymodule must also have an ``id()`` function. 2015.8 and later don't use
|
||||
this function because the proxy's id is required on the command line.
|
||||
|
||||
``id(opts)``: Returns a unique, unchanging id for the controlled device. This is
|
||||
the "name" of the device, and is used by the salt-master for targeting and key
|
||||
authentication.
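Pulling those requirements together, a minimal proxymodule skeleton might look
like the following (a sketch only; the device-specific behavior is left as
hypothetical placeholders, and the full REST example follows below):

.. code-block:: python

    # Hypothetical minimal proxymodule, e.g. salt/proxy/skeleton.py
    __proxyenabled__ = ['skeleton']

    def __virtual__():
        # Return False if a required Python library is missing
        return True

    def init(opts):
        # Bring up a persistent connection to the device, or authenticate
        pass

    def ping():
        # Contact the controlled device and confirm it is really available
        return True

    def shutdown(opts):
        # Cleanly close the connection; just 'pass' if nothing is required
        pass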
|
||||
|
||||
Here is an example proxymodule used to interface to a *very* simple REST
|
||||
server. Code for the server is in the `salt-contrib GitHub repository <https://github.com/saltstack/salt-contrib/proxyminion_rest_example>`_
|
||||
|
||||
|
@ -372,8 +414,9 @@ and status; "package" installation, and a ping.
|
|||
|
||||
Grains are data about minions. Most proxied devices will have a paltry amount
|
||||
of data as compared to a typical Linux server. By default, a proxy minion will
|
||||
have no grains set at all. Salt core code requires values for ``kernel``,
|
||||
``os``, and ``os_family``. To add them (and others) to your proxy minion for
|
||||
have several grains taken from the host. Salt core code requires values for ``kernel``,
|
||||
``os``, and ``os_family``--all of these are forced to be ``proxy`` for proxy-minions.
|
||||
To add others to your proxy minion for
|
||||
a particular device, create a file in salt/grains named [proxytype].py and place
|
||||
inside it the different functions that need to be run to collect the data you
|
||||
are interested in. Here's an example:
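(The example itself is outside this hunk; a plausible minimal sketch, with
hypothetical names, is shown here.)

.. code-block:: python

    # Hypothetical salt/grains/rest_sample.py for a 'rest_sample' proxy type
    __proxyenabled__ = ['rest_sample']

    def location():
        # A real module would query the device; this is a static example grain
        return {'location': 'rack-14'}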
|
||||
|
@ -439,10 +482,8 @@ Here is an excerpt from a module that was modified to support proxy-minions:
|
|||
def ping():
|
||||
|
||||
if 'proxymodule' in __opts__:
|
||||
if 'ping' in __opts__['proxyobject'].__attr__():
|
||||
return __opts['proxyobject'].ping()
|
||||
else:
|
||||
return False
|
||||
ping_cmd = __opts__['proxymodule'].loaded_base_name + '.ping'
|
||||
return __opts__['proxymodule'][ping_cmd]()
|
||||
else:
|
||||
return True
|
||||
|
File diff suppressed because it is too large
|
@ -138,3 +138,8 @@ instead.
|
|||
|
||||
- The use of ``delim`` was removed from the following functions in the ``match``
|
||||
execution module: ``pillar_pcre``, ``pillar``, ``grain_pcre``,
|
||||
|
||||
Known Issues
|
||||
============
|
||||
|
||||
- The TCP transport does not function on FreeBSD.
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
.. toctree::
|
||||
|
||||
syndic
|
||||
proxyminion/index
|
|
@ -233,9 +233,9 @@ Now you can query the available version of Firefox using the Salt pkg module.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' pkg.available_version Firefox
|
||||
salt '*' pkg.available_version firefox
|
||||
|
||||
{'Firefox': {'15.0.1': 'Mozilla Firefox 15.0.1 (x86 en-US)',
|
||||
{'firefox': {'15.0.1': 'Mozilla Firefox 15.0.1 (x86 en-US)',
|
||||
'16.0.2': 'Mozilla Firefox 16.0.2 (x86 en-US)',
|
||||
'17.0.1': 'Mozilla Firefox 17.0.1 (x86 en-US)'}}
|
||||
|
||||
|
@ -245,13 +245,13 @@ by single quotes.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' pkg.install 'Firefox'
|
||||
salt '*' pkg.install 'firefox'
|
||||
|
||||
The above line will install the latest version of Firefox.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' pkg.install 'Firefox' version=16.0.2
|
||||
salt '*' pkg.install 'firefox' version=16.0.2
|
||||
|
||||
The above line will install version 16.0.2 of Firefox.
|
||||
|
||||
|
@ -273,9 +273,9 @@ Uninstall software using the pkg module:
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' pkg.remove 'Firefox'
|
||||
salt '*' pkg.remove 'firefox'
|
||||
|
||||
salt '*' pkg.purge 'Firefox'
|
||||
salt '*' pkg.purge 'firefox'
|
||||
|
||||
``pkg.purge`` just executes ``pkg.remove`` on Windows. At some point in the
|
||||
future ``pkg.purge`` may direct the installer to remove all configs and
|
||||
|
@ -316,7 +316,7 @@ this option was called ``win_gitrepos`` in Salt versions prior to 2015.8.0).
|
|||
|
||||
Use the :py:func:`winrepo.update_git_repos
|
||||
<salt.runners.winrepo.update_git_repos>` runner to clone/update the configured
|
||||
reops, then use :py:func:`winrepo.genrepo <salt.runners.winrepo.genrepo>`
|
||||
repos, then use :py:func:`winrepo.genrepo <salt.runners.winrepo.genrepo>`
|
||||
runner to compile the repository cache. Finally, use :py:func:`pkg.refresh_db
|
||||
<salt.modules.win_pkg.refresh_db>` on each minion to have them update their
|
||||
copy of the repository cache. Command examples are as follows:
|
||||
|
|
|
@ -73,7 +73,10 @@ start() {
|
|||
RETVAL=1
|
||||
else
|
||||
daemon --pidfile=$PID_FILE --check $SERVICE $SALTAPI $CONFIG_ARGS
|
||||
RETVAL=0
|
||||
RETVAL=$?
|
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SERVICE
|
||||
echo
|
||||
return $RETVAL
|
||||
fi
|
||||
fi
|
||||
RETVAL=$?
|
||||
|
@ -97,6 +100,10 @@ stop() {
|
|||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
RETVAL=$?
|
||||
echo
|
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$SERVICE
|
||||
return $RETVAL
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
|
|
|
@ -64,6 +64,10 @@ start() {
|
|||
fi
|
||||
else
|
||||
daemon --check $SERVICE $SALTMASTER -d $MASTER_ARGS
|
||||
RETVAL=$?
|
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SERVICE
|
||||
echo
|
||||
return $RETVAL
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
|
@ -86,6 +90,10 @@ stop() {
|
|||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
RETVAL=$?
|
||||
echo
|
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$SERVICE
|
||||
return $RETVAL
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
|
|
|
@ -68,7 +68,11 @@ start() {
|
|||
RETVAL=$?
|
||||
echo -n "already running"
|
||||
else
|
||||
daemon --check $SERVICE $SALTMINION -d $MINION_ARGS
|
||||
daemon --check $SERVICE $SALTMINION -d $MINION_ARGS
|
||||
RETVAL=$?
|
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SERVICE
|
||||
echo
|
||||
return $RETVAL
|
||||
fi
|
||||
fi
|
||||
RETVAL=$?
|
||||
|
@ -94,6 +98,7 @@ stop() {
|
|||
else
|
||||
killproc $PROCESS
|
||||
RETVAL=$?
|
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$SERVICE
|
||||
# tidy up any rogue processes:
|
||||
PROCS=`ps -ef | grep "$SALTMINION" | grep -v grep | awk '{print $2}'`
|
||||
if [ -n "$PROCS" ]; then
|
||||
|
|
|
@ -65,6 +65,10 @@ start() {
|
|||
fi
|
||||
else
|
||||
daemon --check $SERVICE $SALTSYNDIC -d $SYNDIC_ARGS
|
||||
RETVAL=$?
|
||||
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SERVICE
|
||||
echo
|
||||
return $RETVAL
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
|
@ -87,6 +91,10 @@ stop() {
|
|||
fi
|
||||
else
|
||||
killproc $PROCESS
|
||||
RETVAL=$?
|
||||
echo
|
||||
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$SERVICE
|
||||
return $RETVAL
|
||||
fi
|
||||
RETVAL=$?
|
||||
echo
|
||||
|
|
|
@ -198,6 +198,7 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
confd = os.path.join(
|
||||
os.path.dirname(self.config['conf_file']), 'minion.d'
|
||||
)
|
||||
|
||||
v_dirs = [
|
||||
self.config['pki_dir'],
|
||||
self.config['cachedir'],
|
||||
|
@ -205,11 +206,13 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
self.config['extension_modules'],
|
||||
confd,
|
||||
]
|
||||
|
||||
if self.config.get('transport') == 'raet':
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
|
||||
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
|
||||
|
||||
verify_env(
|
||||
v_dirs,
|
||||
self.config['user'],
|
||||
|
@ -319,12 +322,13 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
# pylint: enable=no-member
|
||||
|
||||
|
||||
class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
||||
class ProxyMinion(parsers.ProxyMinionOptionParser): # pylint: disable=no-init
|
||||
'''
|
||||
Create a proxy minion server
|
||||
'''
|
||||
|
||||
# pylint: disable=no-member
|
||||
def prepare(self, proxydetails):
|
||||
def prepare(self):
|
||||
'''
|
||||
Run the preparation sequence required to start a salt minion.
|
||||
|
||||
|
@ -334,6 +338,13 @@ class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
'''
|
||||
self.parse_args()
|
||||
|
||||
if not self.values.proxyid:
|
||||
raise SaltSystemExit('salt-proxy requires --proxyid')
|
||||
|
||||
# Proxies get their ID from the command line. This may need to change in
|
||||
# the future.
|
||||
self.config['id'] = self.values.proxyid
|
||||
|
||||
try:
|
||||
if self.config['verify_env']:
|
||||
confd = self.config.get('default_include')
|
||||
|
@ -353,32 +364,43 @@ class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
confd = os.path.join(
|
||||
os.path.dirname(self.config['conf_file']), 'minion.d'
|
||||
)
|
||||
|
||||
v_dirs = [
|
||||
self.config['pki_dir'],
|
||||
self.config['cachedir'],
|
||||
self.config['sock_dir'],
|
||||
self.config['extension_modules'],
|
||||
confd,
|
||||
]
|
||||
|
||||
if self.config.get('transport') == 'raet':
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
|
||||
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
|
||||
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
|
||||
|
||||
verify_env(
|
||||
[
|
||||
self.config['pki_dir'],
|
||||
self.config['cachedir'],
|
||||
self.config['sock_dir'],
|
||||
self.config['extension_modules'],
|
||||
confd,
|
||||
],
|
||||
v_dirs,
|
||||
self.config['user'],
|
||||
permissive=self.config['permissive_pki_access'],
|
||||
pki_dir=self.config['pki_dir'],
|
||||
)
|
||||
if 'proxy_log' in proxydetails:
|
||||
logfile = proxydetails['proxy_log']
|
||||
if 'proxy_log' in self.config:
|
||||
logfile = self.config['proxy_log']
|
||||
else:
|
||||
logfile = None
|
||||
logfile = self.config['log_file']
|
||||
if logfile is not None and not logfile.startswith(('tcp://',
|
||||
'udp://',
|
||||
'file://')):
|
||||
# Logfile is not using Syslog, verify
|
||||
current_umask = os.umask(0o027)
|
||||
verify_files([logfile], self.config['user'])
|
||||
os.umask(current_umask)
|
||||
|
||||
except OSError as err:
|
||||
logger.exception('Failed to prepare salt environment')
|
||||
self.shutdown(err.errno)
|
||||
|
||||
self.config['proxy'] = proxydetails
|
||||
self.setup_logfile_logger()
|
||||
logger.info(
|
||||
'Setting up a Salt Proxy Minion "{0}"'.format(
|
||||
|
@ -386,20 +408,26 @@ class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
)
|
||||
)
|
||||
migrations.migrate_paths(self.config)
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
# If the minion key has not been accepted, then Salt enters a loop
|
||||
# waiting for it, if we daemonize later then the minion could halt
|
||||
# the boot process waiting for a key to be accepted on the master.
|
||||
# This is the latest safe place to daemonize
|
||||
self.daemonize_if_required()
|
||||
self.set_pidfile()
|
||||
if isinstance(self.config.get('master'), list):
|
||||
self.minion = salt.minion.MultiMinion(self.config)
|
||||
else:
|
||||
# TODO: AIO core is separate from transport
|
||||
if self.config['transport'].lower() in ('zeromq', 'tcp'):
|
||||
# Late import so logging works correctly
|
||||
import salt.minion
|
||||
# If the minion key has not been accepted, then Salt enters a loop
|
||||
# waiting for it, if we daemonize later then the minion could halt
|
||||
# the boot process waiting for a key to be accepted on the master.
|
||||
# This is the latest safe place to daemonize
|
||||
self.daemonize_if_required()
|
||||
self.set_pidfile()
|
||||
# TODO Proxy minions don't currently support failover
|
||||
self.minion = salt.minion.ProxyMinion(self.config)
|
||||
else:
|
||||
# For proxy minions, this doesn't work yet.
|
||||
import salt.daemons.flo
|
||||
self.daemonize_if_required()
|
||||
self.set_pidfile()
|
||||
self.minion = salt.daemons.flo.IofloMinion(self.config)
|
||||
|
||||
def start(self, proxydetails):
|
||||
def start(self):
|
||||
'''
|
||||
Start the actual proxy minion.
|
||||
|
||||
|
@ -409,10 +437,11 @@ class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
|
||||
NOTE: Run any required code before calling `super()`.
|
||||
'''
|
||||
self.prepare(proxydetails)
|
||||
try:
|
||||
self.minion.tune_in()
|
||||
logger.info('The proxy minion is starting up')
|
||||
self.prepare()
|
||||
if check_user(self.config['user']):
|
||||
logger.info('The proxy minion is starting up')
|
||||
self.minion.tune_in()
|
||||
except (KeyboardInterrupt, SaltSystemExit) as exc:
|
||||
logger.warn('Stopping the Salt Proxy Minion')
|
||||
if isinstance(exc, KeyboardInterrupt):
|
||||
|
@ -426,7 +455,7 @@ class ProxyMinion(parsers.MinionOptionParser): # pylint: disable=no-init
|
|||
'''
|
||||
If sub-classed, run any shutdown operations on this method.
|
||||
'''
|
||||
if 'proxymodule' in self.minion.opts:
|
||||
if hasattr(self, 'minion') and 'proxymodule' in self.minion.opts:
|
||||
proxy_fn = self.minion.opts['proxymodule'].loaded_base_name + '.shutdown'
|
||||
self.minion.opts['proxymodule'][proxy_fn](self.minion.opts)
|
||||
logger.info('The proxy minion is shut down')
|
||||
|
|
|
@ -244,7 +244,7 @@ class LocalClient(object):
|
|||
'No command was sent, no jid was assigned.')
|
||||
return {}
|
||||
else:
|
||||
self.event.subscribe_regex('^syndic/.*/{0}'.format(pub_data['jid']))
|
||||
self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')
|
||||
|
||||
self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))
|
||||
|
||||
|
@ -807,9 +807,8 @@ class LocalClient(object):
|
|||
# TODO: tests!!
|
||||
def get_returns_no_block(
|
||||
self,
|
||||
jid,
|
||||
tags_regex=None
|
||||
):
|
||||
tag,
|
||||
match_type=None):
|
||||
'''
|
||||
Raw function to just return events of jid excluding timeout logic
|
||||
|
||||
|
@ -821,9 +820,8 @@ class LocalClient(object):
|
|||
'''
|
||||
|
||||
while True:
|
||||
# TODO(driskell): This was previously completely nonblocking.
|
||||
# Should get_event have a nonblock option?
|
||||
raw = self.event.get_event(wait=0.01, tag='salt/job/{0}'.format(jid), tags_regex=tags_regex, full=True)
|
||||
# CHANGED(driskell): This was previously completely nonblocking.
|
||||
raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True)
|
||||
yield raw
|
||||
|
||||
def get_iter_returns(
|
||||
|
@ -869,9 +867,9 @@ class LocalClient(object):
|
|||
# iterator for this job's return
|
||||
if self.opts['order_masters']:
|
||||
# If we are a MoM, we need to gather expected minions from downstreams masters.
|
||||
ret_iter = self.get_returns_no_block(jid, tags_regex=['^syndic/.*/{0}'.format(jid)])
|
||||
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')
|
||||
else:
|
||||
ret_iter = self.get_returns_no_block(jid)
|
||||
ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))
|
||||
# iterator for the info of this job
|
||||
jinfo_iter = []
|
||||
timeout_at = time.time() + timeout
|
||||
|
@ -944,7 +942,7 @@ class LocalClient(object):
|
|||
if 'jid' not in jinfo:
|
||||
jinfo_iter = []
|
||||
else:
|
||||
jinfo_iter = self.get_returns_no_block(jinfo['jid'])
|
||||
jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
|
||||
timeout_at = time.time() + self.opts['gather_job_timeout']
|
||||
# if you are a syndic, wait a little longer
|
||||
if self.opts['order_masters']:
|
||||
|
|
|
@ -968,10 +968,10 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||
pass
|
||||
|
||||
# Execute shim
|
||||
ret = self.shell.exec_cmd('/bin/sh $HOME/{0}'.format(target_shim_file))
|
||||
ret = self.shell.exec_cmd('/bin/sh \'$HOME/{0}\''.format(target_shim_file))
|
||||
|
||||
# Remove shim from target system
|
||||
self.shell.exec_cmd('rm $HOME/{0}'.format(target_shim_file))
|
||||
self.shell.exec_cmd('rm \'$HOME/{0}\''.format(target_shim_file))
|
||||
|
||||
return ret
|
||||
|
||||
|
|
|
@ -1655,7 +1655,6 @@ def request_instance(vm_=None, call=None):
|
|||
}
|
||||
try:
|
||||
rd_data = aws.query(rd_params,
|
||||
return_root=True,
|
||||
location=get_location(),
|
||||
provider=get_provider(),
|
||||
opts=__opts__,
|
||||
|
@ -2364,7 +2363,7 @@ def create(vm_=None, call=None):
|
|||
'volumes': volumes,
|
||||
'zone': ret['placement']['availabilityZone'],
|
||||
'instance_id': ret['instanceId'],
|
||||
'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
|
||||
'del_all_vols_on_destroy': vm_.get('del_all_vols_on_destroy', False)
|
||||
},
|
||||
call='action'
|
||||
)
|
||||
|
@ -3851,38 +3850,52 @@ def delete_keypair(kwargs=None, call=None):
|
|||
|
||||
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
|
||||
'''
|
||||
Create a snapshot
|
||||
Create a snapshot.
|
||||
|
||||
volume_id
|
||||
The ID of the Volume from which to create a snapshot.
|
||||
|
||||
description
|
||||
The optional description of the snapshot.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826
|
||||
salt-cloud -f create_snapshot my-ec2-config volume_id=vol-351d8826 \\
|
||||
description="My Snapshot Description"
|
||||
'''
|
||||
if call != 'function':
|
||||
log.error(
|
||||
raise SaltCloudSystemExit(
|
||||
'The create_snapshot function must be called with -f '
|
||||
'or --function.'
|
||||
)
|
||||
return False
|
||||
|
||||
if 'volume_id' not in kwargs:
|
||||
log.error('A volume_id must be specified to create a snapshot.')
|
||||
return False
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
|
||||
if 'description' not in kwargs:
|
||||
kwargs['description'] = ''
|
||||
volume_id = kwargs.get('volume_id', None)
|
||||
description = kwargs.get('description', '')
|
||||
|
||||
params = {'Action': 'CreateSnapshot'}
|
||||
if volume_id is None:
|
||||
raise SaltCloudSystemExit(
|
||||
'A volume_id must be specified to create a snapshot.'
|
||||
)
|
||||
|
||||
if 'volume_id' in kwargs:
|
||||
params['VolumeId'] = kwargs['volume_id']
|
||||
|
||||
if 'description' in kwargs:
|
||||
params['Description'] = kwargs['description']
|
||||
params = {'Action': 'CreateSnapshot',
|
||||
'VolumeId': volume_id,
|
||||
'Description': description}
|
||||
|
||||
log.debug(params)
|
||||
|
||||
data = aws.query(params,
|
||||
return_url=True,
|
||||
return_root=True,
|
||||
location=get_location(),
|
||||
provider=get_provider(),
|
||||
opts=__opts__,
|
||||
sigver='4')
|
||||
sigver='4')[0]
|
||||
|
||||
r_data = {}
|
||||
for d in data:
|
||||
|
@ -3898,7 +3911,7 @@ def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
|
|||
argument_being_watched='status',
|
||||
required_argument_response='completed')
|
||||
|
||||
return data
|
||||
return r_data
|
||||
|
||||
|
||||
def delete_snapshot(kwargs=None, call=None):
|
||||
|
|
|
@ -855,8 +855,8 @@ def list_nodes(call=None, **kwargs):
|
|||
'image': server_tmp['image']['id'],
|
||||
'size': server_tmp['flavor']['id'],
|
||||
'state': server_tmp['state'],
|
||||
'private_ips': public,
|
||||
'public_ips': private,
|
||||
'private_ips': private,
|
||||
'public_ips': public,
|
||||
}
|
||||
return ret
|
||||
|
||||
|
|
|
@ -348,7 +348,7 @@ class AutoKey(object):
|
|||
autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
|
||||
|
||||
# cleanup expired files
|
||||
expire_minutes = self.opts.get('autosign_expire_minutes', 10)
|
||||
expire_minutes = self.opts.get('autosign_timeout', 120)
|
||||
if expire_minutes > 0:
|
||||
min_time = time.time() - (60 * int(expire_minutes))
|
||||
for root, dirs, filenames in os.walk(autosign_dir):
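# Illustrative note (not part of this diff): 'autosign_timeout' replaces the
# old 'autosign_expire_minutes' option; autosign files older than this many
# minutes are pruned from the minions_autosign directory by this walk.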
|
||||
|
|
|
@ -21,6 +21,8 @@ import logging
|
|||
import locale
|
||||
import salt.exceptions
|
||||
|
||||
__proxyenabled__ = ['*']
|
||||
|
||||
# Extend the default list of supported distros. This will be used for the
|
||||
# /etc/DISTRO-release checking that is part of platform.linux_distribution()
|
||||
from platform import _supported_dists
|
||||
|
@ -1032,7 +1034,13 @@ def os_data():
|
|||
grains['kernelrelease'], version, grains['cpuarch'], _) = platform.uname()
|
||||
# pylint: enable=unpacking-non-sequence
|
||||
|
||||
if salt.utils.is_windows():
|
||||
if salt.utils.is_proxy():
|
||||
grains['kernel'] = 'proxy'
|
||||
grains['kernelrelease'] = 'proxy'
|
||||
grains['osrelease'] = 'proxy'
|
||||
grains['os'] = 'proxy'
|
||||
grains['os_family'] = 'proxy'
|
||||
elif salt.utils.is_windows():
|
||||
with salt.utils.winapi.Com():
|
||||
wmi_c = wmi.WMI()
|
||||
grains['osrelease'] = grains['kernelrelease']
|
||||
|
|
|
@ -44,9 +44,11 @@ def defaults():
|
|||
|
||||
def facts():
|
||||
log.debug('----------- Trying to get facts')
|
||||
facts = __opts__['proxymodule']['junos.facts']()
|
||||
facts['version_info'] = 'override'
|
||||
return facts
|
||||
if 'proxymodule' in __opts__:
|
||||
facts = __opts__['proxymodule']['junos.facts']()
|
||||
facts['version_info'] = 'override'
|
||||
return facts
|
||||
return None
|
||||
|
||||
|
||||
def os_family():
|
||||
|
|
|
@ -845,6 +845,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|||
whitelist=None,
|
||||
virtual_enable=True,
|
||||
): # pylint: disable=W0231
|
||||
|
||||
self.opts = self.__prep_mod_opts(opts)
|
||||
|
||||
self.module_dirs = module_dirs
|
||||
|
@ -1182,10 +1183,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|||
# containing the names of the proxy types that the module supports.
|
||||
#
|
||||
# Render modules and state modules are OK though
|
||||
if 'proxy' in self.opts:
|
||||
if self.tag not in ['render', 'states']:
|
||||
if 'proxymodule' in self.opts:
|
||||
if self.tag not in ['render', 'states', 'utils']:
|
||||
if not hasattr(mod, '__proxyenabled__') or \
|
||||
(self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
|
||||
(self.opts['proxymodule'].loaded_base_name not in mod.__proxyenabled__ and
|
||||
'*' not in mod.__proxyenabled__):
|
||||
err_string = 'not a proxy_minion enabled module'
|
||||
self.missing_modules[module_name] = err_string
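# Illustrative note (not part of this diff): a module advertises proxy support
# via a top-level attribute, e.g.  __proxyenabled__ = ['rest_sample']  or
# __proxyenabled__ = ['*'];  modules lacking a matching entry are skipped here.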
|
||||
|
|
|
@ -13,6 +13,7 @@ import time
|
|||
import errno
|
||||
import logging
|
||||
import tempfile
|
||||
import traceback
|
||||
|
||||
# Import third party libs
|
||||
import zmq
|
||||
|
@ -1715,9 +1716,6 @@ class ClearFuncs(object):
|
|||
clear_load['groups'] = groups
|
||||
return self.loadauth.mk_token(clear_load)
|
||||
except Exception as exc:
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
type_, value_, traceback_ = sys.exc_info()
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
|
@ -1874,9 +1872,6 @@ class ClearFuncs(object):
|
|||
return ''
|
||||
|
||||
except Exception as exc:
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
type_, value_, traceback_ = sys.exc_info()
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
|
|
146
salt/minion.py
|
@ -27,7 +27,6 @@ if six.PY3:
|
|||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
from salt.ext.six.moves import range
|
||||
from salt.utils import reinit_crypto
|
||||
# pylint: enable=no-name-in-module,redefined-builtin
|
||||
|
||||
# Import third party libs
|
||||
|
@ -317,10 +316,7 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
|
|||
_args.append(arg)
|
||||
|
||||
if invalid_kwargs and not ignore_invalid:
|
||||
raise SaltInvocationError(
|
||||
'The following keyword arguments are not valid: {0}'
|
||||
.format(', '.join(invalid_kwargs))
|
||||
)
|
||||
salt.utils.invalid_kwargs(invalid_kwargs)
|
||||
|
||||
if argspec.keywords and isinstance(data, dict):
|
||||
# this function accepts **kwargs, pack in the publish data
|
||||
|
@ -554,7 +550,6 @@ class Minion(MinionBase):
|
|||
This class instantiates a minion, runs connections for a minion,
|
||||
and loads all of the functions into the minion
|
||||
'''
|
||||
|
||||
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
|
||||
'''
|
||||
Pass in the options dict
|
||||
|
@ -591,13 +586,15 @@ class Minion(MinionBase):
|
|||
)
|
||||
# Late setup of the opts grains, so we can log from the grains
|
||||
# module
|
||||
self.opts['grains'] = salt.loader.grains(opts)
|
||||
if 'proxyid' not in self.opts:
|
||||
self.opts['grains'] = salt.loader.grains(opts)
|
||||
|
||||
# TODO: remove?
|
||||
def sync_connect_master(self):
|
||||
'''
|
||||
Block until we are connected to a master
|
||||
'''
|
||||
log.debug("sync_connect_master")
|
||||
self._connect_master_future = self.connect_master()
|
||||
# finish connecting to master
|
||||
self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
|
||||
|
@ -642,6 +639,7 @@ class Minion(MinionBase):
|
|||
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
|
||||
uid = salt.utils.get_uid(user=self.opts.get('user', None))
|
||||
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
|
||||
|
||||
self.schedule = salt.utils.schedule.Schedule(
|
||||
self.opts,
|
||||
self.functions,
|
||||
|
@ -676,24 +674,6 @@ class Minion(MinionBase):
|
|||
|
||||
self.grains_cache = self.opts['grains']
|
||||
|
||||
if 'proxy' in self.opts['pillar']:
|
||||
log.info('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
|
||||
self.opts['pillar']['proxy'].keys()))
|
||||
for p in self.opts['pillar']['proxy']:
|
||||
log.info('Starting {0} proxy.'.format(p))
|
||||
pid = os.fork()
|
||||
if pid > 0:
|
||||
reinit_crypto()
|
||||
continue
|
||||
else:
|
||||
reinit_crypto()
|
||||
proxyminion = salt.cli.daemons.ProxyMinion(self.opts)
|
||||
proxyminion.start(self.opts['pillar']['proxy'][p])
|
||||
self.clean_die(signal.SIGTERM, None)
|
||||
else:
|
||||
log.info('I am {0} and I am not supposed to start any proxies. '
|
||||
'(Likely not a problem)'.format(self.opts['id']))
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def eval_master(self,
|
||||
opts,
|
||||
|
@ -1558,6 +1538,7 @@ class Minion(MinionBase):
|
|||
del self.pub_channel
|
||||
self._connect_master_future = self.connect_master()
|
||||
self.block_until_connected() # TODO: remove
|
||||
self.functions, self.returners, self.function_errors = self._load_modules()
|
||||
self._fire_master_minion_start()
|
||||
log.info('Minion is ready to receive requests!')
|
||||
|
||||
|
@ -2483,62 +2464,44 @@ class ProxyMinion(Minion):
|
|||
This class instantiates a 'proxy' minion--a minion that does not manipulate
|
||||
the host it runs on, but instead manipulates a device that cannot run a minion.
|
||||
'''
|
||||
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None): # pylint: disable=W0231
|
||||
'''
|
||||
Pass in the options dict
|
||||
'''
|
||||
self._running = None
|
||||
self.win_proc = []
|
||||
self.loaded_base_name = loaded_base_name
|
||||
|
||||
# Warn if ZMQ < 3.2
|
||||
if HAS_ZMQ:
|
||||
try:
|
||||
zmq_version_info = zmq.zmq_version_info()
|
||||
except AttributeError:
|
||||
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
|
||||
# using zmq.zmq_version() and build a version info tuple.
|
||||
zmq_version_info = tuple(
|
||||
[int(x) for x in zmq.zmq_version().split('.')]
|
||||
)
|
||||
if zmq_version_info < (3, 2):
|
||||
log.warning(
|
||||
'You have a version of ZMQ less than ZMQ 3.2! There are '
|
||||
'known connection keep-alive issues with ZMQ < 3.2 which '
|
||||
'may result in loss of contact with minions. Please '
|
||||
'upgrade your ZMQ!'
|
||||
)
|
||||
# Late setup the of the opts grains, so we can log from the grains
|
||||
# module
|
||||
opts['master'] = self.eval_master(opts,
|
||||
timeout,
|
||||
safe)
|
||||
fq_proxyname = opts['proxy']['proxytype']
|
||||
# Need to match the function signature of the other loader fns
|
||||
# which is def proxy(opts, functions, whitelist=None, loaded_base_name=None)
|
||||
# 'functions' for other loaders is a LazyLoader object
|
||||
# but since we are not needing to merge functions into another fn dictionary
|
||||
# we will pass 'None' in
|
||||
self.proxymodule = salt.loader.proxy(opts, None, loaded_base_name=fq_proxyname)
|
||||
opts['proxymodule'] = self.proxymodule
|
||||
opts['grains'] = salt.loader.grains(opts)
|
||||
opts['id'] = opts['proxymodule'][fq_proxyname+'.id'](opts)
|
||||
opts.update(resolve_dns(opts))
|
||||
self.opts = opts
|
||||
self.opts['pillar'] = salt.pillar.get_pillar(
|
||||
opts,
|
||||
opts['grains'],
|
||||
opts['id'],
|
||||
opts['environment'],
|
||||
pillarenv=opts.get('pillarenv'),
|
||||
# TODO: better name...
|
||||
@tornado.gen.coroutine
|
||||
def _post_master_init(self, master):
|
||||
'''
|
||||
Function to finish init after connecting to a master
|
||||
|
||||
This is primarily loading modules, pillars, etc. (since they need
|
||||
to know which master they connected to)
|
||||
'''
|
||||
log.debug("subclassed _post_master_init")
|
||||
self.opts['master'] = master
|
||||
|
||||
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
|
||||
self.opts,
|
||||
self.opts['grains'],
|
||||
self.opts['id'],
|
||||
self.opts['environment'],
|
||||
pillarenv=self.opts.get('pillarenv'),
|
||||
).compile_pillar()
|
||||
opts['proxymodule'][fq_proxyname+'.init'](opts)
|
||||
fq_proxyname = self.opts['pillar']['proxy']['proxytype']
|
||||
self.opts['proxy'] = self.opts['pillar']['proxy']
|
||||
|
||||
# We need to do this again, because we are going to throw out a lot of grains.
|
||||
self.opts['grains'] = salt.loader.grains(self.opts)
|
||||
|
||||
self.opts['proxymodule'] = salt.loader.proxy(self.opts, None, loaded_base_name=fq_proxyname)
|
||||
self.functions, self.returners, self.function_errors = self._load_modules()
|
||||
proxy_fn = self.opts['proxymodule'].loaded_base_name + '.init'
|
||||
self.opts['proxymodule'][proxy_fn](self.opts)
|
||||
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.mod_opts = self._prep_mod_opts()
|
||||
self.matcher = Matcher(self.opts, self.functions)
|
||||
uid = salt.utils.get_uid(user=opts.get('user', None))
|
||||
self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
|
||||
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
|
||||
uid = salt.utils.get_uid(user=self.opts.get('user', None))
|
||||
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
|
||||
|
||||
self.schedule = salt.utils.schedule.Schedule(
|
||||
self.opts,
|
||||
self.functions,
|
||||
|
@ -2551,25 +2514,24 @@ class ProxyMinion(Minion):
|
|||
'__mine_interval':
|
||||
{
|
||||
'function': 'mine.update',
|
||||
'minutes': opts['mine_interval'],
|
||||
'minutes': self.opts['mine_interval'],
|
||||
'jid_include': True,
|
||||
'maxrunning': 2
|
||||
}
|
||||
})
|
||||
}, persist=True)
|
||||
|
||||
# add master_alive job if enabled
|
||||
if self.opts['master_alive_interval'] > 0:
|
||||
self.schedule.add_job({
|
||||
'__master_alive':
|
||||
{
|
||||
'function': 'status.master',
|
||||
'seconds': self.opts['master_alive_interval'],
|
||||
'jid_include': True,
|
||||
'maxrunning': 1,
|
||||
'kwargs': {'master': self.opts['master'],
|
||||
'connected': True}
|
||||
}
|
||||
}, persist=True)
|
||||
|
||||
self.grains_cache = self.opts['grains']
|
||||
|
||||
# self._running = True
|
||||
|
||||
def _prep_mod_opts(self):
|
||||
'''
|
||||
Returns a copy of the opts with key bits stripped out
|
||||
'''
|
||||
return super(ProxyMinion, self)._prep_mod_opts()
|
||||
|
||||
def _load_modules(self, force_refresh=False, notify=False):
|
||||
'''
|
||||
Return the functions and the returners loaded up from the loader
|
||||
module
|
||||
'''
|
||||
return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh, notify=notify)
|
||||
|
|
|
@ -231,12 +231,25 @@ def create_or_update_alarm(
|
|||
if isinstance(ok_actions, string_types):
|
||||
ok_actions = ok_actions.split(",")
|
||||
|
||||
# convert action names into ARN's
|
||||
alarm_actions = convert_to_arn(alarm_actions, region, key, keyid, profile)
|
||||
insufficient_data_actions = convert_to_arn(
|
||||
insufficient_data_actions, region, key, keyid, profile
|
||||
)
|
||||
ok_actions = convert_to_arn(ok_actions, region, key, keyid, profile)
|
||||
# convert provided action names into ARN's
|
||||
if alarm_actions:
|
||||
alarm_actions = convert_to_arn(alarm_actions,
|
||||
region=region,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
profile=profile)
|
||||
if insufficient_data_actions:
|
||||
insufficient_data_actions = convert_to_arn(insufficient_data_actions,
|
||||
region=region,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
profile=profile)
|
||||
if ok_actions:
|
||||
ok_actions = convert_to_arn(ok_actions,
|
||||
region=region,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
profile=profile)
|
||||
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
|
||||
|
|
|
@ -276,14 +276,20 @@ def list_tab(user):
|
|||
ret['special'].append(dat)
|
||||
elif line.startswith('#'):
|
||||
# It's a comment! Catch it!
|
||||
comment = line.lstrip('# ')
|
||||
comment_line = line.lstrip('# ')
|
||||
|
||||
# load the identifier if any
|
||||
if SALT_CRON_IDENTIFIER in comment:
|
||||
parts = comment.split(SALT_CRON_IDENTIFIER)
|
||||
comment = parts[0].rstrip()
|
||||
if SALT_CRON_IDENTIFIER in comment_line:
|
||||
parts = comment_line.split(SALT_CRON_IDENTIFIER)
|
||||
comment_line = parts[0].rstrip()
|
||||
# skip leading :
|
||||
if len(parts[1]) > 1:
|
||||
identifier = parts[1][1:]
|
||||
|
||||
if comment is None:
|
||||
comment = comment_line
|
||||
else:
|
||||
comment += '\n' + comment_line
|
||||
elif len(line.split()) > 5:
|
||||
# Appears to be a standard cron line
|
||||
comps = line.split()
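# Illustrative example (not part of this diff) of a line the comment branch
# above parses:
#   '# update mirrors SALT_CRON_IDENTIFIER:update_mirrors'
# yields comment_line 'update mirrors' and identifier 'update_mirrors'; with
# this change, consecutive comment lines are joined with '\n' rather than
# overwriting one another.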
|
||||
|
|
|
@ -35,11 +35,11 @@ def __virtual__():
|
|||
return False
|
||||
|
||||
|
||||
def _get_env(env):
|
||||
def _get_build_env(env):
|
||||
'''
|
||||
Get environment overrides dictionary to use in build process
|
||||
Get build environment overrides dictionary to use in build process
|
||||
'''
|
||||
env_override = ""
|
||||
env_override = ''
|
||||
if env is None:
|
||||
return env_override
|
||||
if not isinstance(env, dict):
|
||||
|
@ -52,6 +52,22 @@ def _get_env(env):
|
|||
return env_override
|
||||
|
||||
|
||||
def _get_repo_env(env):
|
||||
'''
|
||||
Get repo environment overrides dictionary to use in repo process
|
||||
'''
|
||||
env_options = ''
|
||||
if env is None:
|
||||
return env_options
|
||||
if not isinstance(env, dict):
|
||||
raise SaltInvocationError(
|
||||
'\'env\' must be a Python dictionary'
|
||||
)
|
||||
for key, value in env.items():
|
||||
env_options += '{0}\n'.format(value)
|
||||
return env_options
|
||||
|
||||
|
||||
def _create_pbuilders(env):
|
||||
'''
|
||||
Create the .pbuilder family of files in user's home directory
|
||||
|
@ -131,16 +147,16 @@ OTHERMIRROR="deb http://ftp.us.debian.org/debian/ testing main contrib non-free
|
|||
os.makedirs(pbuilder_hooksdir)
|
||||
|
||||
d05hook = os.path.join(pbuilder_hooksdir, 'D05apt-preferences')
|
||||
with open(d05hook, "w") as fow:
|
||||
with salt.utils.fopen(d05hook, 'w') as fow:
|
||||
fow.write('{0}'.format(hook_text))
|
||||
|
||||
pbuilderrc = os.path.join(home, '.pbuilderrc')
|
||||
with open(pbuilderrc, "w") as fow:
|
||||
with salt.utils.fopen(pbuilderrc, 'w') as fow:
|
||||
fow.write('{0}'.format(pbldrc_text))
|
||||
|
||||
env_overrides = _get_env(env)
|
||||
env_overrides = _get_build_env(env)
|
||||
if env_overrides and not env_overrides.isspace():
|
||||
with open(pbuilderrc, "a") as fow:
|
||||
with salt.utils.fopen(pbuilderrc, 'a') as fow:
|
||||
fow.write('{0}'.format(env_overrides))
|
||||
|
||||
|
||||
|
@ -319,7 +335,7 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='bas
|
|||
return ret
|
||||
|
||||
|
||||
def make_repo(repodir):
|
||||
def make_repo(repodir, keyid=None, env=None):
|
||||
'''
|
||||
Given the repodir, create a Debian repository out of the dsc therein
|
||||
|
||||
|
@ -341,9 +357,18 @@ Pull: jessie
|
|||
os.makedirs(repoconf)
|
||||
|
||||
repoconfdist = os.path.join(repoconf, 'distributions')
|
||||
with open(repoconfdist, "w") as fow:
|
||||
with salt.utils.fopen(repoconfdist, 'w') as fow:
|
||||
fow.write('{0}'.format(repocfg_text))
|
||||
|
||||
if keyid is not None:
|
||||
with salt.utils.fopen(repoconfdist, 'a') as fow:
|
||||
fow.write('Signwith: {0}\n'.format(keyid))
|
||||
|
||||
repocfg_opts = _get_repo_env(env)
|
||||
repoconfopts = os.path.join(repoconf, 'options')
|
||||
with salt.utils.fopen(repoconfopts, 'w') as fow:
|
||||
fow.write('{0}'.format(repocfg_opts))
|
||||
|
||||
for debfile in os.listdir(repodir):
|
||||
if debfile.endswith('.changes'):
|
||||
cmd = 'reprepro -Vb . include jessie {0}'.format(os.path.join(repodir, debfile))
|
||||
|
|
3609
salt/modules/git.py
File diff suppressed because it is too large
|
@ -6,30 +6,30 @@ Module for handling openstack glance calls.
|
|||
:configuration: This module is not usable until the following are specified
|
||||
either in a pillar or in the minion's config file::
|
||||
|
||||
glance.user: admin
|
||||
glance.password: verybadpass
|
||||
glance.tenant: admin
|
||||
glance.insecure: False #(optional)
|
||||
glance.auth_url: 'http://127.0.0.1:5000/v2.0/'
|
||||
keystone.user: admin
|
||||
keystone.password: verybadpass
|
||||
keystone.tenant: admin
|
||||
keystone.insecure: False #(optional)
|
||||
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
|
||||
|
||||
If configuration for multiple openstack accounts is required, they can be
|
||||
set up as different configuration profiles:
|
||||
For example::
|
||||
|
||||
openstack1:
|
||||
glance.user: admin
|
||||
glance.password: verybadpass
|
||||
glance.tenant: admin
|
||||
glance.auth_url: 'http://127.0.0.1:5000/v2.0/'
|
||||
keystone.user: admin
|
||||
keystone.password: verybadpass
|
||||
keystone.tenant: admin
|
||||
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
|
||||
|
||||
openstack2:
|
||||
glance.user: admin
|
||||
glance.password: verybadpass
|
||||
glance.tenant: admin
|
||||
glance.auth_url: 'http://127.0.0.2:5000/v2.0/'
|
||||
keystone.user: admin
|
||||
keystone.password: verybadpass
|
||||
keystone.tenant: admin
|
||||
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
|
||||
|
||||
With this configuration in place, any of the keystone functions can make use
|
||||
of a configuration profile by declaring it explicitly.
|
||||
With this configuration in place, any of the glance functions can
|
||||
make use of a configuration profile by declaring it explicitly.
|
||||
For example::
|
||||
|
||||
salt '*' glance.image_list profile=openstack1
|
||||
|
@ -37,6 +37,7 @@ Module for handling openstack glance calls.
|
|||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import re
|
||||
|
||||
# Import third party libs
|
||||
#import salt.ext.six as six
|
||||
|
@ -67,10 +68,11 @@ except ImportError:
|
|||
pass
|
||||
|
||||
# Workaround, as the Glance API v2 requires you to
|
||||
# already have a keystone token
|
||||
# already have a keystone session token
|
||||
HAS_KEYSTONE = False
|
||||
try:
|
||||
from keystoneclient.v2_0 import client as kstone
|
||||
#import keystoneclient.apiclient.exceptions as kstone_exc
|
||||
HAS_KEYSTONE = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
@@ -104,61 +106,60 @@ def _auth(profile=None, api_version=2, **connection_args):
    '''

    if profile:
        prefix = profile + ":glance."
        prefix = profile + ":keystone."
    else:
        prefix = "glance."
        prefix = "keystone."

    # look in connection_args first, then default to config file
    def get(key, default=None):
        '''
        TODO: Add docstring.
        Checks connection_args, then salt-minion config,
        falls back to specified default value.
        '''
        return connection_args.get('connection_' + key,
                                   __salt__['config.get'](prefix + key, default))
    user = get('user', 'admin')
    password = get('password', 'ADMIN')
    password = get('password', None)
    tenant = get('tenant', 'admin')
    tenant_id = get('tenant_id')
    auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0/')
    auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
    insecure = get('insecure', False)
    token = get('token')
    admin_token = get('token')
    region = get('region')
    endpoint = get('endpoint', 'http://127.0.0.1:9292/')
    ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')
    g_endpoint_url = __salt__['keystone.endpoint_get']('glance')
    # The trailing 'v2' causes URLs like this one:
    # http://127.0.0.1:9292/v2/v1/images
    g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])

    if token:
        kwargs = {'token': token,
                  'username': user,
                  'endpoint_url': endpoint,
                  'auth_url': auth_url,
                  'region_name': region,
                  'tenant_name': tenant}
    else:
        if admin_token and api_version != 1 and not password:
            # If we had a password we could just
            # ignore the admin-token and move on...
            raise SaltInvocationError('Only can use keystone admin token ' +
                                      'with Glance API v1')
        elif password:
            # Can't use the admin-token anyway
            kwargs = {'username': user,
                      'password': password,
                      'tenant_id': tenant_id,
                      'auth_url': auth_url,
                      'endpoint_url': g_endpoint_url,
                      'region_name': region,
                      'tenant_name': tenant}
        # 'insecure' keyword not supported by all v2.0 keystone clients
        # this ensures it's only passed in when defined
        if insecure:
            kwargs['insecure'] = True

    if token:
        log.debug('Calling glanceclient.client.Client(' +
                  '{0}, {1}, **{2})'.format(api_version, endpoint, kwargs))
        try:
            return client.Client(api_version, endpoint, **kwargs)
        except exc.HTTPUnauthorized:
            kwargs.pop('token')
            kwargs['password'] = password
            log.warn('Supplied token is invalid, trying to ' +
                     'get a new one using username and password.')
    elif api_version == 1 and admin_token:
        kwargs = {'token': admin_token,
                  'auth_url': auth_url,
                  'endpoint_url': g_endpoint_url}
    else:
        raise SaltInvocationError('No credentials to authenticate with.')

    if HAS_KEYSTONE:
        log.debug('Calling keystoneclient.v2_0.client.Client(' +
                  '{0}, **{1})'.format(endpoint, kwargs))
                  '{0}, **{1})'.format(ks_endpoint, kwargs))
        keystone = kstone.Client(**kwargs)
        log.debug(help(keystone.get_token))
        kwargs['token'] = keystone.get_token(keystone.session)
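As an aside, the lookup order implemented by the get() helper in this hunk is
worth isolating: explicit connection_args win, then the (profile-aware) minion
config, then the supplied default. A minimal standalone sketch (hypothetical
data, no Salt loader required):

    def lookup(connection_args, config, prefix, key, default=None):
        # 'connection_'-prefixed kwargs take precedence
        if 'connection_' + key in connection_args:
            return connection_args['connection_' + key]
        # fall back to the profile-prefixed config, e.g. 'keystone.user'
        return config.get(prefix + key, default)

    args = {'connection_user': 'admin'}
    conf = {'keystone.password': 'verybadpass'}
    assert lookup(args, conf, 'keystone.', 'user') == 'admin'
    assert lookup(args, conf, 'keystone.', 'password') == 'verybadpass'
    assert lookup(args, conf, 'keystone.', 'region') is None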
@@ -167,8 +168,11 @@ def _auth(profile=None, api_version=2, **connection_args):
        # logging it anyway when in debug-mode
        kwargs.pop('password')
        log.debug('Calling glanceclient.client.Client(' +
                  '{0}, {1}, **{2})'.format(api_version, endpoint, kwargs))
        return client.Client(api_version, endpoint, **kwargs)
                  '{0}, {1}, **{2})'.format(api_version,
                                            g_endpoint_url, kwargs))
        # may raise exc.HTTPUnauthorized, exc.HTTPNotFound
        # but we deal with those elsewhere
        return client.Client(api_version, g_endpoint_url, **kwargs)
    else:
        raise NotImplementedError(
            "Can't retrieve a auth_token without keystone")
@@ -302,25 +306,38 @@ def image_delete(id=None, name=None, profile=None):  # pylint: disable=C0103
        salt '*' glance.image_delete name=f16-jeos
    '''
    g_client = _auth(profile)
    image = {'id': False, 'name': None}
    if name:
        for image in g_client.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
                continue
    if not id:
        return {'Error': 'Unable to resolve '
                         'image id for name {0}'.format(name)}
        return {
            'result': False,
            'comment':
                'Unable to resolve image id '
                'for name {0}'.format(name)
            }
    elif not name:
        name = image['name']
    try:
        g_client.images.delete(id)
    except exc.HTTPNotFound:
        return {'Error': 'No image with ID {0}'.format(id)}
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
            }
    except exc.HTTPForbidden as forbidden:
        log.error(str(forbidden))
        return {'Error': str(forbidden)}
    ret = 'Deleted image with ID {0}'.format(id)
    if name:
        ret += ' ({0})'.format(name)
    return ret
        return {
            'result': False,
            'comment': str(forbidden)
            }
    return {
        'result': True,
        'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id),
        }


def image_show(id=None, name=None, profile=None):  # pylint: disable=C0103
@@ -341,8 +358,19 @@ def image_show(id=None, name=None, profile=None):  # pylint: disable=C0103
                id = image.id  # pylint: disable=C0103
                continue
    if not id:
        return {'Error': 'Unable to resolve image id'}
    image = g_client.images.get(id)
        return {
            'result': False,
            'comment':
                'Unable to resolve image ID '
                'for name \'{0}\''.format(name)
            }
    try:
        image = g_client.images.get(id)
    except exc.HTTPNotFound:
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
            }
    pformat = pprint.PrettyPrinter(indent=4).pformat
    log.debug('Properties of image {0}:\n{1}'.format(
        image.name, pformat(image)))
@@ -376,7 +404,11 @@ def image_list(id=None, profile=None, name=None):  # pylint: disable=C0103

        salt '*' glance.image_list
    '''
    #try:
    g_client = _auth(profile)
    #except kstone_exc.Unauthorized:
    #    return False
    #
    # I may want to use this code on Beryllium
    # until we got Boron packages for Ubuntu
    # so please keep this code until Carbon!
@@ -397,8 +429,12 @@ def image_list(id=None, profile=None, name=None):  # pylint: disable=C0103
            if name == image.name:
                if name in ret and CUR_VER < BORON:
                    # Not really worth an exception
                    return {'Error': 'More than one image '
                                     'with name "{0}"'.format(name)}
                    return {
                        'result': False,
                        'comment':
                            'More than one image with '
                            'name "{0}"'.format(name)
                        }
            _add_image(ret, image)
    log.debug('Returning images: {0}'.format(ret))
    return ret
@@ -422,19 +458,26 @@ def image_update(id=None, name=None, profile=None, **kwargs):  # pylint: disable
    '''
    if id:
        image = image_show(id=id)
        # TODO: This unwrapping should get a warn_until
        if len(image) == 1:
        if 'result' in image and not image['result']:
            return image
        elif len(image) == 1:
            image = image.values()[0]
    elif name:
        img_list = image_list(name=name)
        if img_list is not list and 'Error' in img_list:
        if img_list is dict and 'result' in img_list:
            return img_list
        elif len(img_list) == 0:
            return {'result': False,
                    'comment': 'No image with name \'{0}\' '
                               'found.'.format(name)}
            return {
                'result': False,
                'comment':
                    'No image with name \'{0}\' '
                    'found.'.format(name)
                }
        elif len(img_list) == 1:
            image = img_list[0]
            try:
                image = img_list[0]
            except KeyError:
                image = img_list[name]
    else:
        raise SaltInvocationError
    log.debug('Found image:\n{0}'.format(image))

@@ -26,6 +26,14 @@ from salt.exceptions import SaltInvocationError

# Import 3rd-party libs
import salt.ext.six as six
try:
    from shlex import quote as _cmd_quote  # pylint: disable=E0611
except ImportError:
    from pipes import quote as _cmd_quote

from salt.exceptions import (
    SaltInvocationError
)

try:
    from shlex import quote as _cmd_quote  # pylint: disable=E0611

@@ -51,6 +51,7 @@ Module for handling openstack keystone calls.

# Import Python libs
from __future__ import absolute_import
import logging

# Import Salt Libs
import salt.ext.six as six

@@ -66,6 +67,8 @@ try:
except ImportError:
    pass

log = logging.getLogger(__name__)


def __virtual__():
    '''

@@ -707,7 +710,13 @@ def user_get(user_id=None, name=None, profile=None, **connection_args):
            break
    if not user_id:
        return {'Error': 'Unable to resolve user id'}
    user = kstone.users.get(user_id)
    try:
        user = kstone.users.get(user_id)
    except keystoneclient.exceptions.NotFound:
        msg = 'Could not find user \'{0}\''.format(user_id)
        log.error(msg)
        return {'Error': msg}

    ret[user.name] = {'id': user.id,
                      'name': user.name,
                      'email': user.email,

@@ -391,7 +391,7 @@ def vgremove(vgname):
        salt mymachine lvm.vgremove vgname
        salt mymachine lvm.vgremove vgname force=True
    '''
    cmd = ['vgremove' '-f', vgname]
    cmd = ['vgremove', '-f', vgname]
    out = __salt__['cmd.run'](cmd, python_shell=False)
    return out.strip()

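The bug fixed here is Python's implicit string-literal concatenation: with the
comma missing, the two adjacent literals fuse into one bogus argv element. A
minimal demonstration (illustrative volume group name):

    # 'vgremove' '-f' concatenates into a single argument at compile time
    broken = ['vgremove' '-f', 'vg0']
    fixed = ['vgremove', '-f', 'vg0']
    assert broken == ['vgremove-f', 'vg0']
    assert fixed == ['vgremove', '-f', 'vg0']
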
@@ -65,6 +65,9 @@ def ping(host, timeout=False, return_boolean=False):
    '''
    Performs an ICMP ping to a host

    .. versionchanged:: 2015.8.0
        Added support for SunOS

    CLI Example:

    .. code-block:: bash
@@ -86,7 +89,10 @@ def ping(host, timeout=False, return_boolean=False):
        salt '*' network.ping archlinux.org timeout=3
    '''
    if timeout:
        cmd = 'ping -W {0} -c 4 {1}'.format(timeout, salt.utils.network.sanitize_host(host))
        if __grains__['kernel'] == 'SunOS':
            cmd = 'ping -c 4 {1} {0}'.format(timeout, salt.utils.network.sanitize_host(host))
        else:
            cmd = 'ping -W {0} -c 4 {1}'.format(timeout, salt.utils.network.sanitize_host(host))
    else:
        cmd = 'ping -c 4 {0}'.format(salt.utils.network.sanitize_host(host))
    if return_boolean:
@@ -210,7 +216,10 @@ def _ppid():
    Return a dict of pid to ppid mappings
    '''
    ret = {}
    cmd = 'ps -ax -o pid,ppid | tail -n+2'
    if __grains__['kernel'] == 'SunOS':
        cmd = 'ps -a -o pid,ppid | tail -n+2'
    else:
        cmd = 'ps -ax -o pid,ppid | tail -n+2'
    out = __salt__['cmd.run'](cmd, python_shell=True)
    for line in out.splitlines():
        pid, ppid = line.split()
@@ -295,6 +304,38 @@ def _netstat_bsd():
    return ret


def _netstat_sunos():
    '''
    Return netstat information for SunOS flavors
    '''
    log.warning('User and program not (yet) supported on SunOS')
    ret = []
    for addr_family in ('inet', 'inet6'):
        # Lookup TCP connections
        cmd = 'netstat -f {0} -P tcp -an | tail -n+5'.format(addr_family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': 'tcp6' if addr_family == 'inet6' else 'tcp',
                'recv-q': comps[5],
                'send-q': comps[4],
                'local-address': comps[0],
                'remote-address': comps[1],
                'state': comps[6]})
        # Lookup UDP connections
        cmd = 'netstat -f {0} -P udp -an | tail -n+5'.format(addr_family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'proto': 'udp6' if addr_family == 'inet6' else 'udp',
                'local-address': comps[0],
                'remote-address': comps[1] if len(comps) > 2 else ''})

    return ret


def _netstat_route_linux():
    '''
    Return netstat routing information for Linux distros
@@ -426,6 +467,36 @@ def _netstat_route_openbsd():
    return ret


def _netstat_route_sunos():
    '''
    Return netstat routing information for SunOS
    '''
    ret = []
    cmd = 'netstat -f inet -rn | tail -n+5'
    out = __salt__['cmd.run'](cmd, python_shell=True)
    for line in out.splitlines():
        comps = line.split()
        ret.append({
            'addr_family': 'inet',
            'destination': comps[0],
            'gateway': comps[1],
            'netmask': '',
            'flags': comps[2],
            'interface': comps[5]})
    cmd = 'netstat -f inet6 -rn | tail -n+5'
    out = __salt__['cmd.run'](cmd, python_shell=True)
    for line in out.splitlines():
        comps = line.split()
        ret.append({
            'addr_family': 'inet6',
            'destination': comps[0],
            'gateway': comps[1],
            'netmask': '',
            'flags': comps[2],
            'interface': comps[5]})
    return ret


def netstat():
    '''
    Return information on open ports and states
@@ -437,6 +508,9 @@ def netstat():
    .. versionchanged:: 2014.1.4
        Added support for OpenBSD, FreeBSD, and NetBSD

    .. versionchanged:: 2015.8.0
        Added support for SunOS

    CLI Example:

    .. code-block:: bash
@@ -447,6 +521,8 @@ def netstat():
        return _netstat_linux()
    elif __grains__['kernel'] in ('OpenBSD', 'FreeBSD', 'NetBSD'):
        return _netstat_bsd()
    elif __grains__['kernel'] == 'SunOS':
        return _netstat_sunos()
    raise CommandExecutionError('Not yet supported on this platform')
@@ -467,6 +543,9 @@ def traceroute(host):
    '''
    Performs a traceroute to a 3rd party host

    .. versionchanged:: 2015.8.0
        Added support for SunOS

    CLI Example:

    .. code-block:: bash
@@ -483,29 +562,32 @@ def traceroute(host):
    out = __salt__['cmd.run'](cmd)

    # Parse version of traceroute
    cmd2 = 'traceroute --version'
    out2 = __salt__['cmd.run'](cmd2)
    try:
        # Linux traceroute version looks like:
        #   Modern traceroute for Linux, version 2.0.19, Dec 10 2012
        # Darwin and FreeBSD traceroute version looks like: Version 1.4a12+[FreeBSD|Darwin]

        traceroute_version_raw = re.findall(r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0]
        log.debug('traceroute_version_raw: {0}'.format(traceroute_version_raw))
        traceroute_version = []
        for t in traceroute_version_raw:
            try:
                traceroute_version.append(int(t))
            except ValueError:
                traceroute_version.append(t)

        if len(traceroute_version) < 3:
            traceroute_version.append(0)

        log.debug('traceroute_version: {0}'.format(traceroute_version))

    except IndexError:
    if salt.utils.is_sunos():
        traceroute_version = [0, 0, 0]
    else:
        cmd2 = 'traceroute --version'
        out2 = __salt__['cmd.run'](cmd2)
        try:
            # Linux traceroute version looks like:
            #   Modern traceroute for Linux, version 2.0.19, Dec 10 2012
            # Darwin and FreeBSD traceroute version looks like: Version 1.4a12+[FreeBSD|Darwin]

            traceroute_version_raw = re.findall(r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0]
            log.debug('traceroute_version_raw: {0}'.format(traceroute_version_raw))
            traceroute_version = []
            for t in traceroute_version_raw:
                try:
                    traceroute_version.append(int(t))
                except ValueError:
                    traceroute_version.append(t)

            if len(traceroute_version) < 3:
                traceroute_version.append(0)

            log.debug('traceroute_version: {0}'.format(traceroute_version))

        except IndexError:
            traceroute_version = [0, 0, 0]

    for line in out.splitlines():
        if ' ' not in line:
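For reference, here is the version regex above applied to the two formats the
comments mention (a quick interpreter check, sample strings only):

    import re
    pat = r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)'
    linux = 'Modern traceroute for Linux, version 2.0.19, Dec 10 2012'
    bsd = 'Version 1.4a12+FreeBSD'
    print(re.findall(pat, linux)[0])  # ('2', '0', '19')
    print(re.findall(pat, bsd)[0])    # ('1', '4a12+FreeBSD', '')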
@@ -513,7 +595,8 @@ def traceroute(host):
        if line.startswith('traceroute'):
            continue

        if 'Darwin' in str(traceroute_version[1]) or 'FreeBSD' in str(traceroute_version[1]):
        if 'Darwin' in str(traceroute_version[1]) or 'FreeBSD' in str(traceroute_version[1]) or \
                __grains__['kernel'] == 'SunOS':
            try:
                traceline = re.findall(r'\s*(\d*)\s+(.*)\s+\((.*)\)\s+(.*)$', line)[0]
            except IndexError:
@@ -591,6 +674,9 @@ def arp():
    '''
    Return the arp table from the minion

    .. versionchanged:: 2015.8.0
        Added support for SunOS

    CLI Example:

    .. code-block:: bash
@@ -603,12 +689,16 @@ def arp():
        comps = line.split()
        if len(comps) < 4:
            continue
        if not __grains__['kernel'] == 'OpenBSD':
            ret[comps[3]] = comps[1].strip('(').strip(')')
        else:
        if __grains__['kernel'] == 'SunOS':
            if ':' not in comps[-1]:
                continue
            ret[comps[-1]] = comps[1]
        elif __grains__['kernel'] == 'OpenBSD':
            if comps[0] == 'Host' or comps[1] == '(incomplete)':
                continue
            ret[comps[1]] = comps[0]
        else:
            ret[comps[3]] = comps[1].strip('(').strip(')')
    return ret
@@ -809,16 +899,31 @@ def mod_hostname(hostname):
    '''
    Modify hostname

    .. versionchanged:: 2015.8.0
        Added support for SunOS (Solaris 10, Illumos, SmartOS)

    CLI Example:

    .. code-block:: bash

        salt '*' network.mod_hostname master.saltstack.com
    '''
    #
    # SunOS tested on SmartOS and OmniOS (Solaris 10 compatible)
    # Oracle Solaris 11 uses smf, currently not supported
    #
    # /etc/nodename is the hostname only, not fqdn
    # /etc/defaultdomain is the domain
    # /etc/hosts should have both fqdn and hostname entries
    #

    if hostname is None:
        return False

    hostname_cmd = salt.utils.which('hostnamectl') or salt.utils.which('hostname')
    if salt.utils.is_sunos():
        uname_cmd = '/usr/bin/uname' if salt.utils.is_smartos() else salt.utils.which('uname')
        check_hostname_cmd = salt.utils.which('check-hostname')

    if hostname_cmd.endswith('hostnamectl'):
        __salt__['cmd.run']('{0} set-hostname {1}'.format(hostname_cmd, hostname))
@@ -826,9 +931,16 @@ def mod_hostname(hostname):

    # Grab the old hostname so we know which hostname to change and then
    # change the hostname using the hostname command
    o_hostname = __salt__['cmd.run']('{0} -f'.format(hostname_cmd))
    if not salt.utils.is_sunos():
        o_hostname = __salt__['cmd.run']('{0} -f'.format(hostname_cmd))
    else:
        # output: Hostname core OK: fully qualified as core.acheron.be
        o_hostname = __salt__['cmd.run'](check_hostname_cmd).split(' ')[-1]

    __salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname))
    if not salt.utils.is_sunos():
        __salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname))
    else:
        __salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0]))

    # Modify the /etc/hosts file to replace the old hostname with the
    # new hostname
@@ -840,6 +952,9 @@ def mod_hostname(hostname):

    try:
        host[host.index(o_hostname)] = hostname
        if salt.utils.is_sunos():
            # also set a copy of the hostname
            host[host.index(o_hostname.split('.')[0])] = hostname.split('.')[0]
    except ValueError:
        pass
@@ -863,6 +978,13 @@ def mod_hostname(hostname):
        with salt.utils.fopen('/etc/myname', 'w') as fh_:
            fh_.write(hostname + '\n')

    # Update /etc/nodename and /etc/defaultdomain on SunOS
    if salt.utils.is_sunos():
        with salt.utils.fopen('/etc/nodename', 'w') as fh_:
            fh_.write(hostname.split('.')[0] + '\n')
        with salt.utils.fopen('/etc/defaultdomain', 'w') as fh_:
            fh_.write(".".join(hostname.split('.')[1:]) + '\n')

    return True
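The split used throughout the SunOS branch is worth spelling out; for the
fully qualified name from the check-hostname comment above (illustrative):

    fqdn = 'core.acheron.be'
    fqdn.split('.')[0]             # 'core'       -> /etc/nodename
    '.'.join(fqdn.split('.')[1:])  # 'acheron.be' -> /etc/defaultdomain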
@@ -1101,6 +1223,9 @@ def routes(family=None):
    '''
    Return currently configured routes from routing table

    .. versionchanged:: 2015.8.0
        Added support for SunOS (Solaris 10, Illumos, SmartOS)

    CLI Example:

    .. code-block:: bash
@@ -1112,6 +1237,8 @@ def routes(family=None):

    if __grains__['kernel'] == 'Linux':
        routes_ = _netstat_route_linux()
    elif __grains__['kernel'] == 'SunOS':
        routes_ = _netstat_route_sunos()
    elif __grains__['os'] in ['FreeBSD', 'MacOS', 'Darwin']:
        routes_ = _netstat_route_freebsd()
    elif __grains__['os'] in ['NetBSD']:
@@ -1132,6 +1259,9 @@ def default_route(family=None):
    '''
    Return default route(s) from routing table

    .. versionchanged:: 2015.8.0
        Added support for SunOS (Solaris 10, Illumos, SmartOS)

    CLI Example:

    .. code-block:: bash
@@ -1147,7 +1277,8 @@ def default_route(family=None):
    if __grains__['kernel'] == 'Linux':
        default_route['inet'] = ['0.0.0.0', 'default']
        default_route['inet6'] = ['::/0', 'default']
    elif __grains__['os'] in ['FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS', 'Darwin']:
    elif __grains__['os'] in ['FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS', 'Darwin'] or \
            __grains__['kernel'] == 'SunOS':
        default_route['inet'] = ['default']
        default_route['inet6'] = ['default']
    else:
@@ -1172,6 +1303,10 @@ def get_route(ip):

    .. versionadded:: 2015.5.3

    .. versionchanged:: 2015.8.0
        Added support for SunOS (Solaris 10, Illumos, SmartOS)
        Added support for OpenBSD

    CLI Example::

        salt '*' network.get_route 10.10.10.10
@@ -1186,8 +1321,76 @@ def get_route(ip):
               'destination': ip,
               'gateway': m.group('gateway'),
               'interface': m.group('interface'),
               'source': m.group('source')}
               'source': m.group('source')
        }

        return ret

    if __grains__['kernel'] == 'SunOS':
        # [root@nacl ~]# route -n get 172.16.10.123
        #   route to: 172.16.10.123
        #destination: 172.16.10.0
        #       mask: 255.255.255.0
        #  interface: net0
        #      flags: <UP,DONE,KERNEL>
        # recvpipe sendpipe ssthresh rtt,ms rttvar,ms hopcount mtu expire
        #        0        0        0      0        0        0 1500      0
        cmd = '/usr/sbin/route -n get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=False)

        ret = {
            'destination': ip,
            'gateway': None,
            'interface': None,
            'source': None
        }

        for line in out.splitlines():
            line = line.split(':')
            if 'route to' in line[0]:
                ret['destination'] = line[1].strip()
            if 'gateway' in line[0]:
                ret['gateway'] = line[1].strip()
            if 'interface' in line[0]:
                ret['interface'] = line[1].strip()
                ret['source'] = salt.utils.network.interface_ip(line[1].strip())

        return ret

    if __grains__['kernel'] == 'OpenBSD':
        # [root@exosphere] route -n get blackdot.be
        #   route to: 5.135.127.100
        #destination: default
        #       mask: default
        #    gateway: 192.168.0.1
        #  interface: vio0
        # if address: 192.168.0.2
        #   priority: 8 (static)
        #      flags: <UP,GATEWAY,DONE,STATIC>
        #     use mtu expire
        # 8352657   0      0
        cmd = 'route -n get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=False)

        ret = {
            'destination': ip,
            'gateway': None,
            'interface': None,
            'source': None
        }

        for line in out.splitlines():
            line = line.split(':')
            if 'route to' in line[0]:
                ret['destination'] = line[1].strip()
            if 'gateway' in line[0]:
                ret['gateway'] = line[1].strip()
            if 'interface' in line[0]:
                ret['interface'] = line[1].strip()
            if 'if address' in line[0]:
                ret['source'] = line[1].strip()

        return ret

    else:
        raise CommandExecutionError('Not yet supported on this platform')

@@ -181,6 +181,8 @@ def members(name, members_list):
    '''
    Replaces members of the group with a provided list.

    .. versionadded:: 2015.5.4

    CLI Example:

        salt '*' group.members foo 'user1,user2,user3,...'

@@ -9,8 +9,9 @@ import copy
import logging
try:
    import pwd
    HAS_PWD = True
except ImportError:
    pass
    HAS_PWD = False

# Import 3rd party libs
import salt.ext.six as six

@@ -29,7 +30,9 @@ def __virtual__():
    '''
    Set the user module if the kernel is FreeBSD
    '''
    return __virtualname__ if __grains__['kernel'] == 'FreeBSD' else False
    if HAS_PWD and __grains__['kernel'] == 'FreeBSD':
        return __virtualname__
    return False


def _get_gecos(name):

|
@ -1,29 +1,36 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage the registry on Windows.
|
||||
===========================
|
||||
Manage the Windows registry
|
||||
===========================
|
||||
|
||||
The read_key and set_key functions will be updated in Boron to reflect proper
|
||||
registry usage. The registry has three main components. Hives, Keys, and Values.
|
||||
|
||||
### Hives
|
||||
-----
|
||||
Hives
|
||||
-----
|
||||
Hives are the main sections of the registry and all begin with the word HKEY.
|
||||
- HKEY_LOCAL_MACHINE
|
||||
- HKEY_CURRENT_USER
|
||||
- HKEY_USER
|
||||
|
||||
### Keys
|
||||
----
|
||||
Keys
|
||||
----
|
||||
Keys are the folders in the registry. Keys can have many nested subkeys. Keys
|
||||
can have a value assigned to them under the (Default)
|
||||
|
||||
### Values
|
||||
Values are name/data pairs. There can be many values in a key. The (Default)
|
||||
value corresponds to the Key, the rest are their own value pairs.
|
||||
-----------------
|
||||
Values or Entries
|
||||
-----------------
|
||||
Values/Entries are name/data pairs. There can be many values in a key. The
|
||||
(Default) value corresponds to the Key, the rest are their own value pairs.
|
||||
|
||||
:depends: - winreg Python module
|
||||
'''
|
||||
|
||||
# TODO: Figure out the exceptions _winreg can raise and properly catch
|
||||
# them instead of a bare except that catches any exception at all
|
||||
# TODO: Figure out the exceptions _winreg can raise and properly catch them
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
|
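As a companion to the hive/key/value terminology above, a minimal sketch of
reading one value entry with the stdlib _winreg (assumes a Windows host and
Python 2; the key and value name below are examples, not from this module):

    import _winreg

    hive = _winreg.HKEY_LOCAL_MACHINE
    key = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
    handle = _winreg.OpenKey(hive, key)
    # vname is one name/data pair under the key
    vdata, vtype = _winreg.QueryValueEx(handle, 'ProductName')
    _winreg.CloseKey(handle)
    print(vdata, vtype)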
@@ -142,44 +149,35 @@ def read_key(hkey, path, key=None):
                          key=path,
                          vname=key)

    registry = Registry()
    hive = registry.hkeys[hkey]

    try:
        value = _winreg.QueryValue(hive, path)
        if value:
            ret['vdata'] = value
        else:
            ret['vdata'] = None
            ret['comment'] = 'Empty Value'
    except WindowsError as exc:  # pylint: disable=E0602
        log.debug(exc)
        ret['comment'] = '{0}'.format(exc)
        ret['success'] = False

    return ret
    return read_value(hive=hkey, key=path)

def read_value(hive, key, vname=None):
    r'''
    Reads a registry value or the default value for a key.
    Reads a registry value entry or the default value for a key.

    :param hive: string
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
    :param str hive:
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU

    :param key: string
        The key (looks like a path) to the value name.
    :param str key:
        The key (looks like a path) to the value name.

    :param vname: string
        The value name. These are the individual name/data pairs under the key. If
        not passed, the key (Default) value will be returned
    :param str vname:
        The value name. These are the individual name/data pairs under the key.
        If not passed, the key (Default) value will be returned

    :return: dict
        A dictionary containing the passed settings as well as the value_data if
        successful. If unsuccessful, sets success to False
    :return:
        A dictionary containing the passed settings as well as the value_data if
        successful. If unsuccessful, sets success to False

    If vname is not passed:
        - Returns the first unnamed value (Default) as a string.
        - Returns none if first unnamed value is empty.
        - Returns False if key not found.
    :rtype: dict

    CLI Example:
@@ -205,9 +203,9 @@ def read_value(hive, key, vname=None):

    try:
        handle = _winreg.OpenKey(hive, key)
        value, vtype = _winreg.QueryValueEx(handle, vname)
        if value:
            ret['vdata'] = value
        vdata, vtype = _winreg.QueryValueEx(handle, vname)
        if vdata:
            ret['vdata'] = vdata
            ret['vtype'] = registry.vtype_reverse[vtype]
        else:
            ret['comment'] = 'Empty Value'
|
|||
vdata=value,
|
||||
vtype=vtype)
|
||||
|
||||
registry = Registry()
|
||||
hive = registry.hkeys[hkey]
|
||||
vtype = registry.vtype['REG_SZ']
|
||||
|
||||
try:
|
||||
_winreg.SetValue(hive, path, vtype, value)
|
||||
return True
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.error(exc)
|
||||
return False
|
||||
return set_value(hive=hkey, key=path, vdata=value, vtype=vtype)
|
||||
|
||||
|
||||
def set_value(hive, key, vname=None, vdata=None, vtype='REG_SZ', reflection=True):
|
||||
'''
|
||||
Sets a registry value.
|
||||
Sets a registry value entry or the default value for a key.
|
||||
|
||||
:param hive: string
|
||||
The name of the hive. Can be one of the following
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USER or HKU
|
||||
:param str hive:
|
||||
The name of the hive. Can be one of the following
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USER or HKU
|
||||
|
||||
:param key: string
|
||||
The key (looks like a path) to the value name.
|
||||
:param str key:
|
||||
The key (looks like a path) to the value name.
|
||||
|
||||
:param vname: string
|
||||
The value name. These are the individual name/data pairs under the key. If
|
||||
not passed, the key (Default) value will be set.
|
||||
:param str vname:
|
||||
The value name. These are the individual name/data pairs under the key.
|
||||
If not passed, the key (Default) value will be set.
|
||||
|
||||
:param vdata: string
|
||||
The value data to be set.
|
||||
:param str vdata:
|
||||
The value data to be set.
|
||||
|
||||
:param vtype: string
|
||||
The value type. Can be one of the following:
|
||||
- REG_BINARY
|
||||
- REG_DWORD
|
||||
- REG_EXPAND_SZ
|
||||
- REG_MULTI_SZ
|
||||
- REG_SZ
|
||||
:param str vtype:
|
||||
The value type. Can be one of the following:
|
||||
- REG_BINARY
|
||||
- REG_DWORD
|
||||
- REG_EXPAND_SZ
|
||||
- REG_MULTI_SZ
|
||||
- REG_SZ
|
||||
|
||||
:param reflection: boolean
|
||||
A boolean value indicating that the value should also be set in the
|
||||
Wow6432Node portion of the registry. Only applies to 64 bit Windows. This
|
||||
setting is ignored for 32 bit Windows.
|
||||
:param bool reflection:
|
||||
A boolean value indicating that the value should also be set in the
|
||||
Wow6432Node portion of the registry. Only applies to 64 bit Windows.
|
||||
This setting is ignored for 32 bit Windows.
|
||||
|
||||
:return: boolean
|
||||
Returns True if successful, False if not
|
||||
:return:
|
||||
Returns True if successful, False if not
|
||||
:rtype: bool
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@@ -321,7 +311,7 @@ def set_value(hive, key, vname=None, vdata=None, vtype='REG_SZ', reflection=True
        _winreg.SetValueEx(handle, vname, 0, vtype, vdata)
        _winreg.CloseKey(handle)
        return True
    except WindowsError as exc:  # pylint: disable=E0602
    except (WindowsError, ValueError) as exc:  # pylint: disable=E0602
        log.error(exc)
        return False
@@ -353,7 +343,7 @@ def create_key(hkey, path, key=None, value=None, reflection=True):
        salt '*' reg.create_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' '0.97'
    '''
    if key:  # This if statement will be removed in Boron
        salt.utils.warn_until('Boron', 'Use reg.set_value to set a registry '
        salt.utils.warn_until('Boron', 'Use reg.set_value to create a registry '
                              'value. This functionality will be '
                              'removed in Salt Boron')
        return set_value(hive=hkey,
@@ -362,21 +352,10 @@ def create_key(hkey, path, key=None, value=None, reflection=True):
                         vdata=value,
                         vtype='REG_SZ')

    registry = Registry()
    hive = registry.hkeys[hkey]
    key = path
    access_mask = registry.reflection_mask[reflection]

    try:
        handle = _winreg.CreateKeyEx(hive, key, 0, access_mask)
        _winreg.CloseKey(handle)
        return True
    except WindowsError as exc:  # pylint: disable=E0602
        log.error(exc)
        return False
    return set_value(hive=hkey, key=path)


def delete_key(hkey, path, key=None, reflection=True):
def delete_key(hkey, path, key=None, reflection=True, force=False):
    '''
    *** Incorrect Usage ***
    The name of this function is misleading and will be changed to reflect
@@ -396,29 +375,62 @@ def delete_key(hkey, path, key=None, reflection=True, force=False):

    Delete a registry key

    Note: This cannot delete a key with subkeys

    CLI Example:

    .. code-block:: bash

        salt '*' reg.delete_key HKEY_CURRENT_USER 'SOFTWARE\\Salt'

    :param str hkey: (will be changed to hive)
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU

    :param str path: (will be changed to key)
        The key (looks like a path) to remove.

    :param str key: (used incorrectly)
        Will be removed in Boron

    :param bool reflection:
        A boolean value indicating that the value should also be removed from
        the Wow6432Node portion of the registry. Only applies to 64 bit Windows.
        This setting is ignored for 32 bit Windows.

        Only applies to delete value. If the key parameter is passed, this
        function calls delete_value instead. Will be changed in Boron.

    :param bool force:
        A boolean value indicating that all subkeys should be removed as well.
        If this is set to False (default) and there are subkeys, the delete_key
        function will fail.

    :return:
        Returns True if successful, False if not
        If force=True, the results of delete_key_recursive are returned.
    :rtype: bool
    '''

    if key:  # This if statement will be removed in Boron
        salt.utils.warn_until('Boron', 'Use reg.set_value to set a registry '
                              'value. This functionality will be '
                              'removed in Salt Boron')
        salt.utils.warn_until('Boron',
                              'Variable names will be changed to match Windows '
                              'Registry terminology. These changes will be '
                              'made in Boron')
        return delete_value(hive=hkey,
                            key=path,
                            vname=key,
                            reflection=reflection)

    if force:
        return delete_key_recursive(hkey, path)

    registry = Registry()
    hive = registry.hkeys[hkey]
    key = path

    try:
        # Can't use delete_value to delete a key
        _winreg.DeleteKey(hive, key)
        return True
    except WindowsError as exc:  # pylint: disable=E0602
@@ -426,30 +438,102 @@ def delete_key(hkey, path, key=None, reflection=True, force=False):
        return False


def delete_key_recursive(hive, key):
    '''
    .. versionadded:: 2015.5.4

    Delete a registry key to include all subkeys.

    :param hive:
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU

    :param key:
        The key to remove (looks like a path)

    :return:
        A dictionary listing the keys that deleted successfully as well as those
        that failed to delete.
    :rtype: dict
    '''
    # Functions for traversing the registry tree
    def subkeys(key):
        i = 0
        while True:
            try:
                subkey = _winreg.EnumKey(key, i)
                yield subkey
                i += 1
            except WindowsError:  # pylint: disable=E0602
                break

    def traverse_registry_tree(hkey, keypath, ret):
        key = _winreg.OpenKey(hkey, keypath, 0, _winreg.KEY_READ)
        for subkeyname in subkeys(key):
            subkeypath = r'{0}\{1}'.format(keypath, subkeyname)
            ret = traverse_registry_tree(hkey, subkeypath, ret)
            ret.append('{0}'.format(subkeypath))
        return ret

    # Instantiate the registry object
    registry = Registry()
    hkey = registry.hkeys[hive]
    keypath = key

    # Get a reverse list of registry keys to be deleted
    key_list = []
    key_list = traverse_registry_tree(hkey, keypath, key_list)

    ret = {'Deleted': [],
           'Failed': []}

    # Delete all subkeys
    for keypath in key_list:
        try:
            _winreg.DeleteKey(hkey, keypath)
            ret['Deleted'].append(r'{0}\{1}'.format(hive, keypath))
        except WindowsError as exc:  # pylint: disable=E0602
            log.error(exc)
            ret['Failed'].append(r'{0}\{1} {2}'.format(hive, key, exc))

    # Delete the key now that all the subkeys are deleted
    try:
        _winreg.DeleteKey(hkey, key)
        ret['Deleted'].append(r'{0}\{1}'.format(hive, key))
    except WindowsError as exc:  # pylint: disable=E0602
        log.error(exc)
        ret['Failed'].append(r'{0}\{1} {2}'.format(hive, key, exc))

    return ret


def delete_value(hive, key, vname=None, reflection=True):
    '''
    Deletes a registry value.
    Delete a registry value entry or the default value for a key.

    :param hive: string
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
    :param str hive:
        The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU

    :param key: string
        The key (looks like a path) to the value name.
    :param str key:
        The key (looks like a path) to the value name.

    :param vname: string
        The value name. These are the individual name/data pairs under the key. If
        not passed, the key (Default) value will be deleted.
    :param str vname:
        The value name. These are the individual name/data pairs under the key.
        If not passed, the key (Default) value will be deleted.

    :param reflection: boolean
        A boolean value indicating that the value should also be set in the
        Wow6432Node portion of the registry. Only applies to 64 bit Windows. This
        setting is ignored for 32 bit Windows.
    :param bool reflection:
        A boolean value indicating that the value should also be set in the
        Wow6432Node portion of the registry. Only applies to 64 bit Windows.
        This setting is ignored for 32 bit Windows.

    :return: boolean
        Returns True if successful, False if not
    :return:
        Returns True if successful, False if not
    :rtype: bool

    CLI Example:

@@ -47,7 +47,7 @@ def _create_rpmmacros():
        os.makedirs(mockdir)

    rpmmacros = os.path.join(home, '.rpmmacros')
    with open(rpmmacros, "w") as afile:
    with salt.utils.fopen(rpmmacros, 'w') as afile:
        afile.write('%_topdir {0}\n'.format(rpmbuilddir))
        afile.write('%signature gpg\n')
        afile.write('%_gpg_name packaging@saltstack.com\n')

@@ -100,7 +100,7 @@ def _get_distset(tgt):
    if tgtattrs[1] in ['5', '6', '7']:
        distset = '--define "dist .el{0}"'.format(tgtattrs[1])
    else:
        distset = ""
        distset = ''

    return distset

@@ -109,7 +109,7 @@ def _get_deps(deps, tree_base, saltenv='base'):
    '''
    Get include string for list of dependent rpms to build package
    '''
    deps_list = ""
    deps_list = ''
    if deps is None:
        return deps_list
    if not isinstance(deps, list):

@@ -174,7 +174,7 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base
        salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl package for rhel 7 using user
    "mock" and place it in /var/www/html/ on the minion
    mock and place it in /var/www/html/ on the minion
    '''
    ret = {}
    if not os.path.isdir(dest_dir):

@@ -187,13 +187,13 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base

    distset = _get_distset(tgt)

    noclean = ""
    noclean = ''
    deps_dir = tempfile.mkdtemp()
    deps_list = _get_deps(deps, deps_dir, saltenv)
    if deps_list and not deps_list.isspace():
        cmd = 'mock --root={0} {1}'.format(tgt, deps_list)
        __salt__['cmd.run'](cmd, runas=runas)
        noclean += " --no-clean"
        noclean += ' --no-clean'

    for srpm in srpms:
        dbase = os.path.dirname(srpm)

@@ -231,7 +231,7 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base
    return ret


def make_repo(repodir):
def make_repo(repodir, keyid=None, env=None):
    '''
    Given the repodir, create a yum repository out of the rpms therein

@@ -105,6 +105,11 @@ def list_(show_all=False,
            del schedule[job]
            continue

        # if enabled is not included in the job,
        # assume job is enabled.
        if 'enabled' not in schedule[job]:
            schedule[job]['enabled'] = True

        for item in pycopy.copy(schedule[job]):
            if item not in SCHEDULE_CONF:
                del schedule[job][item]

@@ -437,6 +437,7 @@ def start(name):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
    unmask(name)
    return not __salt__['cmd.retcode'](_systemctl_cmd('start', name))


@@ -467,6 +468,7 @@ def restart(name):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
    unmask(name)
    return not __salt__['cmd.retcode'](_systemctl_cmd('restart', name))


@@ -482,6 +484,7 @@ def reload_(name):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
    unmask(name)
    return not __salt__['cmd.retcode'](_systemctl_cmd('reload', name))


@@ -497,6 +500,7 @@ def force_reload(name):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
    unmask(name)
    return not __salt__['cmd.retcode'](_systemctl_cmd('force-reload', name))


@@ -531,8 +535,7 @@ def enable(name, **kwargs):
    '''
    if _untracked_custom_unit_found(name) or _unit_file_changed(name):
        systemctl_reload()
    if masked(name):
        unmask(name)
    unmask(name)
    if _service_is_sysv(name):
        executable = _get_service_exec()
        cmd = '{0} -f {1} defaults 99'.format(executable, name)

@@ -110,7 +110,6 @@ def ping():

        salt '*' test.ping
    '''

    if 'proxymodule' in __opts__:
        ping_cmd = __opts__['proxymodule'].loaded_base_name + '.ping'
        return __opts__['proxymodule'][ping_cmd]()

@@ -9,8 +9,9 @@ import re

try:
    import pwd
    HAS_PWD = True
except ImportError:
    pass
    HAS_PWD = False
import logging
import copy


@@ -32,10 +33,9 @@ __virtualname__ = 'user'
def __virtual__():
    '''
    Set the user module if the kernel is Linux, OpenBSD or NetBSD
    and remove some of the functionality on OS X
    '''

    if __grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'):
    if HAS_PWD and __grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'):
        return __virtualname__
    return False

@@ -22,12 +22,11 @@ options
The ``bucket`` parameter specifies the target S3 bucket. It is required.

The ``keyid`` parameter specifies the key id to use when access the S3 bucket.
When it is set to None or omitted it will try to grab credentials from IAM role.
The parameter has default value set to None.
If it is not provided, an attempt to fetch it from EC2 instance meta-data will
be made.

The ``key`` parameter specifies the key to use when access the S3 bucket. It
When it is set to None or omitted it will try to grab credentials from IAM role.
The parameter has default value set to None.
The ``key`` parameter specifies the key to use when access the S3 bucket. If it
is not provided, an attempt to fetch it from EC2 instance meta-data will be made.

The ``multiple_env`` defaults to False. It specifies whether the pillar should
interpret top level folders as pillar environments (see mode section below).
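A sketch of the fallback the two paragraphs above describe (the helper name
and returned fields are hypothetical, not this pillar's actual implementation):

    def _fetch_iam_role_credentials():
        # stand-in for an EC2 instance meta-data query (hypothetical)
        return {'AccessKeyId': 'AKIA...', 'SecretAccessKey': 's3cr3t'}

    def resolve_s3_creds(keyid=None, key=None):
        # explicit values win; None falls back to the instance meta-data
        if keyid is None or key is None:
            iam = _fetch_iam_role_credentials()
            keyid = keyid or iam['AccessKeyId']
            key = key or iam['SecretAccessKey']
        return keyid, key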
@@ -49,16 +49,6 @@ def init(opts):
        DETAILS['url'] += '/'


def id(opts):
    '''
    Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.
    If it changes while the proxy is running the salt-master will get
    really confused and may stop talking to this minion
    '''
    r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True)
    return r['dict']['id'].encode('ascii', 'ignore')


def grains():
    '''
    Get the grains from the proxied device

salt/scripts.py: 107 changes
@@ -154,6 +154,113 @@ def salt_minion():
    logging.basicConfig()


def proxy_minion_process(queue):
    '''
    Start a proxy minion process
    '''
    import salt.cli.daemons
    # salt_minion spawns this function in a new process

    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone

        NOTE: there is a small race issue where the parent PID could be replaced
              with another process with the same PID!
        '''
        while True:
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                os.kill(parent_pid, 0)
            except OSError:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                os._exit(999)
    if not salt.utils.is_windows():
        thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
        thread.start()
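    # Editor's aside: the liveness trick above, isolated. Signal 0 performs
    # error checking only (Unix); OSError means the PID is gone (or not ours).
    #
    #     import os
    #
    #     def pid_alive(pid):
    #         try:
    #             os.kill(pid, 0)
    #             return True
    #         except OSError:
    #             return False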
    restart = False
    proxyminion = None
    try:
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
    except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
        log.error('Proxy Minion failed to start: ', exc_info=True)
        restart = True
    except SystemExit as exc:
        restart = False

    if restart is True:
        log.warn('** Restarting proxy minion **')
        delay = 60
        if proxyminion is not None:
            if hasattr(proxyminion, 'config'):
                delay = proxyminion.config.get('random_reauth_delay', 60)
        random_delay = randint(1, delay)
        log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay))
        # perform delay after minion resources have been cleaned
        queue.put(random_delay)
    else:
        queue.put(0)


def salt_proxy_minion():
    '''
    Start a proxy minion.
    '''
    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.is_windows():
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # This breaks in containers
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                break
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()


def salt_syndic():
    '''
    Start the salt syndic.

@@ -2329,15 +2329,15 @@ class BaseHighState(object):
            for ord_env in env_order:
                if ord_env in env_intersection:
                    final_list.append(ord_env)
            return final_list
            return set(final_list)

        elif env_order:
            return env_order
            return set(env_order)
        else:
            for cenv in client_envs:
                if cenv not in envs:
                    envs.append(cenv)
            return envs
            return set(envs)

    def get_tops(self):
        '''

@@ -193,7 +193,11 @@ Overriding the alarm values on the resource:
              attributes:
                threshold: 2.0
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Libs
import salt.utils.dictupdate as dictupdate
from salt.exceptions import SaltInvocationError
import salt.ext.six as six

@@ -307,13 +311,17 @@ def present(
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret
    _ret = _attributes_present(name, attributes, region, key, keyid, profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        if ret['result'] is False:
            return ret

    if attributes:
        _ret = _attributes_present(name, attributes, region, key, keyid, profile)
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])

        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret

    _ret = _health_check_present(name, health_check, region, key, keyid,
                                 profile)
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])

@@ -125,8 +125,7 @@ from salt.ext.six import string_types
import salt.utils
from salt.modules.cron import (
    _needs_change,
    _cron_matched,
    SALT_CRON_NO_IDENTIFIER
    _cron_matched
)


@@ -217,7 +216,7 @@ def present(name,
            month='*',
            dayweek='*',
            comment=None,
            identifier=None):
            identifier=False):
    '''
    Verifies that the specified cron job is present for the specified user.
    For more advanced information about what exactly can be set in the cron

@@ -257,8 +256,8 @@ def present(name,
        edits. This defaults to the state id
    '''
    name = ' '.join(name.strip().split())
    if not identifier:
        identifier = SALT_CRON_NO_IDENTIFIER
    if identifier is False:
        identifier = name
    ret = {'changes': {},
           'comment': '',
           'name': name,

@@ -313,7 +312,7 @@ def present(name,

def absent(name,
           user='root',
           identifier=None,
           identifier=False,
           **kwargs):
    '''
    Verifies that the specified cron job is absent for the specified user; only

@@ -335,8 +334,8 @@ def absent(name,
    ### of unsupported arguments will result in a traceback.

    name = ' '.join(name.strip().split())
    if not identifier:
        identifier = SALT_CRON_NO_IDENTIFIER
    if identifier is False:
        identifier = name
    ret = {'name': name,
           'result': True,
           'changes': {},
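The switch from None to False as the default is what lets the state tell
"nothing passed" apart from an explicit identifier=None; a minimal sketch of
that sentinel pattern (simplified signature, illustrative only):

    def present(name, identifier=False):
        if identifier is False:      # nothing passed: default to the state id
            identifier = name
        return identifier

    assert present('job1') == 'job1'        # defaulted to name
    assert present('job1', None) is None    # explicit None preserved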
salt/states/git.py: 1829 changes (diff suppressed because it is too large)

@@ -11,6 +11,12 @@ import time
# Import salt libs
from salt.utils import warn_until

# Import OpenStack libs
from keystoneclient.apiclient.exceptions import \
    Unauthorized as kstone_Unauthorized
from glanceclient.exc import \
    HTTPUnauthorized as glance_Unauthorized

log = logging.getLogger(__name__)


@@ -21,7 +27,12 @@ def _find_image(name):
        - None, 'No such image found'
        - False, 'Found more than one image with given name'
    '''
    images_dict = __salt__['glance.image_list'](name=name)
    try:
        images_dict = __salt__['glance.image_list'](name=name)
    except kstone_Unauthorized:
        return False, 'keystoneclient: Unauthorized'
    except glance_Unauthorized:
        return False, 'glanceclient: Unauthorized'
    log.debug('Got images_dict: {0}'.format(images_dict))

    warn_until('Boron', 'Starting with Boron '

@@ -220,8 +220,7 @@ def absent(name, destructive=False):
           'changes': {},
           'result': True,
           'comment': ''}
    grain = __grains__.get(name)
    if grain:
    if name in __grains__:
        if __opts__['test']:
            ret['result'] = None
            if destructive is True:

@ -8,10 +8,12 @@ Manage OpenStack configuration file settings.
|
|||
:platform: linux
|
||||
|
||||
'''
|
||||
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import salt libs
|
||||
import salt.exceptions
|
||||
# Import Salt Libs
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
|
||||
def __virtual__():
|
||||
|
@ -48,18 +50,30 @@ def present(name, filename, section, value, parameter=None):
|
|||
if parameter is None:
|
||||
parameter = name
|
||||
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': False,
|
||||
'comment': ''}
|
||||
|
||||
try:
|
||||
old_value = __salt__['openstack_config.get'](filename=filename,
|
||||
section=section,
|
||||
parameter=parameter)
|
||||
|
||||
if old_value == value:
|
||||
return {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': 'The value is already set to the correct value'}
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'The value is already set to the correct value'
|
||||
return ret
|
||||
|
||||
except salt.exceptions.CommandExecutionError as e:
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Value \'{0}\' is set to be changed to \'{1}\'.'.format(
|
||||
old_value,
|
||||
value
|
||||
)
|
||||
return ret
|
||||
|
||||
except CommandExecutionError as e:
|
||||
if not str(e).lower().startswith('parameter not found:'):
|
||||
raise
|
||||
|
||||
|
@ -68,10 +82,11 @@ def present(name, filename, section, value, parameter=None):
|
|||
parameter=parameter,
|
||||
value=value)
|
||||
|
||||
return {'name': name,
|
||||
'changes': {'Value': 'Updated'},
|
||||
'result': True,
|
||||
'comment': 'The value has been updated'}
|
||||
ret['changes'] = {'Value': 'Updated'}
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'The value has been updated'
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def absent(name, filename, section, parameter=None):
|
||||
|
@ -92,23 +107,35 @@ def absent(name, filename, section, parameter=None):
|
|||
if parameter is None:
|
||||
parameter = name
|
||||
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': False,
|
||||
'comment': ''}
|
||||
|
||||
try:
|
||||
old_value = __salt__['openstack_config.get'](filename=filename,
|
||||
section=section,
|
||||
parameter=parameter)
|
||||
except salt.exceptions.CommandExecutionError as e:
|
||||
except CommandExecutionError as e:
|
||||
if str(e).lower().startswith('parameter not found:'):
|
||||
return {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': 'The value is already absent'}
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'The value is already absent'
|
||||
return ret
|
||||
raise
|
||||
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Value \'{0}\' is set to be deleted.'.format(
|
||||
old_value
|
||||
)
|
||||
return ret
|
||||
|
||||
__salt__['openstack_config.delete'](filename=filename,
|
||||
section=section,
|
||||
parameter=parameter)
|
||||
|
||||
return {'name': name,
|
||||
'changes': {'Value': 'Deleted'},
|
||||
'result': True,
|
||||
'comment': 'The value has been deleted'}
|
||||
ret['changes'] = {'Value': 'Deleted'}
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'The value has been deleted'
|
||||
|
||||
return ret
|
||||
|
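The openstack_config hunks above replace per-branch ``return {...}`` literals with a single ``ret`` dictionary that is created once, mutated, and returned from every branch, which keeps the four state keys consistent and gives the ``__opts__['test']`` handling one place to hook in. A minimal sketch of the pattern, with ``get_value``/``set_value`` standing in for the ``__salt__`` execution functions:

.. code-block:: python

    def present(name, desired, get_value, set_value):
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': ''}

        if get_value(name) == desired:
            # mutate the shared dict rather than returning a fresh literal
            ret['result'] = True
            ret['comment'] = 'The value is already set to the correct value'
            return ret

        set_value(name, desired)
        ret['changes'] = {'Value': 'Updated'}
        ret['result'] = True
        ret['comment'] = 'The value has been updated'
        return ret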
|
|
@ -161,7 +161,7 @@ def built(
|
|||
return ret
|
||||
|
||||
|
||||
def repo(name):
|
||||
def repo(name, keyid=None, env=None):
|
||||
'''
|
||||
Make a package repository; the name is the directory to turn into a repo.
|
||||
This state is best used with onchanges linked to your package building
|
||||
|
@ -169,6 +169,30 @@ def repo(name):
|
|||
|
||||
name
|
||||
The directory to find packages that will be in the repository
|
||||
|
||||
keyid
|
||||
Optional Key ID to use in signing repository
|
||||
|
||||
env
|
||||
A dictionary of environment variables to be utilized in creating the repository.
|
||||
Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- env:
|
||||
OPTIONS: 'ask-passphrase'
|
||||
|
||||
.. warning::
|
||||
|
||||
The above illustrates a common PyYAML pitfall, that **yes**,
|
||||
**no**, **on**, **off**, **true**, and **false** are all loaded as
|
||||
boolean ``True`` and ``False`` values, and must be enclosed in
|
||||
quotes to be used as strings. More info on this (and other) PyYAML
|
||||
idiosyncrasies can be found :doc:`here
|
||||
</topics/troubleshooting/yaml_idiosyncrasies>`.
|
||||
|
||||
Use of OPTIONS on some platforms, for example: ask-passphrase, will
|
||||
require gpg-agent or similar to cache passphrases.
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
|
@ -178,6 +202,14 @@ def repo(name):
|
|||
ret['result'] = None
|
||||
ret['comment'] = 'Package repo at {0} will be rebuilt'.format(name)
|
||||
return ret
|
||||
__salt__['pkgbuild.make_repo'](name)
|
||||
|
||||
# Need the check for None here, if env is not provided then it falls back
|
||||
# to None and it is assumed that the environment is not being overridden.
|
||||
if env is not None and not isinstance(env, dict):
|
||||
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
|
||||
'documentation.')
|
||||
return ret
|
||||
|
||||
__salt__['pkgbuild.make_repo'](name, keyid, env)
|
||||
ret['changes'] = {'refresh': True}
|
||||
return ret
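The PyYAML warning in the ``repo`` docstring above is easy to demonstrate: unquoted ``yes``/``no``/``on``/``off``/``true``/``false`` load as booleans, which would silently mangle an ``env`` value such as ``OPTIONS``. A two-line check:

.. code-block:: python

    import yaml  # PyYAML, the renderer behaviour the warning describes

    assert yaml.safe_load('OPTIONS: no') == {'OPTIONS': False}    # boolean!
    assert yaml.safe_load("OPTIONS: 'no'") == {'OPTIONS': 'no'}   # quoted: string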
|
||||
|
|
|
@ -42,14 +42,14 @@ def __virtual__():
|
|||
return salt.utils.which('rabbitmqctl') is not None
|
||||
|
||||
|
||||
def _check_perms_changes(name, newperms):
|
||||
def _check_perms_changes(name, newperms, runas=None):
|
||||
'''
|
||||
Whether Rabbitmq user's permissions need to be changed
|
||||
'''
|
||||
if not newperms:
|
||||
return False
|
||||
|
||||
existing_perms = __salt__['rabbitmq.list_user_permissions'](name)
|
||||
existing_perms = __salt__['rabbitmq.list_user_permissions'](name, runas=runas)
|
||||
|
||||
perm_need_change = False
|
||||
for vhost_perms in newperms:
|
||||
|
@ -63,14 +63,14 @@ def _check_perms_changes(name, newperms):
|
|||
return perm_need_change
|
||||
|
||||
|
||||
def _check_tags_changes(name, newtags):
|
||||
def _check_tags_changes(name, newtags, runas=None):
|
||||
'''
|
||||
Whether Rabbitmq user's tags need to be changed
|
||||
'''
|
||||
if newtags:
|
||||
if isinstance(newtags, str):
|
||||
newtags = newtags.split()
|
||||
return __salt__['rabbitmq.list_users']()[name] - set(newtags)
|
||||
return __salt__['rabbitmq.list_users'](runas=runas)[name] - set(newtags)
|
||||
else:
|
||||
return []
|
||||
|
||||
|
@ -147,7 +147,7 @@ def present(name,
|
|||
name, runas=runas)
|
||||
changes['old'] += 'Removed password.\n'
|
||||
|
||||
if _check_tags_changes(name, tags):
|
||||
if _check_tags_changes(name, tags, runas=runas):
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] += ('Tags for user {0} '
|
||||
|
@ -158,7 +158,7 @@ def present(name,
|
|||
)
|
||||
changes['new'] += 'Set tags: {0}\n'.format(tags)
|
||||
|
||||
if _check_perms_changes(name, perms):
|
||||
if _check_perms_changes(name, perms, runas=runas):
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] += ('Permissions for user {0} '
|
||||
|
@ -167,7 +167,7 @@ def present(name,
|
|||
for vhost_perm in perms:
|
||||
for vhost, perm in six.iteritems(vhost_perm):
|
||||
result.update(__salt__['rabbitmq.set_permissions'](
|
||||
vhost, name, perm[0], perm[1], perm[2], runas)
|
||||
vhost, name, perm[0], perm[1], perm[2], runas=runas)
|
||||
)
|
||||
changes['new'] += (
|
||||
'Set permissions {0} for vhost {1}'
|
||||
|
|
|
@ -1,11 +1,68 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage the registry on Windows
|
||||
r'''
|
||||
===========================
|
||||
Manage the Windows registry
|
||||
===========================
|
||||
Many Python developers think of registry keys as if they were Python dictionary
keys, which is not the case. The Windows registry is broken down into the
following components:
|
||||
|
||||
-----
|
||||
Hives
|
||||
-----
|
||||
|
||||
Hives are the top level of the registry. They all begin with HKEY.

- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER (HKCU)
- HKEY_LOCAL_MACHINE (HKLM)
- HKEY_USERS (HKU)
- HKEY_CURRENT_CONFIG
|
||||
|
||||
----
|
||||
Keys
|
||||
----
|
||||
|
||||
Hives contain keys. These are basically the folders beneath the hives. They can
|
||||
contain any number of subkeys.
|
||||
|
||||
-----------------
|
||||
Values or Entries
|
||||
-----------------
|
||||
|
||||
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. It is usually "(Default)"="(value not set)". The
actual value for the name and the data is Null. The registry editor will display
"(Default)" and "(value not set)".
|
||||
|
||||
-------
|
||||
Example
|
||||
-------
|
||||
|
||||
The following example is taken from the Windows startup portion of the registry:

.. code-block:: text

    [HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
    "RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
    "NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
    "BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
|
||||
In this example these are the values for each:
|
||||
|
||||
Hive: ``HKEY_LOCAL_MACHINE``

Key and subkeys: ``SOFTWARE\Microsoft\Windows\CurrentVersion\Run``

Value:
- There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent``
- Each value name has a corresponding value
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
@ -23,7 +80,7 @@ def _parse_key_value(key):
|
|||
splt = key.split("\\")
|
||||
hive = splt.pop(0)
|
||||
vname = splt.pop(-1)
|
||||
key = r'\\'.join(splt)
|
||||
key = '\\'.join(splt)
|
||||
return hive, key, vname
|
||||
|
||||
|
||||
|
@ -33,71 +90,149 @@ def _parse_key(key):
|
|||
'''
|
||||
splt = key.split("\\")
|
||||
hive = splt.pop(0)
|
||||
key = r'\\'.join(splt)
|
||||
key = '\\'.join(splt)
|
||||
return hive, key
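``_parse_key_value`` and ``_parse_key`` above split a state ID such as ``HKEY_CURRENT_USER\SOFTWARE\Salt\version`` on backslashes; the hunks also change the rejoin from ``r'\\'.join`` (a two-character separator) to ``'\\'.join`` (a single literal backslash). A standalone sketch of the parsing:

.. code-block:: python

    def parse_key_value(path):
        # split 'HIVE\key\subkeys\vname' into its three parts
        parts = path.split('\\')
        hive = parts.pop(0)
        vname = parts.pop(-1)
        key = '\\'.join(parts)   # the fix: join with one backslash,
        return hive, key, vname  # not the two-character string r'\\'


    hive, key, vname = parse_key_value(r'HKEY_CURRENT_USER\SOFTWARE\Salt\version')
    assert (hive, key, vname) == ('HKEY_CURRENT_USER', r'SOFTWARE\Salt', 'version')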
|
||||
|
||||
|
||||
def present(name, value, vtype='REG_SZ', reflection=True):
|
||||
def present(name, value=None, vname=None, vdata=None, vtype='REG_SZ', reflection=True):
|
||||
'''
|
||||
Set a registry value
|
||||
Ensure a registry key or value is present.
|
||||
|
||||
Optionally set ``reflection`` to ``False`` to disable reflection.
|
||||
``reflection`` has no effect on a 32-bit OS.
|
||||
:param str name:
|
||||
A string value representing the full path of the key to include the
|
||||
HIVE, Key, and all Subkeys. For example:
|
||||
|
||||
In the example below, this will prevent Windows from silently creating
|
||||
the key in:
|
||||
``HKEY_CURRENT_USER\\SOFTWARE\\Wow6432Node\\Salt\\version``
|
||||
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
|
||||
|
||||
Valid hive values include:
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_USERS or HKU
|
||||
|
||||
:param str value:
|
||||
Deprecated. Use vname and vdata instead. Included here for backwards
compatibility.
|
||||
|
||||
:param str vname:
|
||||
The name of the value you'd like to create beneath the Key. If this
|
||||
parameter is not passed it will assume you want to set the (Default)
|
||||
value
|
||||
|
||||
:param str vdata:
|
||||
The value you'd like to set for the Key. If a value name (vname) is
|
||||
passed, this will be the data for that value name. If not, this will be
|
||||
the (Default) value for the key.
|
||||
|
||||
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
value.
|
||||
|
||||
:param str vtype:
|
||||
The value type for the data you wish to store in the registry. Valid
|
||||
values are:
|
||||
|
||||
- REG_BINARY
|
||||
- REG_DWORD
|
||||
- REG_EXPAND_SZ
|
||||
- REG_MULTI_SZ
|
||||
- REG_SZ (Default)
|
||||
|
||||
:param bool reflection:
|
||||
On 64 bit machines a duplicate value will be created in the
|
||||
``Wow6432Node`` for 32bit programs. This only applies to the SOFTWARE
|
||||
key. This option is ignored on 32bit operating systems. This value
|
||||
defaults to True. Set it to False to disable reflection.
|
||||
|
||||
:return:
|
||||
Returns a dictionary showing the results of the registry operation.
|
||||
:rtype: dict
|
||||
|
||||
The following example will set the ``(Default)`` value for the
|
||||
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``0.15.3``. The
|
||||
value will not be reflected in ``Wow6432Node``:
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version:
|
||||
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
|
||||
reg.present:
|
||||
- value: 0.15.3
|
||||
- vtype: REG_SZ
|
||||
- vdata: 0.15.3
|
||||
- reflection: False
|
||||
|
||||
The following example will set the value for the ``version`` entry under the
|
||||
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``0.15.3``. The
|
||||
value will be reflected in ``Wow6432Node``:
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
|
||||
reg.present:
|
||||
- vname: version
|
||||
- vdata: 0.15.3
|
||||
|
||||
In the above example the path is interpreted as follows:
|
||||
- ``HKEY_CURRENT_USER`` is the hive
|
||||
- ``SOFTWARE\\Salt`` is the key
|
||||
- ``version`` is the value name
|
||||
So ``version`` will be created in the ``SOFTWARE\\Salt`` key in the
|
||||
``HKEY_CURRENT_USER`` hive and given the ``REG_SZ`` value of ``0.15.3``.
|
||||
- ``vname`` is the value name ('version') that will be created under the key
|
||||
- ``vdata`` is the data that will be assigned to 'version'
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {},
|
||||
'comment': ''}
|
||||
|
||||
hive, key, vname = _parse_key_value(name)
|
||||
# This is for backwards compatibility
|
||||
# If 'value' is passed a value, vdata becomes value and the vname is
|
||||
# obtained from the key path
|
||||
if value:
|
||||
hive, key, vname = _parse_key_value(name)
|
||||
vdata = value
|
||||
ret['comment'] = 'State file is using deprecated syntax. Please update.'
|
||||
salt.utils.warn_until(
|
||||
'Boron',
|
||||
'The \'value\' argument has been deprecated. '
|
||||
'Please use vdata instead.'
|
||||
)
|
||||
else:
|
||||
hive, key = _parse_key(name)
|
||||
|
||||
# Determine what to do
|
||||
if value == __salt__['reg.read_value'](hive, key, vname)['vdata']:
|
||||
ret['comment'] = '{0} is already configured'.format(name)
|
||||
reg_current = __salt__['reg.read_value'](hive, key, vname)
|
||||
|
||||
if vdata == reg_current['vdata'] and reg_current['success']:
|
||||
ret['comment'] = '{0} in {1} is already configured'.\
|
||||
format(vname if vname else '(Default)', name)
|
||||
return ret
|
||||
else:
|
||||
ret['changes'] = {'reg': 'configured to {0}'.format(value)}
|
||||
|
||||
add_change = {'Key': r'{0}\{1}'.format(hive, key),
|
||||
'Entry': '{0}'.format(vname if vname else '(Default)'),
|
||||
'Value': '{0}'.format(vdata if vdata else '(Empty String)')}
|
||||
|
||||
# Check for test option
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['changes'] = {'reg': {'Will add': add_change}}
|
||||
return ret
|
||||
|
||||
# Configure the value
|
||||
ret['result'] = __salt__['reg.set_value'](hive, key, vname, value, vtype,
|
||||
ret['result'] = __salt__['reg.set_value'](hive, key, vname, vdata, vtype,
|
||||
reflection)
|
||||
|
||||
if not ret:
|
||||
if not ret['result']:
|
||||
ret['changes'] = {}
|
||||
ret['comment'] = 'could not configure the registry key'
|
||||
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
|
||||
else:
|
||||
ret['changes'] = {'reg': {'Added': add_change}}
|
||||
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def absent(name):
|
||||
def absent(name, vname=None):
|
||||
'''
|
||||
Remove a registry value
|
||||
Ensure a registry value is removed. To remove a key use key_absent.
|
||||
|
||||
Example:
|
||||
|
||||
|
@ -118,14 +253,89 @@ def absent(name):
|
|||
'changes': {},
|
||||
'comment': ''}
|
||||
|
||||
hive, key, vname = _parse_key_value(name)
|
||||
hive, key = _parse_key(name)
|
||||
|
||||
# Determine what to do
|
||||
if not __salt__['reg.read_value'](hive, key, vname)['success']:
|
||||
hive, key, vname = _parse_key_value(name)
|
||||
if not __salt__['reg.read_value'](hive, key, vname)['success']:
|
||||
ret['comment'] = '{0} is already absent'.format(name)
|
||||
return ret
|
||||
|
||||
remove_change = {'Key': r'{0}\{1}'.format(hive, key),
|
||||
'Entry': '{0}'.format(vname if vname else '(Default)')}
|
||||
|
||||
# Check for test option
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['changes'] = {'reg': {'Will remove': remove_change}}
|
||||
return ret
|
||||
|
||||
# Delete the value
|
||||
ret['result'] = __salt__['reg.delete_value'](hive, key, vname)
|
||||
if not ret['result']:
|
||||
ret['changes'] = {}
|
||||
ret['comment'] = r'Failed to remove {0} from {1}\{2}'.format(name, hive,
|
||||
key)
|
||||
else:
|
||||
ret['changes'] = {'reg': {'Removed': remove_change}}
|
||||
ret['comment'] = r'Removed {0} from {1}\{2}'.format(name, hive, key)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def key_absent(name, force=False):
|
||||
r'''
|
||||
.. versionadded:: 2015.5.4
|
||||
|
||||
Ensure a registry key is removed. This will remove a key and all value
|
||||
entries it contains. It will fail if the key contains subkeys.
|
||||
|
||||
:param str name:
|
||||
A string representing the full path to the key to be removed to include
|
||||
the hive and the keypath. The hive can be any of the following:
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USER or HKU
|
||||
|
||||
:param bool force:
|
||||
A boolean value indicating that all subkeys should be deleted with the
|
||||
key. If force=False and subkeys exist beneath the key you want to
|
||||
delete, key_absent will fail. Use with caution. The default is False.
|
||||
|
||||
:return:
|
||||
Returns a dictionary showing the results of the registry operation.
|
||||
:rtype: dict
|
||||
|
||||
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
|
||||
under the ``HKEY_CURRENT_USER`` hive.
|
||||
|
||||
Example::
|
||||
|
||||
'HKEY_CURRENT_USER\SOFTWARE\Salt':
|
||||
reg.key_absent:
|
||||
- force: True
|
||||
|
||||
In the above example the path is interpreted as follows:
|
||||
- ``HKEY_CURRENT_USER`` is the hive
|
||||
- ``SOFTWARE\Salt`` is the key
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {},
|
||||
'comment': ''}
|
||||
|
||||
hive, key = _parse_key(name)
|
||||
|
||||
# Determine what to do
|
||||
if not __salt__['reg.read_value'](hive, key)['success']:
|
||||
ret['comment'] = '{0} is already absent'.format(name)
|
||||
return ret
|
||||
else:
|
||||
ret['changes'] = {'reg': 'Removed {0}'.format(name)}
|
||||
|
||||
ret['changes'] = {'reg': {
|
||||
'Removed': {
|
||||
'Key': r'{0}\{1}'.format(hive, key)
|
||||
}}}
|
||||
|
||||
# Check for test option
|
||||
if __opts__['test']:
|
||||
|
@ -133,9 +343,10 @@ def absent(name):
|
|||
return ret
|
||||
|
||||
# Delete the value
|
||||
ret['result'] = __salt__['reg.delete_value'](hive, key, vname)
|
||||
if not ret['result']:
|
||||
__salt__['reg.delete_key'](hive, key, force=force)
|
||||
if __salt__['reg.read_value'](hive, key)['success']:
|
||||
ret['result'] = False
|
||||
ret['changes'] = {}
|
||||
ret['comment'] = 'failed to remove registry key {0}'.format(name)
|
||||
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
|
||||
|
||||
return ret
|
||||
|
|
|
@ -1,11 +1,6 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage Windows Package Repository
|
||||
|
||||
.. note::
|
||||
|
||||
This state only loads on minions that have the ``roles: salt-master`` grain
|
||||
set.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -21,15 +16,7 @@ import salt.config
|
|||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Load this state if this is the salt-master
|
||||
'''
|
||||
try:
|
||||
return ('winrepo'
|
||||
if 'salt-master' in __grains__.get('roles', [])
|
||||
else False)
|
||||
except TypeError:
|
||||
return False
|
||||
return 'winrepo'
|
||||
|
||||
|
||||
def genrepo(name, force=False, allow_empty=False):
|
||||
|
|
|
@ -13,7 +13,7 @@ Here is a simple example scenario. In this example ``ca`` is the ca server,
|
|||
and ``www`` is a web server that needs a certificate signed by ``ca``.
|
||||
|
||||
For remote signing, peers must be permitted to remotely call the
|
||||
:mod:`pem_managed <salt.states.x509.pem_managed>` function.
|
||||
:mod:`sign_remote_certificate <salt.modules.x509.sign_remote_certificate>` function.
|
||||
|
||||
|
||||
/etc/salt/master.d/peer.sls
|
||||
|
|
6
salt/templates/git/ssh-id-wrapper.bat
Executable file
|
@ -0,0 +1,6 @@
|
|||
@echo off
|
||||
set opts=-oStrictHostKeyChecking=no -oPasswordAuthentication=no -oKbdInteractiveAuthentication=no -oChallengeResponseAuthentication=no
|
||||
if "%GIT_IDENTITY%" == "" goto NOIDENT
|
||||
set ident=-oIdentityFile='%GIT_IDENTITY%'
|
||||
:NOIDENT
|
||||
"%GIT_SSH_EXE%" %opts% %ident% %*
|
|
@ -1553,6 +1553,18 @@ def sanitize_win_path_string(winpath):
|
|||
return winpath
|
||||
|
||||
|
||||
@real_memoize
|
||||
def is_proxy():
|
||||
'''
|
||||
Return True if this minion is a proxy minion.
|
||||
Leverages the fact that is_linux() returns False
|
||||
for proxies.
|
||||
TODO: Need to extend this for proxies that might run on
|
||||
other Unices or Windows.
|
||||
'''
|
||||
return not is_linux()
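``is_proxy`` and ``is_linux`` are wrapped in ``@real_memoize`` so the platform check runs once per process. The real decorator lives in salt.utils and handles more cases; a minimal sketch of a zero-argument memoizer of that shape:

.. code-block:: python

    import functools
    import sys


    def memoize_no_args(func):
        '''Cache the result of a zero-argument check after its first call.'''
        cache = {}

        @functools.wraps(func)
        def wrapper():
            if 'value' not in cache:
                cache['value'] = func()
            return cache['value']
        return wrapper


    @memoize_no_args
    def is_linux():
        return sys.platform.startswith('linux')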
|
||||
|
||||
|
||||
@real_memoize
|
||||
def is_linux():
|
||||
'''
|
||||
|
@ -1565,7 +1577,7 @@ def is_linux():
|
|||
# then this will fail.
|
||||
is_proxy = False
|
||||
try:
|
||||
if 'salt-proxy-minion' in main.__file__:
|
||||
if 'salt-proxy' in main.__file__:
|
||||
is_proxy = True
|
||||
except AttributeError:
|
||||
pass
|
||||
|
@ -2808,3 +2820,45 @@ def is_list(value):
|
|||
Check if a variable is a list.
|
||||
'''
|
||||
return isinstance(value, list)
|
||||
|
||||
|
||||
def invalid_kwargs(invalid_kwargs, raise_exc=True):
|
||||
'''
|
||||
Raise a SaltInvocationError if invalid_kwargs is non-empty
|
||||
'''
|
||||
if invalid_kwargs:
|
||||
if isinstance(invalid_kwargs, dict):
|
||||
new_invalid = [
|
||||
'{0}={1}'.format(x, y)
|
||||
for x, y in six.iteritems(invalid_kwargs)
|
||||
]
|
||||
invalid_kwargs = new_invalid
|
||||
msg = (
|
||||
'The following keyword arguments are not valid: {0}'
|
||||
.format(', '.join(invalid_kwargs))
|
||||
)
|
||||
if raise_exc:
|
||||
raise SaltInvocationError(msg)
|
||||
else:
|
||||
return msg
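A sketch of how an execution function might use the new helper to reject unexpected keyword arguments; ``useradd`` here is hypothetical, and the import path assumes the helper lands in ``salt.utils`` as the surrounding hunks suggest:

.. code-block:: python

    import salt.utils
    from salt.exceptions import SaltInvocationError


    def useradd(name, uid=None, **kwargs):
        '''Hypothetical execution function that rejects unknown kwargs.'''
        if kwargs:
            # raises SaltInvocationError: "The following keyword arguments
            # are not valid: shell=/bin/zsh"
            salt.utils.invalid_kwargs(kwargs)
        return {'name': name, 'uid': uid}


    try:
        useradd('fred', shell='/bin/zsh')
    except SaltInvocationError as exc:
        print(exc)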
|
||||
|
||||
|
||||
def itersplit(orig, sep=None):
|
||||
'''
|
||||
Generator function for iterating through large strings, particularly useful
|
||||
as a replacement for str.splitlines() if the string is expected to contain
|
||||
a lot of lines.
|
||||
|
||||
See http://stackoverflow.com/a/3865367
|
||||
'''
|
||||
exp = re.compile(r'\s+' if sep is None else re.escape(sep))
|
||||
pos = 0
|
||||
while True:
|
||||
match = exp.search(orig, pos)
|
||||
if not match:
|
||||
if pos < len(orig) or sep is not None:
|
||||
yield orig[pos:]
|
||||
break
|
||||
if pos < match.start() or sep is not None:
|
||||
yield orig[pos:match.start()]
|
||||
pos = match.end()
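A quick illustration of why ``itersplit`` exists: it yields pieces lazily, so a very large string never needs a full list of its lines in memory (assumes the function above is in scope):

.. code-block:: python

    big = 'line one\nline two\nline three'

    # str.splitlines() materialises every line up front:
    assert big.splitlines() == ['line one', 'line two', 'line three']

    # itersplit() is a generator and produces one piece at a time:
    assert list(itersplit(big, '\n')) == ['line one', 'line two', 'line three']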
|
||||
|
|
|
@ -61,7 +61,6 @@ import hashlib
|
|||
import logging
|
||||
import datetime
|
||||
import multiprocessing
|
||||
import re
|
||||
from collections import MutableMapping
|
||||
|
||||
# Import third party libs
|
||||
|
@ -190,7 +189,6 @@ class SaltEvent(object):
|
|||
if listen:
|
||||
self.connect_pub()
|
||||
self.pending_tags = []
|
||||
self.pending_rtags = []
|
||||
self.pending_events = []
|
||||
self.connect_pub()
|
||||
self.__load_cache_regex()
|
||||
|
@ -262,7 +260,7 @@ class SaltEvent(object):
|
|||
)
|
||||
return puburi, pulluri
|
||||
|
||||
def subscribe(self, tag):
|
||||
def subscribe(self, tag, match_type=None):
|
||||
'''
|
||||
Subscribe to events matching the passed tag.
|
||||
|
||||
|
@ -272,42 +270,24 @@ class SaltEvent(object):
|
|||
to get_event from discarding a response required by a subsequent call
|
||||
to get_event.
|
||||
'''
|
||||
self.pending_tags.append(tag)
|
||||
match_func = self._get_match_func(match_type)
|
||||
|
||||
self.pending_tags.append([tag, match_func])
|
||||
|
||||
return
|
||||
|
||||
def subscribe_regex(self, tag_regex):
|
||||
'''
|
||||
Subscribe to events matching the passed tag expression.
|
||||
|
||||
If you do not subscribe to a tag, events will be discarded by calls to
|
||||
get_event that request a different tag. In contexts where many different
|
||||
jobs are outstanding it is important to subscribe to prevent one call
|
||||
to get_event from discarding a response required by a subsequent call
|
||||
to get_event.
|
||||
'''
|
||||
self.pending_rtags.append(re.compile(tag_regex))
|
||||
|
||||
return
|
||||
|
||||
def unsubscribe(self, tag):
|
||||
def unsubscribe(self, tag, match_type=None):
|
||||
'''
|
||||
Un-subscribe to events matching the passed tag.
|
||||
'''
|
||||
self.pending_tags.remove(tag)
|
||||
match_func = self._get_match_func(match_type)
|
||||
|
||||
return
|
||||
|
||||
def unsubscribe_regex(self, tag_regex):
|
||||
'''
|
||||
Un-subscribe to events matching the passed tag.
|
||||
'''
|
||||
self.pending_rtags.remove(tag_regex)
|
||||
self.pending_tags.remove([tag, match_func])
|
||||
|
||||
old_events = self.pending_events
|
||||
self.pending_events = []
|
||||
for evt in old_events:
|
||||
if any(evt['tag'].startswith(ptag) for ptag in self.pending_tags) or any(rtag.search(evt['tag']) for rtag in self.pending_rtags):
|
||||
if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
|
||||
self.pending_events.append(evt)
|
||||
|
||||
return
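The refactor above collapses the separate ``subscribe``/``subscribe_regex`` bookkeeping into one list of ``[tag, match_func]`` pairs, with the match function resolved by name from ``match_type`` via ``getattr(self, '_match_tag_{0}'.format(match_type))``. A minimal model of that dispatch with two matchers:

.. code-block:: python

    import re


    class TagMatcher(object):
        '''Minimal model of the match_type dispatch used above.'''

        def _match_tag_startswith(self, event_tag, search_tag):
            return event_tag.startswith(search_tag)

        def _match_tag_regex(self, event_tag, search_tag):
            return re.search(search_tag, event_tag) is not None

        def get_match_func(self, match_type='startswith'):
            return getattr(self, '_match_tag_{0}'.format(match_type))


    m = TagMatcher()
    assert m.get_match_func('startswith')('salt/job/123', 'salt/job')
    assert m.get_match_func('regex')('salt/job/123', r'salt/job/\d+')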
|
||||
|
@ -349,7 +329,7 @@ class SaltEvent(object):
|
|||
match_type = self.opts.get('event_match_type', 'startswith')
|
||||
return getattr(self, '_match_tag_{0}'.format(match_type), None)
|
||||
|
||||
def _check_pending(self, tag, tags_regex, match_func=None):
|
||||
def _check_pending(self, tag, match_func=None):
|
||||
"""Check the pending_events list for events that match the tag
|
||||
|
||||
:param tag: The tag to search for
|
||||
|
@ -364,14 +344,13 @@ class SaltEvent(object):
|
|||
self.pending_events = []
|
||||
ret = None
|
||||
for evt in old_events:
|
||||
if match_func(evt['tag'], tag) or any(rtag.search(evt['tag']) for rtag in tags_regex):
|
||||
if match_func(evt['tag'], tag):
|
||||
if ret is None:
|
||||
ret = evt
|
||||
log.trace('get_event() returning cached event = {0}'.format(ret))
|
||||
else:
|
||||
self.pending_events.append(evt)
|
||||
elif any(match_func(evt['tag'], ptag) for ptag in self.pending_tags) \
|
||||
or any(rtag.search(evt['tag']) for rtag in self.pending_rtags):
|
||||
elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
|
||||
self.pending_events.append(evt)
|
||||
else:
|
||||
log.trace('get_event() discarding cached event that no longer has any subscriptions = {0}'.format(evt))
|
||||
|
@ -409,7 +388,7 @@ class SaltEvent(object):
|
|||
'''
|
||||
return self.cache_regex.get(search_tag).search(event_tag) is not None
|
||||
|
||||
def _get_event(self, wait, tag, tags_regex, match_func=None):
|
||||
def _get_event(self, wait, tag, match_func=None):
|
||||
if match_func is None:
|
||||
match_func = self._get_match_func()
|
||||
start = time.time()
|
||||
|
@ -432,11 +411,9 @@ class SaltEvent(object):
|
|||
else:
|
||||
raise
|
||||
|
||||
if not match_func(ret['tag'], tag) \
|
||||
and not any(rtag.search(ret['tag']) for rtag in tags_regex):
|
||||
if not match_func(ret['tag'], tag):
|
||||
# tag not match
|
||||
if any(match_func(ret['tag'], ptag) for ptag in self.pending_tags) \
|
||||
or any(rtag.search(ret['tag']) for rtag in self.pending_rtags):
|
||||
if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
|
||||
log.trace('get_event() caching unwanted event = {0}'.format(ret))
|
||||
self.pending_events.append(ret)
|
||||
if wait: # only update the wait timeout if we had one
|
||||
|
@ -448,8 +425,7 @@ class SaltEvent(object):
|
|||
|
||||
return None
|
||||
|
||||
def get_event(self, wait=5, tag='', tags_regex=None, full=False,
|
||||
match_type=None):
|
||||
def get_event(self, wait=5, tag='', full=False, match_type=None):
|
||||
'''
|
||||
Get a single publication.
|
||||
IF no publication available THEN block for up to wait seconds
|
||||
|
@ -457,10 +433,11 @@ class SaltEvent(object):
|
|||
|
||||
IF wait is 0 then block forever.
|
||||
|
||||
A tag specification can be given to only return publications with a tag
|
||||
STARTING WITH a given string (tag) OR MATCHING one or more string
|
||||
regular expressions (tags_regex list). If tag is not specified or given
|
||||
as an empty string, all events are considered.
|
||||
tag
|
||||
Only return events matching the given tag. If not specified, or set
|
||||
to an empty string, all events are returned. It is recommended to
|
||||
always be selective on what is to be returned in the event that
|
||||
multiple requests are being multiplexed
|
||||
|
||||
match_type
|
||||
Set the function to match the search tag with event tags.
|
||||
|
@ -472,13 +449,15 @@ class SaltEvent(object):
|
|||
|
||||
.. versionadded:: Boron
|
||||
|
||||
Notes:
|
||||
|
||||
Searches cached publications first. If no cached publications are found
|
||||
that match the given tag specification, new publications are received
|
||||
and checked.
|
||||
|
||||
If a publication is received that does not match the tag specification,
|
||||
it is DISCARDED unless it is subscribed to via subscribe() and
|
||||
subscribe_regex() which will cause it to be cached.
|
||||
it is DISCARDED unless it is subscribed to via subscribe() which will
|
||||
cause it to be cached.
|
||||
|
||||
If a caller is not going to call get_event immediately after sending a
|
||||
request, it MUST subscribe the result to ensure the response is not lost
|
||||
|
@ -487,14 +466,9 @@ class SaltEvent(object):
|
|||
|
||||
match_func = self._get_match_func(match_type)
|
||||
|
||||
if tags_regex is None:
|
||||
tags_regex = []
|
||||
else:
|
||||
tags_regex = [re.compile(rtag) for rtag in tags_regex]
|
||||
|
||||
ret = self._check_pending(tag, tags_regex, match_func)
|
||||
ret = self._check_pending(tag, match_func)
|
||||
if ret is None:
|
||||
ret = self._get_event(wait, tag, tags_regex, match_func)
|
||||
ret = self._get_event(wait, tag, match_func)
|
||||
|
||||
if ret is None or full:
|
||||
return ret
|
||||
|
|
|
@ -68,6 +68,10 @@ except ImportError:
|
|||
try:
|
||||
import pygit2
|
||||
HAS_PYGIT2 = True
|
||||
try:
|
||||
GitError = pygit2.errors.GitError
|
||||
except AttributeError:
|
||||
GitError = Exception
|
||||
except ImportError:
|
||||
HAS_PYGIT2 = False
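The import shim above binds ``GitError`` to ``pygit2.errors.GitError`` when it exists and to the base ``Exception`` on older pygit2 releases, so the later ``except GitError`` works either way. A sketch of using the binding; ``origin`` is a stand-in for a pygit2 remote:

.. code-block:: python

    def fetch_quietly(origin):
        '''Fetch, tolerating URL-protocol errors; re-raise anything else.'''
        try:
            return origin.fetch()
        except GitError as exc:  # the real class or the Exception fallback
            # str(exc) avoids the deprecated exc.message attribute
            if 'unsupported url protocol' in str(exc).lower():
                return None
            raise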
|
||||
|
||||
|
@ -839,7 +843,7 @@ class Pygit2(GitProvider):
|
|||
origin.credentials = credentials
|
||||
try:
|
||||
fetch_results = origin.fetch()
|
||||
except pygit2.errors.GitError as exc:
|
||||
except GitError as exc:
|
||||
# Using exc.__str__() here to avoid deprecation warning
|
||||
# when referencing exc.message
|
||||
if 'unsupported url protocol' in exc.__str__().lower() \
|
||||
|
|
|
@ -68,7 +68,7 @@ def get_iam_region(version='latest', url='http://169.254.169.254',
|
|||
'''
|
||||
Gets instance identity document and returns region
|
||||
'''
|
||||
instance_identity_url = '{0}/{1}/latest/dynamic/instance-identity/document'.format(url, version)
|
||||
instance_identity_url = '{0}/{1}/dynamic/instance-identity/document'.format(url, version)
|
||||
|
||||
region = None
|
||||
try:
|
||||
|
|
|
@ -210,6 +210,16 @@ def get_hostnames():
|
|||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
# try /etc/nodename (SunOS only)
|
||||
if salt.utils.is_sunos():
|
||||
try:
|
||||
name = ''
|
||||
with salt.utils.fopen('/etc/nodename') as hfl:
|
||||
name = hfl.read()
|
||||
h.append(name)
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
# try /etc/hosts
|
||||
try:
|
||||
with salt.utils.fopen('/etc/hosts') as hfl:
|
||||
|
|
|
@ -1037,6 +1037,18 @@ class ArgsStdinMixIn(six.with_metaclass(MixInMeta, object)):
|
|||
)
|
||||
|
||||
|
||||
class ProxyIdMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
_mixin_prio = 40
|
||||
|
||||
def _mixin_setup(self):
|
||||
self.add_option(
|
||||
'--proxyid',
|
||||
default=None,
|
||||
dest='proxyid',
|
||||
help=('Id for this proxy')
|
||||
)
|
||||
|
||||
|
||||
class OutputOptionsMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
|
||||
_mixin_prio_ = 40
|
||||
|
@ -1515,6 +1527,31 @@ class MinionOptionParser(six.with_metaclass(OptionParserMeta, MasterOptionParser
|
|||
cache_minion_id=True)
|
||||
|
||||
|
||||
class ProxyMinionOptionParser(six.with_metaclass(OptionParserMeta,
|
||||
OptionParser,
|
||||
ConfigDirMixIn,
|
||||
MergeConfigMixIn,
|
||||
LogLevelMixIn,
|
||||
RunUserMixin,
|
||||
DaemonMixIn,
|
||||
PidfileMixin,
|
||||
SaltfileMixIn,
|
||||
ProxyIdMixIn)): # pylint: disable=no-init
|
||||
|
||||
description = (
|
||||
'The Salt proxy minion, connects to and controls devices not able to run a minion. Receives commands from a remote Salt master.'
|
||||
)
|
||||
|
||||
# ConfigDirMixIn config filename attribute
|
||||
_config_filename_ = 'minion'
|
||||
# LogLevelMixIn attributes
|
||||
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'proxyminion')
|
||||
|
||||
def setup_config(self):
|
||||
return config.minion_config(self.get_config_file_path(),
|
||||
cache_minion_id=False)
|
||||
|
||||
|
||||
class SyndicOptionParser(six.with_metaclass(OptionParserMeta,
|
||||
OptionParser,
|
||||
ConfigDirMixIn,
|
||||
|
|
26
scripts/salt-proxy
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/usr/bin/env python
|
||||
'''
|
||||
This script is used to kick off a salt proxy minion daemon
|
||||
'''
|
||||
|
||||
from salt.scripts import salt_proxy_minion
|
||||
from salt.utils import is_windows
|
||||
from multiprocessing import freeze_support
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if is_windows():
|
||||
# Since this file does not have a '.py' extension, when running on
|
||||
# Windows, spawning any additional processes will fail due to Python
|
||||
# not being able to load this 'module' in the new process.
|
||||
# Work around this by creating a '.pyc' file which will enable the
|
||||
# spawned process to load this 'module' and proceed.
|
||||
import os.path
|
||||
import py_compile
|
||||
cfile = os.path.splitext(__file__)[0] + '.pyc'
|
||||
if not os.path.exists(cfile):
|
||||
py_compile.compile(__file__, cfile)
|
||||
# This handles the bootstrapping code that is included with frozen
|
||||
# scripts. It is a no-op on unfrozen code.
|
||||
freeze_support()
|
||||
salt_proxy_minion()
|
330
setup.py
|
@ -5,7 +5,7 @@ The setup script for salt
|
|||
'''
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
# pylint: disable=file-perms
|
||||
# pylint: disable=C0111,E1101,E1103,F0401,W0611,W0201,W0232,R0201,R0902,R0903
|
||||
|
||||
# For Python 2.5. A no-op on 2.6 and above.
|
||||
|
@ -18,7 +18,7 @@ import time
|
|||
try:
|
||||
from urllib2 import urlopen
|
||||
except ImportError:
|
||||
from urllib.request import urlopen
|
||||
from urllib.request import urlopen # pylint: disable=no-name-in-module
|
||||
from datetime import datetime
|
||||
# pylint: disable=E0611
|
||||
import distutils.dist
|
||||
|
@ -71,6 +71,7 @@ WITH_SETUPTOOLS = False
|
|||
if 'USE_SETUPTOOLS' in os.environ or 'setuptools' in sys.modules:
|
||||
try:
|
||||
from setuptools import setup
|
||||
from setuptools.command.develop import develop
|
||||
from setuptools.command.install import install
|
||||
from setuptools.command.sdist import sdist
|
||||
from setuptools.command.egg_info import egg_info
|
||||
|
@ -103,6 +104,7 @@ except ImportError:
|
|||
|
||||
SALT_VERSION = os.path.join(os.path.abspath(SETUP_DIRNAME), 'salt', 'version.py')
|
||||
SALT_VERSION_HARDCODED = os.path.join(os.path.abspath(SETUP_DIRNAME), 'salt', '_version.py')
|
||||
SALT_SYSPATHS_HARDCODED = os.path.join(os.path.abspath(SETUP_DIRNAME), 'salt', '_syspaths.py')
|
||||
SALT_REQS = os.path.join(os.path.abspath(SETUP_DIRNAME), 'requirements', 'base.txt')
|
||||
SALT_ZEROMQ_REQS = os.path.join(os.path.abspath(SETUP_DIRNAME), 'requirements', 'zeromq.txt')
|
||||
SALT_RAET_REQS = os.path.join(os.path.abspath(SETUP_DIRNAME), 'requirements', 'raet.txt')
|
||||
|
@ -125,8 +127,16 @@ def _parse_requirements_file(requirements_file):
|
|||
line = line.strip()
|
||||
if not line or line.startswith(('#', '-r')):
|
||||
continue
|
||||
if IS_WINDOWS_PLATFORM and 'libcloud' in line:
|
||||
continue
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
if 'libcloud' in line:
|
||||
continue
|
||||
if 'pycrypto' in line.lower():
|
||||
# On windows we install PyCrypto using python wheels
|
||||
continue
|
||||
if 'm2crypto' in line.lower() and __saltstack_version__.info < (2015, 8): # pylint: disable=undefined-variable
|
||||
# In Windows, we're installing M2CryptoWin{32,64} which comes
|
||||
# compiled
|
||||
continue
|
||||
parsed_requirements.append(line)
|
||||
return parsed_requirements
|
||||
# <---- Helper Functions ---------------------------------------------------------------------------------------------
|
||||
|
@ -139,12 +149,14 @@ class WriteSaltVersion(Command):
|
|||
user_options = []
|
||||
|
||||
def initialize_options(self):
|
||||
"""Abstract method that is required to be overwritten"""
|
||||
pass
|
||||
'''
|
||||
Abstract method that is required to be overwritten
|
||||
'''
|
||||
|
||||
def finalize_options(self):
|
||||
"""Abstract method that is required to be overwritten"""
|
||||
pass
|
||||
'''
|
||||
Abstract method that is required to be overwritten
|
||||
'''
|
||||
|
||||
def run(self):
|
||||
if not os.path.exists(SALT_VERSION_HARDCODED):
|
||||
|
@ -163,18 +175,54 @@ class WriteSaltVersion(Command):
|
|||
# pylint: enable=E0602
|
||||
|
||||
|
||||
class GenerateSaltSyspaths(Command):
|
||||
|
||||
description = 'Generate salt\'s hardcoded syspaths file'
|
||||
|
||||
def initialize_options(self):
|
||||
pass
|
||||
|
||||
def finalize_options(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
# Write the syspaths file
|
||||
if getattr(self.distribution, 'salt_syspaths_hardcoded_path', None) is None:
|
||||
print('This command is not meant to be called on its own')
|
||||
exit(1)
|
||||
|
||||
# Write the system paths file
|
||||
open(self.distribution.salt_syspaths_hardcoded_path, 'w').write(
|
||||
INSTALL_SYSPATHS_TEMPLATE.format(
|
||||
date=datetime.utcnow(),
|
||||
root_dir=self.distribution.salt_root_dir,
|
||||
config_dir=self.distribution.salt_config_dir,
|
||||
cache_dir=self.distribution.salt_cache_dir,
|
||||
sock_dir=self.distribution.salt_sock_dir,
|
||||
srv_root_dir=self.distribution.salt_srv_root_dir,
|
||||
base_file_roots_dir=self.distribution.salt_base_file_roots_dir,
|
||||
base_pillar_roots_dir=self.distribution.salt_base_pillar_roots_dir,
|
||||
base_master_roots_dir=self.distribution.salt_base_master_roots_dir,
|
||||
logs_dir=self.distribution.salt_logs_dir,
|
||||
pidfile_dir=self.distribution.salt_pidfile_dir,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class WriteSaltSshPackagingFile(Command):
|
||||
|
||||
description = 'Write salt\'s ssh packaging file'
|
||||
user_options = []
|
||||
|
||||
def initialize_options(self):
|
||||
"""Abstract method that is required to be overwritten"""
|
||||
pass
|
||||
'''
|
||||
Abstract method that is required to be overwritten
|
||||
'''
|
||||
|
||||
def finalize_options(self):
|
||||
"""Abstract method that is required to be overwritten"""
|
||||
pass
|
||||
'''
|
||||
Abstract method that is required to be overwritten
|
||||
'''
|
||||
|
||||
def run(self):
|
||||
if not os.path.exists(PACKAGED_FOR_SALT_SSH_FILE):
|
||||
|
@ -188,6 +236,198 @@ class WriteSaltSshPackagingFile(Command):
|
|||
# pylint: enable=E0602
|
||||
|
||||
|
||||
if WITH_SETUPTOOLS:
|
||||
class Develop(develop):
|
||||
user_options = develop.user_options + [
|
||||
('write-salt-version', None,
|
||||
'Generate Salt\'s _version.py file which allows proper version '
|
||||
'reporting. This defaults to False on develop/editable setups. '
|
||||
'If WRITE_SALT_VERSION is found in the environment this flag is '
|
||||
'switched to True.'),
|
||||
('generate-salt-syspaths', None,
|
||||
'Generate Salt\'s _syspaths.py file which allows tweaking some '
|
||||
'common paths that salt uses. This defaults to False on '
|
||||
'develop/editable setups. If GENERATE_SALT_SYSPATHS is found in '
|
||||
'the environment this flag is switched to True.'),
|
||||
('mimic-salt-install', None,
|
||||
'Mimic the install command when running the develop command. '
'This will generate salt\'s _version.py and _syspaths.py files. '
'This defaults to False on develop/editable setups. '
|
||||
'If MIMIC_INSTALL is found in the environment this flag is '
|
||||
'switched to True.')
|
||||
]
|
||||
boolean_options = develop.boolean_options + [
|
||||
'write-salt-version',
|
||||
'generate-salt-syspaths',
|
||||
'mimic-salt-install'
|
||||
]
|
||||
|
||||
def initialize_options(self):
|
||||
develop.initialize_options(self)
|
||||
self.write_salt_version = False
|
||||
self.generate_salt_syspaths = False
|
||||
self.mimic_salt_install = False
|
||||
|
||||
def finalize_options(self):
|
||||
develop.finalize_options(self)
|
||||
if 'WRITE_SALT_VERSION' in os.environ:
|
||||
self.write_salt_version = True
|
||||
if 'GENERATE_SALT_SYSPATHS' in os.environ:
|
||||
self.generate_salt_syspaths = True
|
||||
if 'MIMIC_SALT_INSTALL' in os.environ:
|
||||
self.mimic_salt_install = True
|
||||
|
||||
if self.mimic_salt_install:
|
||||
self.write_salt_version = True
|
||||
self.generate_salt_syspaths = True
|
||||
|
||||
def run(self):
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
if __saltstack_version__.info < (2015, 8): # pylint: disable=undefined-variable
|
||||
# Install M2Crypto first
|
||||
self.distribution.salt_installing_m2crypto_windows = True
|
||||
self.run_command('install-m2crypto-windows')
|
||||
self.distribution.salt_installing_m2crypto_windows = None
|
||||
|
||||
# Install PyCrypto
|
||||
self.distribution.salt_installing_pycrypto_windows = True
|
||||
self.run_command('install-pycrypto-windows')
|
||||
self.distribution.salt_installing_pycrypto_windows = None
|
||||
|
||||
# Download the required DLLs
|
||||
self.distribution.salt_download_windows_dlls = True
|
||||
self.run_command('download-windows-dlls')
|
||||
self.distribution.salt_download_windows_dlls = None
|
||||
|
||||
if self.write_salt_version is True:
|
||||
self.distribution.running_salt_install = True
|
||||
self.distribution.salt_version_hardcoded_path = SALT_VERSION_HARDCODED
|
||||
self.run_command('write_salt_version')
|
||||
|
||||
if self.generate_salt_syspaths:
|
||||
self.distribution.salt_syspaths_hardcoded_path = SALT_SYSPATHS_HARDCODED
|
||||
self.run_command('generate_salt_syspaths')
|
||||
|
||||
# Resume normal execution
|
||||
develop.run(self)
|
||||
|
||||
|
||||
class InstallM2CryptoWindows(Command):
|
||||
|
||||
description = 'Install M2CryptoWindows'
|
||||
|
||||
def initialize_options(self):
|
||||
pass
|
||||
|
||||
def finalize_options(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
if getattr(self.distribution, 'salt_installing_m2crypto_windows', None) is None:
|
||||
print('This command is not meant to be called on its own')
|
||||
exit(1)
|
||||
import platform
|
||||
from pip.utils import call_subprocess
|
||||
from pip.utils.logging import indent_log
|
||||
platform_bits, _ = platform.architecture()
|
||||
with indent_log():
|
||||
call_subprocess(
|
||||
['pip', 'install', '--egg', 'M2CryptoWin{0}'.format(platform_bits[:2])]
|
||||
)
|
||||
|
||||
|
||||
class InstallPyCryptoWindowsWheel(Command):
|
||||
|
||||
description = 'Install PyCrypto on Windows'
|
||||
|
||||
def initialize_options(self):
|
||||
pass
|
||||
|
||||
def finalize_options(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
if getattr(self.distribution, 'salt_installing_pycrypto_windows', None) is None:
|
||||
print('This command is not meant to be called on its own')
|
||||
exit(1)
|
||||
import platform
|
||||
from pip.utils import call_subprocess
|
||||
from pip.utils.logging import indent_log
|
||||
platform_bits, _ = platform.architecture()
|
||||
call_arguments = ['pip', 'install', 'wheel']
|
||||
if platform_bits == '64bit':
|
||||
call_arguments.append(
|
||||
'http://repo.saltstack.com/windows/dependencies/64/pycrypto-2.6.1-cp27-none-win_amd64.whl'
|
||||
)
|
||||
else:
|
||||
call_arguments.append(
|
||||
'http://repo.saltstack.com/windows/dependencies/32/pycrypto-2.6.1-cp27-none-win32.whl'
|
||||
)
|
||||
with indent_log():
|
||||
call_subprocess(call_arguments)
|
||||
|
||||
|
||||
class DownloadWindowsDlls(Command):
|
||||
|
||||
description = 'Download required DLLs for Windows'
|
||||
|
||||
def initialize_options(self):
|
||||
pass
|
||||
|
||||
def finalize_options(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
if getattr(self.distribution, 'salt_download_windows_dlls', None) is None:
|
||||
print('This command is not meant to be called on its own')
|
||||
exit(1)
|
||||
import platform
|
||||
from pip.utils.logging import indent_log
|
||||
platform_bits, _ = platform.architecture()
|
||||
url = 'https://repo.saltstack.com/windows/dependencies/{bits}/{fname}32.dll'
|
||||
dest = os.path.join(os.path.dirname(sys.executable), '{fname}32.dll')
|
||||
with indent_log():
|
||||
for fname in ('libeay', 'ssleay'):
|
||||
furl = url.format(bits=platform_bits[:2], fname=fname)
|
||||
fdest = dest.format(fname=fname)
|
||||
if not os.path.exists(fdest):
|
||||
log.info('Downloading {0}32.dll to {1} from {2}'.format(fname, fdest, furl))
|
||||
try:
|
||||
import requests
|
||||
from contextlib import closing
|
||||
with closing(requests.get(furl, stream=True)) as req:
|
||||
if req.status_code == 200:
|
||||
with open(fdest, 'w') as wfh:
|
||||
for chunk in req.iter_content(chunk_size=4096):
|
||||
if chunk: # filter out keep-alive new chunks
|
||||
wfh.write(chunk)
|
||||
wfh.flush()
|
||||
else:
|
||||
log.error(
|
||||
'Failed to download {0}32.dll to {1} from {2}'.format(
|
||||
fname, fdest, furl
|
||||
)
|
||||
)
|
||||
except ImportError:
|
||||
req = urlopen(furl)
|
||||
|
||||
if req.getcode() == 200:
|
||||
with open(fdest, 'w') as wfh:
|
||||
while True:
|
||||
for chunk in req.read(4096):
|
||||
if not chunk:
|
||||
break
|
||||
wfh.write(chunk)
|
||||
wfh.flush()
|
||||
else:
|
||||
log.error(
|
||||
'Failed to download {0}32.dll to {1} from {2}'.format(
|
||||
fname, fdest, furl
|
||||
)
|
||||
)
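One caveat in the ``urlopen`` fallback above: ``for chunk in req.read(4096)`` iterates over the individual bytes of a single read, and once ``read()`` returns an empty string the ``while True`` loop never terminates. A conventional read loop would look more like this sketch (not the committed code; ``furl``/``fdest`` as in the loop above):

.. code-block:: python

    req = urlopen(furl)
    if req.getcode() == 200:
        with open(fdest, 'wb') as wfh:
            while True:
                chunk = req.read(4096)
                if not chunk:   # read() returns '' at EOF
                    break
                wfh.write(chunk)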
|
||||
|
||||
|
||||
class Sdist(sdist):
|
||||
|
||||
def make_release_tree(self, base_dir, files):
|
||||
|
@ -322,8 +562,9 @@ class TestCommand(Command):
|
|||
self.runtests_opts = None
|
||||
|
||||
def finalize_options(self):
|
||||
"""Abstract method that is required to be overwritten"""
|
||||
pass
|
||||
'''
|
||||
Abstract method that is required to be overwritten
|
||||
'''
|
||||
|
||||
def run(self):
|
||||
from subprocess import Popen
|
||||
|
@ -394,24 +635,10 @@ class Build(build):
|
|||
self.run_command('write_salt_version')
|
||||
|
||||
# Write the system paths file
|
||||
system_paths_file_path = os.path.join(
|
||||
self.distribution.salt_syspaths_hardcoded_path = os.path.join(
|
||||
self.build_lib, 'salt', '_syspaths.py'
|
||||
)
|
||||
open(system_paths_file_path, 'w').write(
|
||||
INSTALL_SYSPATHS_TEMPLATE.format(
|
||||
date=datetime.utcnow(),
|
||||
root_dir=self.distribution.salt_root_dir,
|
||||
config_dir=self.distribution.salt_config_dir,
|
||||
cache_dir=self.distribution.salt_cache_dir,
|
||||
sock_dir=self.distribution.salt_sock_dir,
|
||||
srv_root_dir=self.distribution.salt_srv_root_dir,
|
||||
base_file_roots_dir=self.distribution.salt_base_file_roots_dir,
|
||||
base_pillar_roots_dir=self.distribution.salt_base_pillar_roots_dir,
|
||||
base_master_roots_dir=self.distribution.salt_base_master_roots_dir,
|
||||
logs_dir=self.distribution.salt_logs_dir,
|
||||
pidfile_dir=self.distribution.salt_pidfile_dir,
|
||||
)
|
||||
)
|
||||
self.run_command('generate_salt_syspaths')
|
||||
|
||||
|
||||
class Install(install):
|
||||
|
@ -504,6 +731,20 @@ class Install(install):
|
|||
self.distribution.salt_version_hardcoded_path = os.path.join(
|
||||
self.build_lib, 'salt', '_version.py'
|
||||
)
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
if __saltstack_version__.info < (2015, 8): # pylint: disable=undefined-variable
|
||||
# Install M2Crypto first
|
||||
self.distribution.salt_installing_m2crypto_windows = True
|
||||
self.run_command('install-m2crypto-windows')
|
||||
self.distribution.salt_installing_m2crypto_windows = None
|
||||
# Install PyCrypto
|
||||
self.distribution.salt_installing_pycrypto_windows = True
|
||||
self.run_command('install-pycrypto-windows')
|
||||
self.distribution.salt_installing_pycrypto_windows = None
|
||||
# Download the required DLLs
|
||||
self.distribution.salt_download_windows_dlls = True
|
||||
self.run_command('download-windows-dlls')
|
||||
self.distribution.salt_download_windows_dlls = None
|
||||
# Run install.run
|
||||
install.run(self)
|
||||
|
||||
|
@ -602,7 +843,6 @@ class SaltDistribution(distutils.dist.Distribution):
|
|||
self.salt_logs_dir = None
|
||||
self.salt_pidfile_dir = None
|
||||
|
||||
|
||||
self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
|
||||
self.salt_version = __version__ # pylint: disable=undefined-variable
|
||||
self.description = 'Portable, distributed, remote execution and configuration management system'
|
||||
|
@ -615,10 +855,19 @@ class SaltDistribution(distutils.dist.Distribution):
|
|||
'sdist': Sdist,
|
||||
'install': Install,
|
||||
'write_salt_version': WriteSaltVersion,
|
||||
'generate_salt_syspaths': GenerateSaltSyspaths,
|
||||
'write_salt_ssh_packaging_file': WriteSaltSshPackagingFile})
|
||||
if not IS_WINDOWS_PLATFORM:
|
||||
self.cmdclass.update({'sdist': CloudSdist,
|
||||
'install_lib': InstallLib})
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
self.cmdclass.update({'install-pycrypto-windows': InstallPyCryptoWindowsWheel,
|
||||
'download-windows-dlls': DownloadWindowsDlls})
|
||||
if __saltstack_version__.info < (2015, 8): # pylint: disable=undefined-variable
|
||||
self.cmdclass.update({'install-m2crypto-windows': InstallM2CryptoWindows})
|
||||
|
||||
if WITH_SETUPTOOLS:
|
||||
self.cmdclass.update({'develop': Develop})
|
||||
|
||||
self.license = 'Apache Software License 2.0'
|
||||
self.packages = self.discover_packages()
|
||||
|
@ -737,6 +986,7 @@ class SaltDistribution(distutils.dist.Distribution):
|
|||
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
install_requires.append('WMI')
|
||||
install_requires.append('pypiwin32 >= 219')
|
||||
|
||||
if self.salt_transport == 'zeromq':
|
||||
install_requires += _parse_requirements_file(SALT_ZEROMQ_REQS)
|
||||
|
@ -917,14 +1167,6 @@ class SaltDistribution(distutils.dist.Distribution):
|
|||
def parse_command_line(self):
|
||||
args = distutils.dist.Distribution.parse_command_line(self)
|
||||
|
||||
# Setup our property functions after class initialization and
|
||||
# after parsing the command line since most are set to None
|
||||
for funcname in dir(self):
|
||||
if not funcname.startswith('_property_'):
|
||||
continue
|
||||
property_name = funcname.split('_property_', 1)[-1]
|
||||
setattr(self, property_name, getattr(self, funcname))
|
||||
|
||||
if not self.ssh_packaging and PACKAGED_FOR_SALT_SSH:
|
||||
self.ssh_packaging = 1
|
||||
|
||||
|
@ -942,6 +1184,16 @@ class SaltDistribution(distutils.dist.Distribution):
|
|||
)
|
||||
)
|
||||
|
||||
# Setup our property functions after class initialization and
|
||||
# after parsing the command line since most are set to None
|
||||
# ATTENTION: This should be the last step before returning the args or
|
||||
# some of the requirements won't be correctly set
|
||||
for funcname in dir(self):
|
||||
if not funcname.startswith('_property_'):
|
||||
continue
|
||||
property_name = funcname.split('_property_', 1)[-1]
|
||||
setattr(self, property_name, getattr(self, funcname))
|
||||
|
||||
return args
|
||||
# <---- Overridden Methods ---------------------------------------------------------------------------------------
|
||||
@ -2,37 +2,912 @@
|
|||
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import tarfile
|
||||
import tempfile
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting.helpers import ensure_in_syspath, skip_if_binaries_missing
|
||||
from distutils.version import LooseVersion
|
||||
from salttesting import skipIf
|
||||
from salttesting.helpers import (
|
||||
destructiveTest,
|
||||
ensure_in_syspath,
|
||||
skip_if_binaries_missing
|
||||
)
|
||||
ensure_in_syspath('../..')
|
||||
|
||||
# Import salt libs
|
||||
import integration
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _worktrees_supported():
|
||||
'''
|
||||
Check if the git version is 2.5.0 or later
|
||||
'''
|
||||
git_version = subprocess.Popen(
|
||||
'git --version',
|
||||
shell=True,
|
||||
close_fds=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE).communicate()[0]
|
||||
if not git_version:
|
||||
# Git not installed
|
||||
return False
|
||||
try:
|
||||
return LooseVersion(git_version.split()[-1]) >= LooseVersion('2.5.0')
|
||||
except Exception:
|
||||
return False
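``_worktrees_supported`` gates these tests on git >= 2.5.0 by feeding the last whitespace-separated token of ``git --version`` to ``distutils.version.LooseVersion``, which compares dotted versions numerically rather than lexicographically:

.. code-block:: python

    from distutils.version import LooseVersion

    assert 'git version 2.5.0'.split()[-1] == '2.5.0'
    assert LooseVersion('2.5.0') >= LooseVersion('2.5.0')
    assert LooseVersion('2.10.1') > LooseVersion('2.9.5')  # '10' > '9' numerically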
|
||||
|
||||
|
||||
def _makedirs(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exc:
|
||||
# Don't raise an exception if the directory exists
|
||||
if exc.errno != errno.EEXIST:
|
||||
raise
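``_makedirs`` swallows only ``EEXIST`` so repeated setup runs are safe; on Python 3.2+ the standard library offers the same idempotent behaviour directly (a rough equivalent, since ``exist_ok`` additionally checks that the existing path is a directory):

.. code-block:: python

    import os

    os.makedirs('some/dir', exist_ok=True)  # Python 3 analogue of _makedirs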
|
||||
|
||||
|
||||
@skip_if_binaries_missing('git')
|
||||
class GitModuleTest(integration.ModuleCase):
|
||||
|
||||
def setUp(self):
|
||||
self.repos = tempfile.mkdtemp(dir=integration.TMP)
|
||||
self.addCleanup(shutil.rmtree, self.repos, ignore_errors=True)
|
||||
subprocess.check_call(['git', 'init', '--quiet', self.repos])
|
||||
|
||||
def test_config_set_value_has_space_characters(self):
|
||||
'''
|
||||
Tests the git.config_set function
|
||||
'''
|
||||
config_key = "user.name"
|
||||
config_value = "foo bar"
|
||||
|
||||
ret = self.run_function(
|
||||
'git.config_set',
|
||||
cwd=self.repos,
|
||||
setting_name=config_key,
|
||||
setting_value=config_value,
|
||||
super(GitModuleTest, self).setUp()
|
||||
self.orig_cwd = os.getcwd()
|
||||
self.addCleanup(os.chdir, self.orig_cwd)
|
||||
self.repo = tempfile.mkdtemp(dir=integration.TMP)
|
||||
self.addCleanup(shutil.rmtree, self.repo, ignore_errors=True)
|
||||
self.files = ('foo', 'bar', 'baz')
|
||||
self.dirs = ('', 'qux')
|
||||
self.branches = ('master', 'iamanewbranch')
|
||||
self.tags = ('git_testing',)
|
||||
for dirname in self.dirs:
|
||||
dir_path = os.path.join(self.repo, dirname)
|
||||
_makedirs(dir_path)
|
||||
for filename in self.files:
|
||||
with open(os.path.join(dir_path, filename), 'w') as fp_:
|
||||
fp_.write('This is a test file named ' + filename + '.')
|
||||
# Navigate to the root of the repo to init, stage, and commit
|
||||
os.chdir(self.repo)
|
||||
subprocess.check_call(['git', 'init', '--quiet', self.repo])
|
||||
subprocess.check_call(
|
||||
['git', 'config', '--global', 'user.name', 'Jenkins'])
|
||||
subprocess.check_call(
|
||||
['git', 'config', '--global', 'user.email', 'qa@saltstack.com'])
|
||||
subprocess.check_call(['git', 'add', '.'])
|
||||
subprocess.check_call(
|
||||
['git', 'commit', '--quiet', '--message', 'Initial commit']
|
||||
)
|
||||
self.assertEqual("", ret)
|
||||
# Add a tag
|
||||
subprocess.check_call(
|
||||
['git', 'tag', '--annotate', self.tags[0], '--message', 'Add tag']
|
||||
)
|
||||
# Checkout a second branch
|
||||
subprocess.check_call(
|
||||
['git', 'checkout', '--quiet', '-b', self.branches[1]]
|
||||
)
|
||||
# Add a line to the file
|
||||
with open(self.files[0], 'a') as fp_:
|
||||
fp_.write('Added a line\n')
|
||||
# Commit the updated file
|
||||
subprocess.check_call(
|
||||
['git', 'commit', '--quiet',
|
||||
'--message', 'Added a line to ' + self.files[0], self.files[0]]
|
||||
)
|
||||
# Switch back to master
|
||||
subprocess.check_call(['git', 'checkout', '--quiet', 'master'])
|
||||
# Go back to original cwd
|
||||
os.chdir(self.orig_cwd)
|
||||
|
||||
    def test_add_dir(self):
        '''
        Test git.add with a directory
        '''
        newdir = 'quux'
        # Create a new dir in the repo and populate it with test files
        newdir_path = os.path.join(self.repo, newdir)
        _makedirs(newdir_path)
        files = [os.path.join(newdir_path, x) for x in self.files]
        files_relpath = [os.path.join(newdir, x) for x in self.files]
        for path in files:
            with open(path, 'w') as fp_:
                fp_.write(
                    'This is a test file with relative path {0}.\n'
                    .format(path)
                )
        ret = self.run_function('git.add', [self.repo, newdir])
        self.assertEqual(
            ret,
            '\n'.join(
                sorted(['add \'{0}\''.format(x)
                        for x in files_relpath])
            )
        )

    def test_add_file(self):
        '''
        Test git.add with a file
        '''
        filename = 'quux'
        file_path = os.path.join(self.repo, filename)
        with open(file_path, 'w') as fp_:
            fp_.write('This is a test file named ' + filename + '.\n')
        ret = self.run_function('git.add', [self.repo, filename])
        self.assertEqual(ret, 'add \'{0}\''.format(filename))

    def test_archive(self):
        '''
        Test git.archive
        '''
        tar_archive = os.path.join(integration.TMP, 'test_archive.tar.gz')
        self.assertTrue(
            self.run_function(
                'git.archive',
                [self.repo, tar_archive],
                prefix='foo/'
            )
        )
        self.assertTrue(tarfile.is_tarfile(tar_archive))
        with tarfile.open(tar_archive, 'r') as tar_obj:
            self.assertEqual(
                tar_obj.getnames(),
                ['foo', 'foo/bar', 'foo/baz', 'foo/foo', 'foo/qux',
                 'foo/qux/bar', 'foo/qux/baz', 'foo/qux/foo']
            )
        os.unlink(tar_archive)

    def test_archive_subdir(self):
        '''
        Test git.archive on a subdir, giving only a partial copy of the repo
        in the resulting archive
        '''
        tar_archive = os.path.join(integration.TMP, 'test_archive.tar.gz')
        self.assertTrue(
            self.run_function(
                'git.archive',
                [os.path.join(self.repo, 'qux'), tar_archive],
                prefix='foo/'
            )
        )
        self.assertTrue(tarfile.is_tarfile(tar_archive))
        with tarfile.open(tar_archive, 'r') as tar_obj:
            self.assertEqual(
                tar_obj.getnames(),
                ['foo', 'foo/bar', 'foo/baz', 'foo/foo']
            )
        os.unlink(tar_archive)

    def test_branch(self):
        '''
        Test creating, renaming, and deleting a branch using git.branch
        '''
        renamed_branch = 'ihavebeenrenamed'
        self.assertTrue(
            self.run_function('git.branch', [self.repo, self.branches[1]])
        )
        self.assertTrue(
            self.run_function(
                'git.branch',
                [self.repo, renamed_branch],
                opts='-m ' + self.branches[1]
            )
        )
        self.assertTrue(
            self.run_function(
                'git.branch',
                [self.repo, renamed_branch],
                opts='-D'
            )
        )

    def test_checkout(self):
        '''
        Test checking out a new branch and then checking out master again
        '''
        new_branch = 'iamanothernewbranch'
        self.assertEqual(
            self.run_function(
                'git.checkout',
                [self.repo, 'HEAD'],
                opts='-b ' + new_branch
            ),
            'Switched to a new branch \'' + new_branch + '\''
        )
        self.assertTrue(
            'Switched to branch \'master\'' in
            self.run_function('git.checkout', [self.repo, 'master'])
        )

    def test_checkout_no_rev(self):
        '''
        Test git.checkout without a rev, both with -b in opts and without
        '''
        new_branch = 'iamanothernewbranch'
        self.assertEqual(
            self.run_function(
                'git.checkout', [self.repo], rev=None, opts='-b ' + new_branch
            ),
            'Switched to a new branch \'' + new_branch + '\''
        )
        self.assertTrue(
            '\'rev\' argument is required unless -b or -B in opts' in
            self.run_function('git.checkout', [self.repo])
        )

    def test_clone(self):
        '''
        Test cloning an existing repo
        '''
        clone_parent_dir = tempfile.mkdtemp(dir=integration.TMP)
        self.assertTrue(
            self.run_function('git.clone', [clone_parent_dir, self.repo])
        )
        # Cleanup after yourself
        shutil.rmtree(clone_parent_dir)

    def test_clone_with_alternate_name(self):
        '''
        Test cloning an existing repo with an alternate name for the repo dir
        '''
        clone_parent_dir = tempfile.mkdtemp(dir=integration.TMP)
        clone_name = os.path.basename(self.repo)
        # Clone the repo into the newly-created parent dir under clone_name
        self.assertTrue(
            self.run_function(
                'git.clone',
                [clone_parent_dir, self.repo],
                name=clone_name
            )
        )
        # Cleanup after yourself
        shutil.rmtree(clone_parent_dir)

    def test_commit(self):
        '''
        Test git.commit two ways:
            1) First using git.add, then git.commit
            2) Using git.commit with the 'filename' argument to skip staging
        '''
        filename = 'foo'
        commit_re_prefix = r'^\[master [0-9a-f]+\] '
        # Add a line
        with open(os.path.join(self.repo, filename), 'a') as fp_:
            fp_.write('Added a line\n')
        # Stage the file
        self.run_function('git.add', [self.repo, filename])
        # Commit the staged file
        commit_msg = 'Add a line to ' + filename
        ret = self.run_function('git.commit', [self.repo, commit_msg])
        # Make sure the expected line is in the output
        self.assertTrue(bool(re.search(commit_re_prefix + commit_msg, ret)))
        # Add another line
        with open(os.path.join(self.repo, filename), 'a') as fp_:
            fp_.write('Added another line\n')
        # Commit the second file without staging
        commit_msg = 'Add another line to ' + filename
        ret = self.run_function(
            'git.commit',
            [self.repo, commit_msg],
            filename=filename
        )
        self.assertTrue(bool(re.search(commit_re_prefix + commit_msg, ret)))

    @destructiveTest
    def test_config(self):
        '''
        Test setting, getting, and unsetting config values

        WARNING: This test will modify and completely remove a config section
        'foo', both in the repo created in setUp() and in the user's global
        .gitconfig.
        '''
        def _clear_config():
            cmds = (
                ['git', 'config', '--remove-section', 'foo'],
                ['git', 'config', '--global', '--remove-section', 'foo']
            )
            for cmd in cmds:
                with open(os.devnull, 'w') as devnull:
                    try:
                        subprocess.check_call(cmd, stderr=devnull)
                    except subprocess.CalledProcessError:
                        pass

        cfg_local = {
            'foo.single': ['foo'],
            'foo.multi': ['foo', 'bar', 'baz']
        }
        cfg_global = {
            'foo.single': ['abc'],
            'foo.multi': ['abc', 'def', 'ghi']
        }
        _clear_config()
        try:
            # Try to specify both single and multivar (error raised)
            self.assertTrue(
                'Only one of \'value\' and \'multivar\' is permitted' in
                self.run_function(
                    'git.config_set',
                    ['foo.single'],
                    value=cfg_local['foo.single'][0],
                    multivar=cfg_local['foo.multi'],
                    cwd=self.repo
                )
            )
            # Set single local value without cwd (error raised)
            self.assertTrue(
                '\'cwd\' argument required unless global=True' in
                self.run_function(
                    'git.config_set',
                    ['foo.single'],
                    value=cfg_local['foo.single'][0],
                )
            )
            # Set single local value
            self.assertEqual(
                self.run_function(
                    'git.config_set',
                    ['foo.single'],
                    value=cfg_local['foo.single'][0],
                    cwd=self.repo
                ),
                cfg_local['foo.single']
            )
            # Set single global value
            self.assertEqual(
                self.run_function(
                    'git.config_set',
                    ['foo.single'],
                    value=cfg_global['foo.single'][0],
                    **{'global': True}
                ),
                cfg_global['foo.single']
            )
            # Set local multivar
            self.assertEqual(
                self.run_function(
                    'git.config_set',
                    ['foo.multi'],
                    multivar=cfg_local['foo.multi'],
                    cwd=self.repo
                ),
                cfg_local['foo.multi']
            )
            # Set global multivar
            self.assertEqual(
                self.run_function(
                    'git.config_set',
                    ['foo.multi'],
                    multivar=cfg_global['foo.multi'],
                    **{'global': True}
                ),
                cfg_global['foo.multi']
            )
            # Get single local value
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.single'],
                    cwd=self.repo
                ),
                cfg_local['foo.single'][0]
            )
            # Get single value from local multivar
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.multi'],
                    cwd=self.repo
                ),
                cfg_local['foo.multi'][-1]
            )
            # Get all values from multivar (includes globals)
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.multi'],
                    cwd=self.repo,
                    **{'all': True}
                ),
                cfg_local['foo.multi']
            )
            # Get single global value
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.single'],
                    **{'global': True}
                ),
                cfg_global['foo.single'][0]
            )
            # Get single value from global multivar
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.multi'],
                    **{'global': True}
                ),
                cfg_global['foo.multi'][-1]
            )
            # Get all values from global multivar
            self.assertEqual(
                self.run_function(
                    'git.config_get',
                    ['foo.multi'],
                    **{'all': True, 'global': True}
                ),
                cfg_global['foo.multi']
            )
            # Get all local keys/values using regex
            self.assertEqual(
                self.run_function(
                    'git.config_get_regexp',
                    ['foo.(single|multi)'],
                    cwd=self.repo
                ),
                cfg_local
            )
            # Get all global keys/values using regex
            self.assertEqual(
                self.run_function(
                    'git.config_get_regexp',
                    ['foo.(single|multi)'],
                    cwd=self.repo,
                    **{'global': True}
                ),
                cfg_global
            )
            # Get just the local foo.multi values containing 'a'
            self.assertEqual(
                self.run_function(
                    'git.config_get_regexp',
                    ['foo.multi'],
                    value_regex='a',
                    cwd=self.repo
                ),
                {'foo.multi': [x for x in cfg_local['foo.multi'] if 'a' in x]}
            )
            # Get just the global foo.multi values containing 'a'
            self.assertEqual(
                self.run_function(
                    'git.config_get_regexp',
                    ['foo.multi'],
                    value_regex='a',
                    cwd=self.repo,
                    **{'global': True}
                ),
                {'foo.multi': [x for x in cfg_global['foo.multi'] if 'a' in x]}
            )

            # TODO: More robust unset testing, try to trigger all the
            # exceptions raised.

            # Unset a single local value
            self.assertTrue(
                self.run_function(
                    'git.config_unset',
                    ['foo.single'],
                    cwd=self.repo,
                )
            )
            # Unset an entire local multivar
            self.assertTrue(
                self.run_function(
                    'git.config_unset',
                    ['foo.multi'],
                    cwd=self.repo,
                    **{'all': True}
                )
            )
            # Unset a single global value
            self.assertTrue(
                self.run_function(
                    'git.config_unset',
                    ['foo.single'],
                    **{'global': True}
                )
            )
            # Unset an entire global multivar
            self.assertTrue(
                self.run_function(
                    'git.config_unset',
                    ['foo.multi'],
                    **{'all': True, 'global': True}
                )
            )
        except Exception:
            raise
        finally:
            _clear_config()
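
    # The WARNING in the docstring above exists because --global writes go to
    # the real ~/.gitconfig of whoever runs the suite. A sketch of one way to
    # sandbox that (git resolves --global config relative to $HOME; the
    # helper name and approach here are hypothetical, not part of the suite):
    def _run_with_isolated_home(self, func):
        # Point $HOME at a throwaway directory for the duration of ``func``
        fake_home = tempfile.mkdtemp(dir=integration.TMP)
        orig_home = os.environ.get('HOME')
        os.environ['HOME'] = fake_home
        try:
            return func()
        finally:
            if orig_home is None:
                os.environ.pop('HOME', None)
            else:
                os.environ['HOME'] = orig_home
            shutil.rmtree(fake_home, ignore_errors=True)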

    def test_current_branch(self):
        '''
        Test git.current_branch
        '''
        self.assertEqual(
            self.run_function('git.current_branch', [self.repo]),
            'master'
        )

    def test_describe(self):
        '''
        Test git.describe
        '''
        self.assertEqual(
            self.run_function('git.describe', [self.repo]),
            self.tags[0]
        )

    # Test for git.fetch would be unreliable on Jenkins, skipping for now.
    # The test should go into test_remotes when ready.

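    # A sketch of how such a test could avoid the network entirely by
    # fetching new upstream commits from a second local clone (the flow and
    # method name are hypothetical; prefixed with '_' so it is not collected):
    def _sketch_fetch_from_local_clone(self):
        clone_dir = tempfile.mkdtemp(dir=integration.TMP)
        self.run_function('git.clone', [clone_dir, self.repo])
        # Create a new commit in the original (upstream) repo ...
        with open(os.path.join(self.repo, self.files[0]), 'a') as fp_:
            fp_.write('Upstream change\n')
        self.run_function(
            'git.commit',
            [self.repo, 'Upstream change'],
            filename=self.files[0]
        )
        # ... then fetch it into the clone over the filesystem transport
        ret = self.run_function('git.fetch', [clone_dir])
        self.assertTrue('ERROR' not in ret)
        shutil.rmtree(clone_dir, ignore_errors=True)
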
    def test_init(self):
        '''
        Use git.init to init a new repo
        '''
        new_repo = tempfile.mkdtemp(dir=integration.TMP)
        self.assertEqual(
            self.run_function('git.init', [new_repo]),
            'Initialized empty Git repository in {0}/.git/'.format(new_repo)
        )
        shutil.rmtree(new_repo)

    # Test for git.is_worktree is in test_worktree

    def test_list_branches(self):
        '''
        Test git.list_branches
        '''
        self.assertEqual(
            self.run_function('git.list_branches', [self.repo]),
            sorted(self.branches)
        )

    def test_list_tags(self):
        '''
        Test git.list_tags
        '''
        self.assertEqual(
            self.run_function('git.list_tags', [self.repo]),
            sorted(self.tags)
        )

    # Test for git.ls_remote will need to wait for now, while I think of how
    # to properly mock it.

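    # Until a proper mock exists, a low-risk sketch is to treat the on-disk
    # repo itself as the "remote", since a local path is a valid git URL
    # (the kwarg names below follow the 2015.5-era signature and are
    # assumptions, hence the uncollected '_' prefix):
    def _sketch_ls_remote_local_path(self):
        head_rev = self.run_function('git.revision', [self.repo], rev='HEAD')
        remote_rev = self.run_function(
            'git.ls_remote',
            [self.repo],
            repository=self.repo,
            branch='master'
        )
        self.assertEqual(remote_rev, head_rev)
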
    def test_merge(self):
        '''
        Test git.merge

        # TODO: Test more than just a fast-forward merge
        '''
        # Merge the second branch into the current branch
        ret = self.run_function(
            'git.merge',
            [self.repo],
            rev=self.branches[1]
        )
        # Merge should be a fast-forward
        self.assertTrue('Fast-forward' in ret.splitlines())

    def test_merge_base_and_tree(self):
        '''
        Test git.merge_base, git.merge_tree and git.revision

        TODO: Test all of the arguments
        '''
        # Get the SHA1 of current HEAD
        head_rev = self.run_function('git.revision', [self.repo], rev='HEAD')
        # Make sure revision is a 40-char string
        self.assertTrue(len(head_rev) == 40)
        # Get the second branch's SHA1
        second_rev = self.run_function(
            'git.revision',
            [self.repo],
            rev=self.branches[1]
        )
        # Make sure revision is a 40-char string
        self.assertTrue(len(second_rev) == 40)
        # self.branches[1] should be just one commit ahead, so the merge base
        # for master and self.branches[1] should be the same as the current
        # HEAD.
        self.assertEqual(
            self.run_function(
                'git.merge_base',
                [self.repo],
                refs=','.join((head_rev, second_rev))
            ),
            head_rev
        )
        # There is no conflict here, so the merge-tree output should contain
        # exactly one diff hunk, for the line added in the second branch.
        ret = self.run_function(
            'git.merge_tree',
            [self.repo, head_rev, second_rev]
        ).splitlines()
        self.assertTrue(len([x for x in ret if x.startswith('@@')]) == 1)

    # Test for git.pull would be unreliable on Jenkins, skipping for now

    # Test for git.push would be unreliable on Jenkins, skipping for now

    def test_rebase(self):
        '''
        Test git.rebase
        '''
        # Make a change to a different file than the one modified in setUp
        file_path = os.path.join(self.repo, self.files[1])
        with open(file_path, 'a') as fp_:
            fp_.write('Added a line\n')
        # Commit the change
        self.assertTrue(
            'ERROR' not in self.run_function(
                'git.commit',
                [self.repo, 'Added a line to ' + self.files[1]],
                filename=self.files[1]
            )
        )
        # Switch to the second branch
        self.assertTrue(
            'ERROR' not in self.run_function(
                'git.checkout',
                [self.repo],
                rev=self.branches[1]
            )
        )
        # Perform the rebase. The commit should show a comment about
        # self.files[0] being modified, as that is the file that was modified
        # in the second branch in the setUp function
        self.assertEqual(
            self.run_function('git.rebase', [self.repo]),
            'First, rewinding head to replay your work on top of it...\n'
            'Applying: Added a line to ' + self.files[0]
        )

    # Test for git.remote_get is in test_remotes

    # Test for git.remote_set is in test_remotes

    def test_remotes(self):
        '''
        Test setting a remote (git.remote_set), and getting a remote
        (git.remote_get and git.remotes)

        TODO: Properly mock fetching a remote (git.fetch), and build out more
        robust testing that confirms that the https auth bits work.
        '''
        remotes = {
            'first': {'fetch': '/dev/null', 'push': '/dev/null'},
            'second': {'fetch': '/dev/null', 'push': '/dev/stdout'}
        }
        self.assertEqual(
            self.run_function(
                'git.remote_set',
                [self.repo, remotes['first']['fetch']],
                remote='first'
            ),
            remotes['first']
        )
        self.assertEqual(
            self.run_function(
                'git.remote_set',
                [self.repo, remotes['second']['fetch']],
                remote='second',
                push_url=remotes['second']['push']
            ),
            remotes['second']
        )
        self.assertEqual(
            self.run_function('git.remotes', [self.repo]),
            remotes
        )

    def test_reset(self):
        '''
        Test git.reset

        TODO: Test more than just a hard reset
        '''
        # Switch to the second branch
        self.assertTrue(
            'ERROR' not in self.run_function(
                'git.checkout',
                [self.repo],
                rev=self.branches[1]
            )
        )
        # Back up one commit. We should now be at the same revision as master
        self.run_function(
            'git.reset',
            [self.repo],
            opts='--hard HEAD~1'
        )
        # Get the SHA1 of current HEAD (remember, we're on the second branch)
        head_rev = self.run_function('git.revision', [self.repo], rev='HEAD')
        # Make sure revision is a 40-char string
        self.assertTrue(len(head_rev) == 40)
        # Get the master branch's SHA1
        master_rev = self.run_function(
            'git.revision',
            [self.repo],
            rev='master'
        )
        # Make sure revision is a 40-char string
        self.assertTrue(len(master_rev) == 40)
        # The two revisions should be the same
        self.assertEqual(head_rev, master_rev)

    def test_rev_parse(self):
        '''
        Test git.rev_parse
        '''
        # Using --abbrev-ref on HEAD will give us the current branch
        self.assertEqual(
            self.run_function(
                'git.rev_parse', [self.repo, 'HEAD'], opts='--abbrev-ref'
            ),
            'master'
        )

    # Test for git.revision happens in test_merge_base

    def test_rm(self):
        '''
        Test git.rm
        '''
        single_file = self.files[0]
        entire_dir = self.dirs[1]
        # Remove a single file
        self.assertEqual(
            self.run_function('git.rm', [self.repo, single_file]),
            'rm \'' + single_file + '\''
        )
        # Remove an entire dir
        expected = '\n'.join(
            sorted(['rm \'' + os.path.join(entire_dir, x) + '\''
                    for x in self.files])
        )
        self.assertEqual(
            self.run_function('git.rm', [self.repo, entire_dir], opts='-r'),
            expected
        )

    def test_stash(self):
        '''
        Test git.stash

        # TODO: test more stash actions
        '''
        file_path = os.path.join(self.repo, self.files[0])
        with open(file_path, 'a') as fp_:
            fp_.write('Temp change to be stashed')
        self.assertTrue(
            'ERROR' not in self.run_function('git.stash', [self.repo])
        )
        # List stashes
        ret = self.run_function('git.stash', [self.repo], action='list')
        self.assertTrue('ERROR' not in ret)
        self.assertTrue(len(ret.splitlines()) == 1)
        # Apply the stash
        self.assertTrue(
            'ERROR' not in self.run_function(
                'git.stash',
                [self.repo],
                action='apply',
                opts='stash@{0}'
            )
        )
        # Drop the stash
        self.assertTrue(
            'ERROR' not in self.run_function(
                'git.stash',
                [self.repo],
                action='drop',
                opts='stash@{0}'
            )
        )

    def test_status(self):
        '''
        Test git.status
        '''
        changes = {
            'modified': ['foo'],
            'new': ['thisisdefinitelyanewfile'],
            'deleted': ['bar'],
            'untracked': ['thisisalsoanewfile']
        }
        for filename in changes['modified']:
            with open(os.path.join(self.repo, filename), 'a') as fp_:
                fp_.write('Added a line\n')
        for filename in changes['new']:
            with open(os.path.join(self.repo, filename), 'w') as fp_:
                fp_.write('This is a new file named ' + filename + '.')
            # Stage the new file so it shows up as a 'new' file
            self.assertTrue(
                'ERROR' not in self.run_function(
                    'git.add',
                    [self.repo, filename]
                )
            )
        for filename in changes['deleted']:
            self.run_function('git.rm', [self.repo, filename])
        for filename in changes['untracked']:
            with open(os.path.join(self.repo, filename), 'w') as fp_:
                fp_.write('This is a new file named ' + filename + '.')
        self.assertEqual(
            self.run_function('git.status', [self.repo]),
            changes
        )

    # TODO: Add git.submodule test

    def test_symbolic_ref(self):
        '''
        Test git.symbolic_ref
        '''
        self.assertEqual(
            self.run_function(
                'git.symbolic_ref',
                [self.repo, 'HEAD'],
                opts='--quiet'
            ),
            'refs/heads/master'
        )

    @skipIf(not _worktrees_supported(),
            'Git 2.5 or newer required for worktree support')
    def test_worktree(self):
        '''
        This tests git.worktree_add, git.is_worktree, git.worktree_rm, and
        git.worktree_prune
        '''
        worktree_name = 'hotfix'
        worktree_path = tempfile.mkdtemp(dir=integration.TMP)
        worktree_basename = os.path.basename(worktree_path)
        # Add a new worktree
        ret = self.run_function(
            'git.worktree_add',
            [self.repo, worktree_path],
            branch=worktree_name
        )
        self.assertTrue('Enter ' + worktree_path in ret)
        # Check if this new path is a worktree
        self.assertTrue(self.run_function('git.is_worktree', [worktree_path]))
        # Check if the main repo is a worktree
        self.assertFalse(self.run_function('git.is_worktree', [self.repo]))
        # Check if a non-repo directory is a worktree
        empty_dir = tempfile.mkdtemp(dir=integration.TMP)
        self.assertFalse(self.run_function('git.is_worktree', [empty_dir]))
        shutil.rmtree(empty_dir)
        # Remove the worktree
        self.assertTrue(self.run_function('git.worktree_rm', [worktree_path]))
        # Prune the worktrees
        prune_message = (
            'Removing worktrees/{0}: gitdir file points to non-existent '
            'location'.format(worktree_basename)
        )
        self.assertEqual(
            self.run_function(
                'git.worktree_prune',
                [self.repo],
                dry_run=True
            ),
            prune_message
        )
        self.assertEqual(
            self.run_function('git.worktree_prune', [self.repo]),
            prune_message
        )
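
# ``_worktrees_supported()``, used by the decorator on test_worktree() above,
# is defined elsewhere in this module. A minimal sketch of such a guard
# (``re`` and ``subprocess`` are already imported here; LooseVersion from
# distutils.version is an assumed extra import, and the name is suffixed so
# it cannot shadow the real helper):
def _worktrees_supported_sketch():
    # Worktrees require git 2.5.0 or newer
    try:
        output = subprocess.check_output(['git', '--version'])
    except (OSError, subprocess.CalledProcessError):
        return False
    match = re.search(r'(\d+(?:\.\d+)+)', output.decode('utf-8'))
    if not match:
        return False
    return LooseVersion(match.group(1)) >= LooseVersion('2.5')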


if __name__ == '__main__':
    from integration import run_tests
    run_tests(GitModuleTest)

@@ -38,6 +38,22 @@ class GitTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
        self.skipTest(msg.format(self.__domain))

    def test_latest(self):
        '''
        git.latest
        '''
        name = os.path.join(integration.TMP, 'salt_repo')
        try:
            ret = self.run_state(
                'git.latest',
                name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
                target=name
            )
            self.assertSaltTrueReturn(ret)
            self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
        finally:
            shutil.rmtree(name, ignore_errors=True)

    def test_latest_with_rev_and_submodules(self):
        '''
        git.latest
        '''
@@ -151,7 +151,7 @@ class TopFileMergeTestCase(TestCase):
        with patch('salt.fileclient.FSClient.envs', MagicMock(return_value=['a', 'b', 'c'])):
            highstate = HighState(config)
            ret = highstate.get_tops()
            self.assertEqual(ret, OrderedDict([('b', [{}]), ('a', [{}]), ('c', [{}])]))
            self.assertEqual(ret, OrderedDict([('a', [{}]), ('c', [{}]), ('b', [{}])]))


if __name__ == '__main__':
@@ -1,355 +0,0 @@
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''

# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
    MagicMock,
    patch,
    NO_MOCK,
    NO_MOCK_REASON
)

# Import Salt Libs
from salt.modules import git
from salt.exceptions import SaltInvocationError

# Globals
git.__salt__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class GitTestCase(TestCase):
    '''
    Test cases for salt.modules.git
    '''
    # 'current_branch' function tests: 1

    def test_current_branch(self):
        '''
        Test if it returns the current branch name
        '''
        mock = MagicMock(return_value=True)
        with patch.dict(git.__salt__, {'cmd.run_stdout': mock}):
            self.assertTrue(git.current_branch('develop'))

    # 'revision' function tests: 1

    def test_revision(self):
        '''
        Test if it returns the long hash of a given identifier
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.revision('develop'))

    # 'clone' function tests: 1

    def test_clone(self):
        '''
        Test if it clones a new repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.clone('origin', 'develop'))

    # 'describe' function tests: 1

    def test_describe(self):
        '''
        Test if it returns the git describe string (or the SHA hash
        if there are no tags) for the given revision
        '''
        mock = MagicMock(return_value=True)
        with patch.dict(git.__salt__, {'cmd.run_stdout': mock}):
            self.assertTrue(git.describe('develop'))

    # 'archive' function tests: 1

    def test_archive(self):
        '''
        Test if it exports a tarball from the repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        mock_val = MagicMock(return_value='true')
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': mock_val}):
            self.assertTrue(git.archive('develop', 'archive.tar.gz'))

    # 'fetch' function tests: 1

    def test_fetch(self):
        '''
        Test if it performs a fetch on the given repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.fetch('develop'))

    # 'pull' function tests: 1

    def test_pull(self):
        '''
        Test if it performs a pull on the given repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.pull('develop'))

    # 'rebase' function tests: 1

    def test_rebase(self):
        '''
        Test if it rebases the current branch
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.rebase('develop'), True)

    # 'checkout' function tests: 1

    def test_checkout(self):
        '''
        Test if it checks out a given revision
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.checkout('develop', 'mybranch'))

    # 'merge' function tests: 1

    def test_merge(self):
        '''
        Test if it merges a given branch
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.merge('develop'))

    # 'init' function tests: 1

    def test_init(self):
        '''
        Test if it initializes a new git repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.init('develop'))

    # 'submodule' function tests: 1

    def test_submodule(self):
        '''
        Test if it initializes git submodules
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.submodule('develop'))

    # 'status' function tests: 1

    def test_status(self):
        '''
        Test if it returns the status of the repository
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertListEqual(git.status('develop'), [])

    # 'add' function tests: 1

    def test_add(self):
        '''
        Test if it adds a file to git
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.add('develop',
                                    '/salt/tests/unit/modules/example.py'))

    # 'rm' function tests: 1

    def test_rm(self):
        '''
        Test if it removes a file from git
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.rm('develop',
                                   '/salt/tests/unit/modules/example.py'))

    # 'commit' function tests: 1

    def test_commit(self):
        '''
        Test if it creates a commit
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.commit('develop', 'The commit message'))

    # 'push' function tests: 1

    def test_push(self):
        '''
        Test if it pushes to a remote
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertTrue(git.push('develop', 'remote-name'))

    # 'remotes' function tests: 1

    def test_remotes(self):
        '''
        Test if it gets remotes like git remote -v
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertDictEqual(git.remotes('develop'), {})

    # 'remote_get' function tests: 1

    def test_remote_get(self):
        '''
        Test if it gets the fetch and push URL for a specified remote name
        '''
        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': '\nSalt\nStack'})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.remote_get('develop'), ('Salt', 'Stack'))

        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': '\norigin\norigin'})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.remote_get('develop'), None)

        mock = MagicMock(return_value={'retcode': 1,
                                       'stdout': '\norigin\norigin',
                                       'stderr': 'error'})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.remote_get('develop'), None)

    # 'remote_set' function tests: 1

    def test_remote_set(self):
        '''
        Test if it sets a remote with name and URL like git remote add name url
        '''
        mock = MagicMock(return_value={'retcode': 0,
                                       'stdout': '\nSalt\nStack'})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.remote_set('develop'), ('Salt', 'Stack'))

    # 'branch' function tests: 1

    def test_branch(self):
        '''
        Test if it interacts with branches
        '''
        mock_all = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        mock_stdout = MagicMock(return_value=True)
        with patch.dict(git.__salt__, {'cmd.run_all': mock_all,
                                       'cmd.run_stdout': mock_stdout}):
            self.assertEqual(git.branch('develop', 'origin/develop'), True)

    # 'reset' function tests: 1

    def test_reset(self):
        '''
        Test if it resets the repository checkout
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.reset('develop'), True)

    # 'stash' function tests: 1

    def test_stash(self):
        '''
        Test if it stashes changes in the repository checkout
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.stash('develop'), True)

    # 'config_set' function tests: 1

    def test_config_set(self):
        '''
        Test if it sets a key in the git configuration file
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertRaises(TypeError, git.config_set)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertRaises(SaltInvocationError, git.config_set,
                              None, 'myname', 'me@example.com')

        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.config_set(None, 'myname', 'me@example.com',
                                            'me', True), True)

    # 'config_get' function tests: 1

    def test_config_get(self):
        '''
        Test if it gets a key or keys from the git configuration file
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertRaises(TypeError, git.config_get)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.config_get(None, 'myname', 'me'), True)

    # 'ls_remote' function tests: 1

    def test_ls_remote(self):
        '''
        Test if it returns the upstream hash for any given URL and branch.
        '''
        mock = MagicMock(return_value={'retcode': 0, 'stdout': True})
        with patch.dict(git.__salt__, {'cmd.run_all': mock,
                                       'cmd.run_stdout': True}):
            self.assertEqual(git.ls_remote('develop'), True)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(GitTestCase, needs_daemon=False)
@@ -18,7 +18,11 @@ from salttesting.mock import (
# Import Salt Libs
from salt.modules import pw_user
from salt.exceptions import CommandExecutionError
import pwd
try:
    import pwd
    HAS_PWD = True
except ImportError:
    HAS_PWD = False


# Globals

@@ -27,6 +31,7 @@ pw_user.__salt__ = {}
pw_user.__context__ = {}


@skipIf(not HAS_PWD, 'These tests can only run on systems with the python pwd module')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PwUserTestCase(TestCase):
    '''

@@ -49,16 +54,21 @@ class PwUserTestCase(TestCase):
        with patch.dict(pw_user.__salt__, {'cmd.run_all': mock}):
            self.assertTrue(pw_user.delete('A'), 1)

    @patch('salt.modules.pw_user.__context__', MagicMock(return_value='A'))
    def test_getent(self):
        '''
        Test if user.getent already has a value
        '''
        self.assertTrue(pw_user.getent())
        mock_user = 'saltdude'

        mock = MagicMock(return_value='A')
        with patch.object(pw_user, 'info', mock):
            self.assertEqual(pw_user.getent(True)[0], 'A')
        class MockData(object):
            pw_name = mock_user

        with patch('pwd.getpwall', MagicMock(return_value=[MockData()])):
            with patch.dict(pw_user.__context__, {'user.getent': mock_user}):
                self.assertEqual(pw_user.getent(), mock_user)

        with patch.object(pw_user, 'info', MagicMock(return_value=mock_user)):
            self.assertEqual(pw_user.getent(True)[0], mock_user)

    def test_chuid(self):
        '''
@@ -291,13 +301,22 @@ class PwUserTestCase(TestCase):
        '''
        Return a list of groups the named user belongs to
        '''
        self.assertEqual(pw_user.list_groups('name'), 'A')
        mock_group = 'saltgroup'

        with patch('salt.utils.get_group_list', MagicMock(return_value=[mock_group])):
            self.assertEqual(pw_user.list_groups('name'), [mock_group])

    def test_list_users(self):
        '''
        Return a list of all users
        '''
        self.assertTrue(pw_user.list_users())
        mock_user = 'saltdude'

        class MockData(object):
            pw_name = mock_user

        with patch('pwd.getpwall', MagicMock(return_value=[MockData()])):
            self.assertEqual(pw_user.list_users(), [mock_user])

    def test_rename(self):
        '''
@@ -41,26 +41,6 @@ class ScheduleTestCase(TestCase):
    '''
    Test cases for salt.modules.schedule
    '''
    # 'list_' function tests: 1

    def test_list(self):
        '''
        Test if it lists the jobs currently scheduled on the minion.
        '''
        with patch.dict(schedule.__opts__, {'schedule': {'_seconds': {'enabled': True}}, 'sock_dir': SOCK_DIR}):
            mock = MagicMock(return_value=True)
            with patch.dict(schedule.__salt__, {'event.fire': mock}):
                _ret_value = {'complete': True, 'schedule': {'_seconds': {'enabled': True}}}
                with patch.object(SaltEvent, 'get_event', return_value=_ret_value):
                    self.assertEqual(schedule.list_(), "schedule:\n _seconds:\n enabled: true\n")
                    self.assertDictEqual(schedule.list_(show_all=True, return_yaml=False), {'_seconds': {'enabled': True}})

        with patch.dict(schedule.__opts__, {'schedule': {}, 'sock_dir': SOCK_DIR}):
            mock = MagicMock(return_value=True)
            with patch.dict(schedule.__salt__, {'event.fire': mock}):
                _ret_value = {'complete': True, 'schedule': {}}
                with patch.object(SaltEvent, 'get_event', return_value=_ret_value):
                    self.assertDictEqual(schedule.list_(), {'schedule': {}})

    # 'purge' function tests: 1


@@ -193,8 +173,8 @@ class ScheduleTestCase(TestCase):
                              'result': False})

        self.assertDictEqual(schedule.modify('job1', function='test.ping'),
                             {'changes': {},
                              'comment': 'Job job1 in correct state',
                             {'changes': {'diff': '--- \n+++ \n@@ -1,4 +1,3 @@\n-enabled:True\n function:test.ping\n jid_include:True\n maxrunning:1\n'},
                              'comment': 'Modified job: job1 in schedule.',
                              'result': True})

        ret = schedule.modify('job3', function='test.ping', test=True)

@@ -285,19 +265,6 @@ class ScheduleTestCase(TestCase):
                             {'comment': 'Schedule would be disabled.',
                              'result': True})

    # 'reload_' function tests: 1

    def test_reload_(self):
        '''
        Test if it reloads saved scheduled jobs on the minion.
        '''
        mock = MagicMock(return_value=True)
        with patch.dict(schedule.__opts__, {'config_dir': '',
                                            'default_include': '/tmp'}):
            with patch.dict(schedule.__salt__, {'event.fire': mock}):
                self.assertDictEqual(schedule.reload_(),
                                     {'comment': [], 'result': True})

    # 'move' function tests: 1

    def test_move(self):

@@ -196,20 +196,13 @@ class SystemdTestCase(TestCase):
        exe = MagicMock(return_value='foo')
        tmock = MagicMock(return_value=True)
        mock = MagicMock(return_value=False)
        disabled_mock = MagicMock(return_value={'retcode': 1, 'stdout': 'disabled', 'stderr': ''})
        masked_mock = MagicMock(return_value={'retcode': 1, 'stdout': 'masked', 'stderr': ''})
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                with patch.dict(systemd.__salt__, {'cmd.run_all': disabled_mock}):
                    with patch.object(systemd, "_service_is_sysv", mock):
                        self.assertTrue(systemd.enable("sshd"))
                    with patch.object(systemd, "_get_service_exec", exe):
                        with patch.object(systemd, "_service_is_sysv", tmock):
                            self.assertTrue(systemd.enable("sshd"))

                with patch.dict(systemd.__salt__, {'cmd.run_all': masked_mock}):
                    with patch.object(systemd, "_service_is_sysv", mock):
                    with patch.object(systemd, "_service_is_sysv", mock):
                        self.assertTrue(systemd.enable("sshd"))
                    with patch.object(systemd, "_get_service_exec", exe):
                        with patch.object(systemd, "_service_is_sysv", tmock):
                            self.assertTrue(systemd.enable("sshd"))

    def test_disable(self):

@@ -98,7 +98,7 @@ class BotoElbTestCase(TestCase):
        self.assertTrue(boto_elb.__salt__['boto_elb.exists'].called)
        self.assertTrue(boto_elb.__salt__['boto_elb.create'].called)
        self.assertTrue(boto_elb.__salt__['state.single'].called)
        self.assertTrue(
        self.assertFalse(
            boto_elb.__salt__['boto_elb.get_attributes'].called
        )
        self.assertTrue(

@@ -139,7 +139,8 @@ class CronTestCase(TestCase):
        cron.present(
            name='foo',
            hour='2',
            user='root')
            user='root',
            identifier=None)
        self.assertEqual(
            get_crontab(),
            ('# Lines below here are managed by Salt, do not edit\n'

@@ -147,7 +148,7 @@ class CronTestCase(TestCase):
             '* 2 * * * foo\n'
             '# SALT_CRON_IDENTIFIER:2\n'
             '* 2 * * * foo\n'
             '* 2 * * * foo\n'))
             '* 2 * * * foo'))

    @patch('salt.modules.cron.raw_cron',
           new=MagicMock(side_effect=get_crontab))

@@ -196,214 +197,107 @@ class CronTestCase(TestCase):
           new=MagicMock(side_effect=get_crontab))
    @patch('salt.modules.cron._write_cron_lines',
           new=MagicMock(side_effect=write_crontab))
    def test_aissue_1072(self):
    def test_multiline_comments_are_updated(self):
        set_crontab(
            '# Lines below here are managed by Salt, do not edit\n'
            '# I have a multi-line comment SALT_CRON_IDENTIFIER:1\n'
            '# First crontab - single line comment SALT_CRON_IDENTIFIER:1\n'
            '* 1 * * * foo'
        )
        cron.present(
            name='foo',
            hour='1',
            comment='1I have a multi-line comment\n2about my script here.\n',
            comment='First crontab\nfirst multi-line comment\n',
            identifier='1',
            user='root')
        cron.present(
            name='foo',
            hour='1',
            comment='3I have a multi-line comment\n3about my script here.\n',
            comment='First crontab\nsecond multi-line comment\n',
            identifier='1',
            user='root')
        cron.present(
            name='foo',
            hour='1',
            comment='I have a multi-line comment\nabout my script here.\n',
            comment='Second crontab\nmulti-line comment\n',
            identifier='2',
            user='root')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '# 2about my script here. SALT_CRON_IDENTIFIER:1\n'
            '# First crontab\n'
            '# second multi-line comment SALT_CRON_IDENTIFIER:1\n'
            '* 1 * * * foo\n'
            '# I have a multi-line comment\n'
            '# about my script here. SALT_CRON_IDENTIFIER:2\n'
            '# Second crontab\n'
            '# multi-line comment SALT_CRON_IDENTIFIER:2\n'
            '* 1 * * * foo')

    @patch('salt.modules.cron.raw_cron',
           new=MagicMock(side_effect=get_crontab))
    @patch('salt.modules.cron._write_cron_lines',
           new=MagicMock(side_effect=write_crontab))
    def test_issue_11935(self):
    def test_existing_unmanaged_jobs_are_made_managed(self):
        set_crontab(
            '# Lines below here are managed by Salt, do not edit\n'
            '0 2 * * * find /var/www -type f '
            '-mtime -7 -print0 | xargs -0 '
            'clamscan -i --no-summary 2>/dev/null'
            '0 2 * * * foo'
        )
        cmd = (
            'find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null'
        )
        self.assertEqual(cron._check_cron('root', cmd, hour='2', minute='0'),
                         'present')
        ret = cron.present(cmd, 'root', minute='0', hour='2')
        self.assertEqual(ret['changes'], {})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null already present')
        self.assertEqual(cron._check_cron('root', cmd, hour='3', minute='0'),
                         'update')
        ret = cron.present(cmd, 'root', minute='0', hour='3')
        self.assertEqual(ret['changes'],
                         {'root': 'find /var/www -type f -mtime -7 -print0 | '
                                  'xargs -0 clamscan -i --no-summary 2>/dev/null'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null updated')
        ret = cron._check_cron('root', 'foo', hour='2', minute='0')
        self.assertEqual(ret, 'present')
        ret = cron.present('foo', 'root', minute='0', hour='2')
        self.assertEqual(ret['changes'], {'root': 'foo'})
        self.assertEqual(ret['comment'], 'Cron foo updated')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '0 3 * * * find /var/www -type f -mtime -7 -print0 |'
            ' xargs -0 clamscan -i --no-summary 2>/dev/null')
            '# SALT_CRON_IDENTIFIER:foo\n'
            '0 2 * * * foo')
        ret = cron.present('foo', 'root', minute='0', hour='2')
        self.assertEqual(ret['changes'], {})
        self.assertEqual(ret['comment'], 'Cron foo already present')

    @patch('salt.modules.cron.raw_cron',
           new=MagicMock(side_effect=get_crontab))
    @patch('salt.modules.cron._write_cron_lines',
           new=MagicMock(side_effect=write_crontab))
    def test_issue_11935_with_id(self):
    def test_existing_noid_jobs_are_updated_with_identifier(self):
        set_crontab(
            '# Lines below here are managed by Salt, do not edit\n'
            '# SALT_CRON_IDENTIFIER:1\n'
            '0 2 * * * find /var/www -type f '
            '-mtime -7 -print0 | xargs -0 '
            'clamscan -i --no-summary 2>/dev/null'
            '# SALT_CRON_IDENTIFIER:NO ID SET\n'
            '1 * * * * foo'
        )
        cmd = (
            'find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null'
        )
        self.assertEqual(cron._check_cron(
            'root', cmd, hour='2', minute='0', identifier=1), 'present')
        ret = cron.present(cmd, 'root', minute='0', hour='2', identifier='1')
        self.assertEqual(ret['changes'], {})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null already present')
        self.assertEqual(cron._check_cron(
            'root', cmd, hour='3', minute='0', identifier='1'), 'update')
        ret = cron.present(cmd, 'root', minute='0', hour='3', identifier='1')
        self.assertEqual(ret['changes'],
                         {'root': 'find /var/www -type f -mtime -7 -print0 | '
                                  'xargs -0 clamscan -i --no-summary 2>/dev/null'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null updated')
        ret = cron._check_cron('root', 'foo', minute=1)
        self.assertEqual(ret, 'present')
        ret = cron.present('foo', 'root', minute=1)
        self.assertEqual(ret['changes'], {'root': 'foo'})
        self.assertEqual(ret['comment'], 'Cron foo updated')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '# SALT_CRON_IDENTIFIER:1\n'
            '0 3 * * * find /var/www -type f -mtime -7 -print0 |'
            ' xargs -0 clamscan -i --no-summary 2>/dev/null')
            '# SALT_CRON_IDENTIFIER:foo\n'
            '1 * * * * foo')

    @patch('salt.modules.cron.raw_cron',
           new=MagicMock(side_effect=get_crontab))
    @patch('salt.modules.cron._write_cron_lines',
           new=MagicMock(side_effect=write_crontab))
    def test_issue_11935_mixed(self):
    def test_existing_duplicate_unmanaged_jobs_are_merged_and_given_id(self):
        set_crontab(
            '# Lines below here are managed by Salt, do not edit\n'
            '0 2 * * * find /var/www -type f '
            '-mtime -7 -print0 | xargs -0 '
            'clamscan -i --no-summary 2>/dev/null'
            '0 2 * * * foo\n'
            '0 2 * * * foo'
        )
        cmd = (
            'find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null'
        )
        self.assertEqual(cron._check_cron('root', cmd, hour='2', minute='0'),
                         'present')
        ret = cron.present(cmd, 'root', minute='0', hour='2')
        ret = cron._check_cron('root', 'foo', hour='2', minute='0')
        self.assertEqual(ret, 'present')
        ret = cron.present('foo', 'root', minute='0', hour='2')
        self.assertEqual(ret['changes'], {'root': 'foo'})
        self.assertEqual(ret['comment'], 'Cron foo updated')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '# SALT_CRON_IDENTIFIER:foo\n'
            '0 2 * * * foo')
        ret = cron.present('foo', 'root', minute='0', hour='2')
        self.assertEqual(ret['changes'], {})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null already present')
        self.assertEqual(cron._check_cron('root', cmd, hour='3', minute='0'),
                         'update')
        ret = cron.present(cmd, 'root', minute='0', hour='3')
        self.assertEqual(ret['changes'],
                         {'root': 'find /var/www -type f -mtime -7 -print0 | '
                                  'xargs -0 clamscan -i --no-summary 2>/dev/null'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null updated')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '0 3 * * * find /var/www -type f -mtime -7 -print0 |'
            ' xargs -0 clamscan -i --no-summary 2>/dev/null')
        self.assertEqual(cron._check_cron(
            'root', cmd, hour='2', minute='0', identifier='1'), 'update')
        ret = cron.present(cmd, 'root', minute='0', hour='2', identifier='1')
        self.assertEqual(
            ret['changes'],
            {'root': 'find /var/www -type f -mtime -7 -print0 | '
                     'xargs -0 clamscan -i --no-summary 2>/dev/null'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null updated')
        self.assertEqual(cron._check_cron(
            'root', cmd, hour='3', minute='0', identifier='1'), 'update')
        ret = cron.present(cmd, 'root', minute='0', hour='3', identifier='1')
        self.assertEqual(ret['changes'],
                         {'root': 'find /var/www -type f -mtime -7 -print0 | '
                                  'xargs -0 clamscan -i --no-summary 2>/dev/null'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 '
            '| xargs -0 clamscan -i --no-summary 2>/dev/null updated')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '# SALT_CRON_IDENTIFIER:1\n'
            '0 3 * * * find /var/www -type f -mtime -7 -print0 |'
            ' xargs -0 clamscan -i --no-summary 2>/dev/null')

        set_crontab(
            '# Lines below here are managed by Salt, do not edit\n'
            '0 2 * * * find /var/www -type f '
            '-mtime -7 -print0 | xargs -0 '
            'clamscan -i --no-summary 2>/dev/null'
        )
        self.assertEqual(cron._check_cron(
            'root', cmd + "a", hour='2', minute='0', identifier='1'), 'absent')
        ret = cron.present(
            cmd + "a", 'root', minute='0', hour='2', identifier='1')
        self.assertEqual(
            ret['changes'],
            {'root': 'find /var/www -type f -mtime -7 -print0 | '
                     'xargs -0 clamscan -i --no-summary 2>/dev/nulla'})
        self.assertEqual(
            ret['comment'],
            'Cron find /var/www -type f -mtime -7 -print0 | '
            'xargs -0 clamscan -i --no-summary 2>/dev/nulla added '
            'to root\'s crontab')
        self.assertEqual(
            get_crontab(),
            '# Lines below here are managed by Salt, do not edit\n'
            '0 2 * * *'
            ' find /var/www -type f -mtime -7 -print0'
            ' | xargs -0 clamscan -i --no-summary 2>/dev/null\n'
            '# SALT_CRON_IDENTIFIER:1\n'
            '0 2 * * *'
            ' find /var/www -type f -mtime -7 -print0'
            ' | xargs -0 clamscan -i --no-summary 2>/dev/nulla')

        self.assertEqual(ret['comment'], 'Cron foo already present')

if __name__ == '__main__':
    from integration import run_tests

@ -1,256 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''

# Import Python Libs
from __future__ import absolute_import
import os

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
    MagicMock,
    patch,
    NO_MOCK,
    NO_MOCK_REASON
)

ensure_in_syspath('../../')

# Import Salt Libs
from salt.states import git

# Globals
git.__salt__ = {}
git.__grains__ = {}
git.__opts__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class GitTestCase(TestCase):
    '''
    Validate the git state
    '''
    def test_latest(self):
        '''
        Test to make sure the repository is cloned and is up to date
        '''
        arg = ["git@gitlab.example.com:user/website.git"]
        ret = {'changes': {'new': 'git@gitlab.example.com:user/website.git',
                           'revision': None},
               'comment': 'Repository git@gitlab.example.'
                          'com:user/website.git cloned to salt',
               'name': 'git@gitlab.example.com:user/website.git',
               'result': True}

        mock = MagicMock(return_value={'result': False, 'comment': '"rev"'
                                       'is not compatible with the "mirror"'
                                       'and "bare" arguments'})
        with patch.object(git, '_fail', mock):
            self.assertDictEqual(git.latest("git@gitlab.example."
                                            "com:user/website.git",
                                            True,
                                            mirror=True,
                                            bare=True),
                                 {'comment': '"rev"is not compatible with the'
                                             ' "mirror"and "bare" arguments',
                                  'result': False})

        mock = MagicMock(return_value={'result': False,
                                       'comment': '"target" option'
                                                  ' is required'})
        with patch.object(git, '_fail', mock):
            self.assertDictEqual(git.latest("git@gitlab.example.com:"
                                            "user/website.git"),
                                 {'comment': '"target" option is required',
                                  'result': False})

        with patch.dict(git.__grains__, {"shell": True}):
            mock = MagicMock(return_value={'comment': 'onlyif execution'
                                           ' failed', 'skip_watch': True,
                                           'result': True})
            with patch.object(git, 'mod_run_check', mock):
                self.assertDictEqual(git.latest("git@gitlab.example.com:"
                                                "user/website.git",
                                                target="/usr/share/nginx/prod",
                                                onlyif=True),
                                     {'changes': {},
                                      'comment': 'onlyif execution failed',
                                      'name': 'git@gitlab.example.com:'
                                              'user/website.git',
                                      'result': True,
                                      'skip_watch': True})

            mock = MagicMock(return_value="salt")
            with patch.object(git, 'mod_run_check', mock):
                mock = MagicMock(return_value=True)
                with patch.object(os.path, 'isdir', mock):
                    mock = MagicMock(return_value=Exception)
                    with patch.dict(git.__salt__, {'git.revision': mock}):
                        mock = MagicMock(return_value="salt")
                        with patch.object(git, '_fail', mock):
                            self.assertEqual(git.latest("git@gitl"
                                                        "ab.example.com:user"
                                                        "/website.git",
                                                        target="/usr/share/n"
                                                               "ginx/prod"),
                                             "salt")

                    mock = MagicMock(return_value="salt")
                    with patch.dict(git.__salt__, {'git.revision': mock}):
                        with patch.dict(git.__salt__,
                                        {'git.current_branch': mock}):
                            mock = MagicMock(return_value=None)
                            with patch.dict(git.__salt__,
                                            {'git.ls_remote': mock}):
                                with patch.dict(git.__opts__, {'test': True}):
                                    mock = MagicMock(return_value=["salt"])
                                    with patch.object(git,
                                                      '_neutral_test', mock):
                                        self.assertListEqual(git.latest(arg[0],
                                                                        None,
                                                                        "salt"),
                                                             ["salt"])

                                with patch.dict(git.__opts__, {'test': False}):
                                    mock = MagicMock(return_value=[arg[0]])
                                    with patch.dict(git.__salt__,
                                                    {'git.remote_get': mock}):
                                        mock = MagicMock(return_value=0)
                                        with patch.dict(git.__salt__,
                                                        {'cmd.retcode': mock}):
                                            sub_test_latest(self, arg)

                mock = MagicMock(return_value=False)
                with patch.object(os.path, 'isdir', mock):
                    mock = MagicMock(side_effect=[False, True])
                    with patch.object(os.path, 'isdir', mock):
                        mock = MagicMock(return_value=True)
                        with patch.object(os, 'listdir', mock):
                            mock = MagicMock(return_value=["salt"])
                            with patch.object(git, '_fail', mock):
                                self.assertListEqual(git.latest(arg[0], None,
                                                                "salt"),
                                                     ["salt"])

                    with patch.dict(git.__opts__, {'test': True}):
                        mock = MagicMock(return_value=["salt"])
                        with patch.object(git, '_neutral_test', mock):
                            self.assertListEqual(git.latest(arg[0], None,
                                                            "salt"), ["salt"])

                    with patch.dict(git.__opts__, {'test': False}):
                        mock = MagicMock(side_effect=[Exception, True])
                        with patch.dict(git.__salt__, {'git.clone': mock}):
                            mock = MagicMock(return_value=["salt"])
                            with patch.object(git, '_fail', mock):
                                self.assertListEqual(git.latest(arg[0], None,
                                                                "salt"),
                                                     ["salt"])

                                self.assertEqual(git.latest(arg[0], None, "salt",
                                                            bare=True), ret)

    def test_present(self):
        '''
        Test to make sure the repository is present
        '''
        arg = ["git@gitlab.example.com:user/website.git"]
        ret = [{'changes': {},
                'comment': '',
                'name': 'git@gitlab.example.com:user/website.git',
                'result': True},
               {'changes': {'new repository': 'git@gitlab.example'
                            '.com:user/website.git'},
                'comment': 'Initialized repository git@gitlab.'
                           'example.com:user/website.git',
                'name': 'git@gitlab.example.com:user/website.git',
                'result': True}
               ]

        mock = MagicMock(return_value=True)
        with patch.object(os.path, 'isdir', mock):
            with patch.object(os.path, 'isfile', mock):
                self.assertDictEqual(git.present(arg[0], True), ret[0])

            self.assertDictEqual(git.present(arg[0], None), ret[0])

            with patch.object(os, 'listdir', mock):
                mock = MagicMock(return_value=["salt"])
                with patch.object(git, '_fail', mock):
                    self.assertListEqual(git.present(arg[0]), ["salt"])

        mock = MagicMock(return_value=False)
        with patch.object(os.path, 'isdir', mock):
            with patch.dict(git.__opts__, {'test': True}):
                mock = MagicMock(return_value="Dude")
                with patch.object(git, '_neutral_test', mock):
                    self.assertEqual(git.present(arg[0]), "Dude")

            with patch.dict(git.__opts__, {'test': False}):
                with patch.dict(git.__salt__, {'git.init': mock}):
                    self.assertDictEqual(git.present(arg[0]), ret[1])

    def test_config(self):
        '''
        Test to manage a git config setting
        '''
        arg = ["git@gitlab.example.com:user/website.git"]
        ret = [{'changes': {},
                'comment': 'No changes made',
                'name': 'git@gitlab.example.com:user/website.git',
                'result': True}
               ]
        mock = MagicMock(return_value=True)
        with patch.dict(git.__salt__, {'git.config_get': mock}):
            self.assertDictEqual(git.config(arg[0], True), ret[0])

    def test_mod_run_check(self):
        '''
        Test to execute the onlyif and unless logic.
        '''
        ret = [{'comment': 'onlyif execution failed',
                'result': True,
                'skip_watch': True},
               {'comment': 'unless execution succeeded',
                'result': True,
                'skip_watch': True}
               ]
        run_check_cmd_kwargs = {}
        run_check_cmd_kwargs['shell'] = "Salt"
        mock = MagicMock(side_effect=[1, 0])
        with patch.dict(git.__salt__, {'cmd.retcode': mock}):
            self.assertDictEqual(git.mod_run_check(run_check_cmd_kwargs,
                                                   True,
                                                   False),
                                 ret[0])

            self.assertDictEqual(git.mod_run_check(run_check_cmd_kwargs,
                                                   False,
                                                   True),
                                 ret[1])

            self.assertTrue(git.mod_run_check(run_check_cmd_kwargs, False, False))


def sub_test_latest(self, arg):
    '''
    Sub part of test_latest
    '''
    mock = MagicMock(return_value=0)
    with patch.dict(git.__salt__, {'git.checkout': mock}):
        mock = MagicMock(return_value=0)
        with patch.dict(git.__salt__, {'git.config_get': mock}):
            with patch.dict(git.__opts__, {'test': True}):
                mock = MagicMock(return_value="salt")
                with patch.object(git, '_neutral_test', mock):
                    self.assertEqual(git.latest(arg[0],
                                                True,
                                                "salt"),
                                     "salt")


if __name__ == '__main__':
    from integration import run_tests
    run_tests(GitTestCase, needs_daemon=False)

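Note: the deleted test above, like the rest of these unit tests, relies on
one idiom throughout: patch.dict temporarily injects mocks into the
module-level __salt__/__opts__ dicts that Salt normally populates at loader
time, and restores them on exit. A self-contained sketch of the idiom (using
the stdlib unittest.mock for illustration; names here are made up):

    from unittest.mock import MagicMock, patch

    __salt__ = {}  # stand-in for the dunder dict Salt injects into state modules

    def check():
        # a state function would look up execution-module calls like this
        return __salt__['cmd.retcode']('true')

    mock = MagicMock(return_value=0)
    with patch.dict(__salt__, {'cmd.retcode': mock}):
        assert check() == 0               # mocked while inside the context
    assert 'cmd.retcode' not in __salt__  # patch.dict restored the dict
    mock.assert_called_once_with('true')
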
@@ -22,7 +22,7 @@ ensure_in_syspath('../../')
from salt.states import openstack_config

openstack_config.__salt__ = {}
openstack_config.__opts__ = {}
openstack_config.__opts__ = {'test': False}


@skipIf(NO_MOCK, NO_MOCK_REASON)

@@ -36,28 +36,43 @@ class RegTestCase(TestCase):
        '''
        Test to set a registry entry.
        '''
        name = 'HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version'
        value = '0.15.3'
        name = 'HKEY_CURRENT_USER\\SOFTWARE\\Salt'
        vname = 'version'
        vdata = '0.15.3'

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': '{0} is already configured'.format(name)}
               'comment': '{0} in {1} is already configured'.format(vname, name)}

        mock = MagicMock(side_effect=[{'vdata': value}, {'vdata': 'a'}, {'vdata': 'a'}])
        mock_read = MagicMock(side_effect=[{'vdata': vdata, 'success': True},
                                           {'vdata': 'a', 'success': True},
                                           {'vdata': 'a', 'success': True}])
        mock_t = MagicMock(return_value=True)
        with patch.dict(reg.__salt__, {'reg.read_value': mock,
        with patch.dict(reg.__salt__, {'reg.read_value': mock_read,
                                       'reg.set_value': mock_t}):
            self.assertDictEqual(reg.present(name, value), ret)
            self.assertDictEqual(reg.present(name,
                                             vname=vname,
                                             vdata=vdata), ret)

            with patch.dict(reg.__opts__, {'test': True}):
                ret.update({'comment': '', 'result': None,
                            'changes': {'reg': 'configured to 0.15.3'}})
                self.assertDictEqual(reg.present(name, value), ret)
                            'changes': {'reg': {'Will add': {'Key': name,
                                                             'Entry': vname,
                                                             'Value': vdata}}}})
                self.assertDictEqual(reg.present(name,
                                                 vname=vname,
                                                 vdata=vdata), ret)

            with patch.dict(reg.__opts__, {'test': False}):
                ret.update({'result': True})
                self.assertDictEqual(reg.present(name, value), ret)
                ret.update({'comment': 'Added {0} to {0}'.format(name),
                            'result': True,
                            'changes': {'reg': {'Added': {'Key': name,
                                                          'Entry': vname,
                                                          'Value': vdata}}}})
                self.assertDictEqual(reg.present(name,
                                                 vname=vname,
                                                 vdata=vdata), ret)

    # 'absent' function tests: 1

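Note: the reworked assertions reflect the reg state's new call shape: the
registry key path (name) no longer carries the value name, which moves to
vname, with its data in vdata, and the changes payload becomes a structured
dict. A quick standalone illustration of the dry-run payload the updated
test expects (values copied from the test above; this is not the state's
own code):

    name = 'HKEY_CURRENT_USER\\SOFTWARE\\Salt'
    vname = 'version'
    vdata = '0.15.3'
    # shape of ret['changes'] under test=True, per the assertion above
    expected = {'reg': {'Will add': {'Key': name,
                                     'Entry': vname,
                                     'Value': vdata}}}
    print(expected)
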
@@ -65,27 +80,35 @@ class RegTestCase(TestCase):
        '''
        Test to remove a registry entry.
        '''
        name = 'HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version'
        name = 'HKEY_CURRENT_USER\\SOFTWARE\\Salt'
        vname = 'version'

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': '{0} is already absent'.format(name)}

        mock = MagicMock(side_effect=[{'success': False}, {'success': True}, {'success': True}])
        mock_read = MagicMock(side_effect=[{'success': False},
                                           {'success': False},
                                           {'success': True},
                                           {'success': True}])
        mock_t = MagicMock(return_value=True)
        with patch.dict(reg.__salt__, {'reg.read_value': mock,
        with patch.dict(reg.__salt__, {'reg.read_value': mock_read,
                                       'reg.delete_value': mock_t}):
            self.assertDictEqual(reg.absent(name), ret)
            self.assertDictEqual(reg.absent(name, vname), ret)

            with patch.dict(reg.__opts__, {'test': True}):
                ret.update({'comment': '', 'result': None,
                            'changes': {'reg': 'Removed {0}'.format(name)}})
                self.assertDictEqual(reg.absent(name), ret)
                            'changes': {'reg': {'Will remove': {'Entry': vname,
                                                                'Key': name}}}})
                self.assertDictEqual(reg.absent(name, vname), ret)

            with patch.dict(reg.__opts__, {'test': False}):
                ret.update({'result': True})
                self.assertDictEqual(reg.absent(name), ret)
                ret.update({'result': True,
                            'changes': {'reg': {'Removed': {'Entry': vname,
                                                            'Key': name}}},
                            'comment': 'Removed {0} from {0}'.format(name)})
                self.assertDictEqual(reg.absent(name, vname), ret)


if __name__ == '__main__':

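Note: mock_read above leans on side_effect sequencing: each call to the mock
consumes the next value in the list, so each dict feeds one successive
reg.read_value call across the assertions. A standalone reminder of that
behaviour (stdlib unittest.mock):

    from unittest.mock import MagicMock

    mock_read = MagicMock(side_effect=[{'success': False},
                                       {'success': True}])
    print(mock_read())  # {'success': False} -- first call
    print(mock_read())  # {'success': True}  -- second call
    # a third call would raise StopIteration once the list is exhausted
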
@@ -95,8 +95,10 @@ class TestSaltEvent(TestCase):
    def assertGotEvent(self, evt, data, msg=None):
        self.assertIsNotNone(evt, msg)
        for key in data:
            self.assertIn(key, evt, msg)
            self.assertEqual(data[key], evt[key], msg)
            self.assertIn(key, evt, '{0}: Key {1} missing'.format(msg, key))
            assertMsg = '{0}: Key {1} value mismatch, {2} != {3}'
            assertMsg = assertMsg.format(msg, key, data[key], evt[key])
            self.assertEqual(data[key], evt[key], assertMsg)

    def test_master_event(self):
        me = event.MasterEvent(SOCK_DIR, listen=False)

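Note: the rewritten loop builds a failure message that names the offending
key and both values instead of repeating the bare msg, so a mismatch is
diagnosable from the test output alone. What the formatted message looks
like (plain string formatting, standalone):

    msg, key, expected, actual = 'Event 3', 'data', 'foo1', 'foo2'
    assertMsg = '{0}: Key {1} value mismatch, {2} != {3}'
    print(assertMsg.format(msg, key, expected, actual))
    # -> Event 3: Key data value mismatch, foo1 != foo2
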
@@ -198,7 +200,7 @@ class TestSaltEvent(TestCase):
        with eventpublisher_process():
            me = event.MasterEvent(SOCK_DIR, listen=True)
            me.fire_event({'data': 'foo1'}, 'evt1')
            evt1 = me.get_event(tag='not', tags_regex=['^ev'])
            evt1 = me.get_event(tag='^ev', match_type='regex')
            self.assertGotEvent(evt1, {'data': 'foo1'})

    def test_event_matching_all(self):

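Note: this change replaces the removed tags_regex keyword with a single
match_type argument, so one tag pattern is interpreted according to the
chosen matcher. A minimal sketch of that kind of dispatch (illustrative
only, not Salt's implementation; Salt's actual matcher set may differ):

    import fnmatch
    import re

    def make_matcher(match_type):
        '''Return a (tag, pattern) predicate for the requested match style.'''
        if match_type == 'regex':
            return lambda tag, pattern: re.search(pattern, tag) is not None
        if match_type == 'fnmatch':
            return lambda tag, pattern: fnmatch.fnmatch(tag, pattern)
        return lambda tag, pattern: tag.startswith(pattern)  # default: prefix

    match = make_matcher('regex')
    print(match('evt1', '^ev'))  # True -- mirrors get_event(tag='^ev', match_type='regex')
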
@@ -236,7 +238,7 @@ class TestSaltEvent(TestCase):
        '''Test regex subscriptions cache a message until requested'''
        with eventpublisher_process():
            me = event.MasterEvent(SOCK_DIR, listen=True)
            me.subscribe_regex('1$')
            me.subscribe('e..1$', 'regex')
            me.fire_event({'data': 'foo1'}, 'evt1')
            me.fire_event({'data': 'foo2'}, 'evt2')
            evt2 = me.get_event(tag='evt2')

@@ -244,8 +246,6 @@ class TestSaltEvent(TestCase):
            self.assertGotEvent(evt2, {'data': 'foo2'})
            self.assertGotEvent(evt1, {'data': 'foo1'})

    # TODO: @driskell fix these up please
    @skipIf(True, '@driskell will fix these up')
    def test_event_multiple_clients(self):
        '''Test event is received by multiple clients'''
        with eventpublisher_process():

@@ -279,8 +279,6 @@ class TestSaltEvent(TestCase):
            evt = me.get_event(tag='testevents')
            self.assertGotEvent(evt, {'data': '{0}'.format(i)}, 'Event {0}'.format(i))

    # TODO: @driskell fix these up please
    @skipIf(True, '@driskell will fix these up')
    def test_event_many_backlog(self):
        '''Test a large number of events, send all then recv all'''
        with eventpublisher_process():
