Merge branch '2017.7' into 'develop'

Conflicts:
  - salt/modules/state.py
  - salt/modules/yumpkg.py
  - salt/modules/zypper.py
  - salt/states/pkg.py
  - salt/version.py
  - tests/unit/config/test_config.py
  - tests/unit/modules/test_zypper.py
rallytime 2017-06-28 11:31:53 -06:00
commit e512a29f08
107 changed files with 5674 additions and 611 deletions

View file

@ -91,6 +91,10 @@
# Set the default outputter used by the salt command. The default is "nested".
#output: nested
# To set a list of additional directories to search for salt outputters, set the
# outputter_dirs option.
#outputter_dirs: []
# Set the default output file used by the salt command. Default is to output
# to the CLI and not to a file. Functions the same way as the "--out-file"
# CLI option, only sets this to a single file for all salt commands.
@ -99,6 +103,9 @@
# Return minions that timeout when running commands like test.ping
#show_timeout: True
# Tell the client to display the jid when a job is published.
#show_jid: False
# By default, output is colored. To disable colored output, set the color value
# to False.
#color: True
@ -454,6 +461,27 @@
# - /etc/salt/roster.d
# - /opt/salt/some/more/rosters
# The ssh password to log in with.
#ssh_passwd: ''
#The target system's ssh port number.
#ssh_port: 22
# Comma-separated list of ports to scan.
#ssh_scan_ports: 22
# Scanning socket timeout for salt-ssh.
#ssh_scan_timeout: 0.01
# Boolean to run command via sudo.
#ssh_sudo: False
# Number of seconds to wait for a response when establishing an SSH connection.
#ssh_timeout: 60
# The user to log in as.
#ssh_user: root
# The log file of the salt-ssh command:
#ssh_log_file: /var/log/salt/ssh
@ -467,6 +495,18 @@
# authentication with minions
#ssh_use_home_key: False
# Set this to True to default salt-ssh to run with ``-o IdentitiesOnly=yes``.
# This option is intended for situations where the ssh-agent offers many
# different identities and allows ssh to ignore those identities and use the
# only one specified in options.
#ssh_identities_only: False
# List-only nodegroups for salt-ssh. Each group must be formed as either a
# comma-separated list, or a YAML list. This option is useful to group minions
# into easy-to-target groups when using salt-ssh. These groups can then be
# targeted with the normal -N argument to salt-ssh.
#ssh_list_nodegroups: {}
##### Master Module Management #####
##########################################
# Manage how master side modules are loaded.
@ -535,6 +575,11 @@
# If set to 'changes', the output will be full unless the state didn't change.
#state_output: full
# The state_output_diff setting changes whether or not the output from
# successful states is returned. Useful when even the terse output of these
# states is cluttering the logs. Set it to True to ignore them.
#state_output_diff: False
# Automatically aggregate all states that have support for mod_aggregate by
# setting to 'True'. Or pass a list of state module names to automatically
# aggregate just those types.
@ -575,6 +620,10 @@
# - /srv/salt
#
# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
#master_roots: /srv/salt-master
# When using multiple environments, each with their own top file, the
# default behaviour is an unordered merge. To prevent top files from
# being merged together and instead to only use the top file from the

View file

@ -151,7 +151,11 @@
# Set the default outputter used by the salt-call command. The default is
# "nested".
#output: nested
#
# To set a list of additional directories to search for salt outputters, set the
# outputter_dirs option.
#outputter_dirs: []
# By default output is colored. To disable colored output, set the color value
# to False.
#color: True
@ -231,7 +235,7 @@
# cause sub minion process to restart.
#auth_safemode: False
# Ping Master to ensure connection is alive (minutes).
# Ping Master to ensure connection is alive (seconds).
#ping_interval: 0
# To auto recover minions if master changes IP address (DDNS)
@ -369,6 +373,9 @@
# interface: eth0
# cidr: '10.0.0.0/8'
# The number of seconds a mine update runs.
#mine_interval: 60
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
# process communications. Set ipc_mode to 'tcp' on such systems
#ipc_mode: ipc

1230 conf/suse/master Normal file

File diff suppressed because it is too large

View file

@ -393,6 +393,19 @@ Default: ``nested``
Set the default outputter used by the salt command.
.. conf_master:: outputter_dirs
``outputter_dirs``
------------------
Default: ``[]``
A list of additional directories to search for salt outputters in.
.. code-block:: yaml
outputter_dirs: []
.. conf_master:: output_file
``output_file``
@ -408,6 +421,32 @@ CLI option, only sets this to a single file for all salt commands.
output_file: /path/output/file
.. conf_master:: show_timeout
``show_timeout``
----------------
Default: ``True``
Tell the client to show minions that have timed out.
.. code-block:: yaml
show_timeout: True
.. conf_master:: show_jid
``show_jid``
------------
Default: ``False``
Tell the client to display the jid when a job is published.
.. code-block:: yaml
show_jid: False
.. conf_master:: color
``color``
@ -819,6 +858,32 @@ that connect to a master via localhost.
presence_events: False
.. conf_master:: ping_on_rotate
``ping_on_rotate``
------------------
Default: ``False``
By default, the master AES key rotates every 24 hours. The next command
following a key rotation will trigger a key refresh from the minion which may
result in minions which do not respond to the first command after a key refresh.
To tell the master to ping all minions immediately after an AES key refresh,
set ``ping_on_rotate`` to ``True``. This should mitigate the issue where a
minion does not appear to initially respond after a key is rotated.
Note that ping_on_rotate may cause high load on the master immediately after
the key rotation event as minions reconnect. Consider this carefully if this
salt master is managing a large number of minions.
If disabled, it is recommended to handle this event by listening for the
``aes_key_rotate`` event with the ``key`` tag and acting appropriately.
.. code-block:: yaml
ping_on_rotate: False
.. conf_master:: transport
``transport``
@ -874,6 +939,97 @@ Pass in an alternative location for the salt-ssh roster file.
roster_file: /root/roster
.. conf_master:: ssh_passwd
``ssh_passwd``
--------------
Default: ``''``
The ssh password to log in with.
.. code-block:: yaml
ssh_passwd: ''
.. conf_master:: ssh_port
``ssh_port``
------------
Default: ``22``
The target system's ssh port number.
.. code-block:: yaml
ssh_port: 22
.. conf_master:: ssh_scan_ports
``ssh_scan_ports``
------------------
Default: ``22``
Comma-separated list of ports to scan.
.. code-block:: yaml
ssh_scan_ports: 22
.. conf_master:: ssh_scan_timeout
``ssh_scan_timeout``
--------------------
Default: ``0.01``
Scanning socket timeout for salt-ssh.
.. code-block:: yaml
ssh_scan_timeout: 0.01
.. conf_master:: ssh_sudo
``ssh_sudo``
------------
Default: ``False``
Boolean to run command via sudo.
.. code-block:: yaml
ssh_sudo: False
.. conf_master:: ssh_timeout
``ssh_timeout``
---------------
Default: ``60``
Number of seconds to wait for a response when establishing an SSH connection.
.. code-block:: yaml
ssh_timeout: 60
.. conf_master:: ssh_user
``ssh_user``
------------
Default: ``root``
The user to log in as.
.. code-block:: yaml
ssh_user: root
.. conf_master:: ssh_log_file
``ssh_log_file``
@ -905,6 +1061,8 @@ overridden on a per-minion basis in the roster (``minion_opts``)
ssh_minion_opts:
gpg_keydir: /root/gpg
.. conf_master:: ssh_use_home_key
``ssh_use_home_key``
--------------------
@ -917,6 +1075,41 @@ authentication with minions
ssh_use_home_key: False
.. conf_master:: ssh_identities_only
``ssh_identities_only``
-----------------------
Default: ``False``
Set this to ``True`` to default salt-ssh to run with ``-o IdentitiesOnly=yes``. This
option is intended for situations where the ssh-agent offers many different identities
and allows ssh to ignore those identities and use the only one specified in options.
.. code-block:: yaml
ssh_identities_only: False
.. conf_master:: ssh_list_nodegroups
``ssh_list_nodegroups``
-----------------------
Default: ``{}``
List-only nodegroups for salt-ssh. Each group must be formed as either a comma-separated
list, or a YAML list. This option is useful to group minions into easy-to-target groups
when using salt-ssh. These groups can then be targeted with the normal -N argument to
salt-ssh.
.. code-block:: yaml
ssh_list_nodegroups:
groupA: minion1,minion2
groupB: minion1,minion3
.. conf_master:: thin_extra_mods
``thin_extra_mods``
-------------------
@ -1577,6 +1770,21 @@ If set to 'changes', the output will be full unless the state didn't change.
state_output: full
.. conf_master:: state_output_diff
``state_output_diff``
---------------------
Default: ``False``
The state_output_diff setting changes whether or not the output from
successful states is returned. Useful when even the terse output of these
states is cluttering the logs. Set it to True to ignore them.
.. code-block:: yaml
state_output_diff: False
.. conf_master:: state_aggregate
``state_aggregate``
@ -1889,6 +2097,19 @@ Example:
For masterless Salt, this parameter must be specified in the minion config
file.
.. conf_master:: master_roots
``master_roots``
----------------
Default: ``/srv/salt-master``
A master-only copy of the file_roots dictionary, used by the state compiler.
.. code-block:: yaml
master_roots: /srv/salt-master
git: Git Remote File Server Backend
-----------------------------------

View file

@ -337,7 +337,7 @@ The user to run the Salt processes
.. conf_minion:: sudo_user
``sudo_user``
--------------
-------------
Default: ``''``
@ -628,6 +628,26 @@ With ``grains_deep_merge``, the result will be:
k1: v1
k2: v2
.. conf_minion:: grains_refresh_every
``grains_refresh_every``
------------------------
Default: ``0``
The ``grains_refresh_every`` setting allows for a minion to periodically
check its grains to see if they have changed and, if so, to inform the master
of the new grains. This operation is moderately expensive, therefore care
should be taken not to set this value too low.
Note: This value is expressed in minutes.
A value of 10 minutes is a reasonable default.
.. code-block:: yaml
grains_refresh_every: 0
.. conf_minion:: mine_enabled
``mine_enabled``
@ -661,7 +681,7 @@ return for the job cache.
mine_return_job: False
``mine_functions``
-------------------
------------------
Default: Empty
@ -679,6 +699,18 @@ Note these can be defined in the pillar for a minion as well.
interface: eth0
cidr: '10.0.0.0/8'
.. conf_minion:: mine_interval
``mine_interval``
-----------------
Default: ``60``
The number of seconds a mine update runs.
.. code-block:: yaml
mine_interval: 60
.. conf_minion:: sock_dir
@ -693,6 +725,19 @@ The directory where Unix sockets will be kept.
sock_dir: /var/run/salt/minion
.. conf_minion:: outputter_dirs
``outputter_dirs``
------------------
Default: ``[]``
A list of additional directories to search for salt outputters in.
.. code-block:: yaml
outputter_dirs: []
.. conf_minion:: backup_mode
``backup_mode``
@ -835,6 +880,20 @@ restart.
auth_safemode: False
.. conf_minion:: ping_interval
``ping_interval``
-----------------
Default: ``0``
Instructs the minion to ping its master(s) every n number of seconds. Used
primarily as a mitigation technique against minion disconnects.
.. code-block:: yaml
ping_interval: 0
.. conf_minion:: recon_default
``random_startup_delay``
@ -1522,6 +1581,22 @@ the output will be shortened to a single line.
state_output: full
.. conf_minion:: state_output_diff
``state_output_diff``
---------------------
Default: ``False``
The state_output_diff setting changes whether or not the output from
successful states is returned. Useful when even the terse output of these
states is cluttering the logs. Set it to True to ignore them.
.. code-block:: yaml
state_output_diff: False
.. conf_minion:: autoload_dynamic_modules
``autoload_dynamic_modules``

View file

@ -246,6 +246,10 @@ Server configuration values and their defaults:
# Redhat Identity Policy Audit
auth.ldap.freeipa: False
Authenticating to the LDAP Server
+++++++++++++++++++++++++++++++++
There are two phases to LDAP authentication. First, Salt authenticates to search for a user's Distinguished Name
and group membership. The user it authenticates as in this phase is often a special LDAP system user with
read-only access to the LDAP directory. After Salt searches the directory to determine the actual user's DN
@ -276,6 +280,10 @@ substitutes the ``{{ username }}`` value for the username when querying LDAP
auth.ldap.filter: uid={{ username }}
Determining Group Memberships (OpenLDAP / non-Active Directory)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
For OpenLDAP, to determine group membership, one can specify an OU that contains
group data. This is prepended to the basedn to create a search path. Then
the results are filtered against ``auth.ldap.groupclass``, default
@ -285,7 +293,16 @@ the results are filtered against ``auth.ldap.groupclass``, default
auth.ldap.groupou: Groups
When using the `ldap('DC=domain,DC=com')` eauth operator, sometimes the records returned
Note that as of 2017.7, auth.ldap.groupclass can refer to either a groupclass or an objectClass.
For some LDAP servers (notably OpenLDAP without the ``memberOf`` overlay enabled), to determine group
membership we need to know both the ``objectClass`` and the ``memberUid`` attributes. Usually for these
servers you will want an ``auth.ldap.groupclass`` of ``posixGroup`` and an ``auth.ldap.groupattribute`` of
``memberUid``.
LDAP servers with the ``memberOf`` overlay will have entries similar to ``auth.ldap.groupclass: person`` and
``auth.ldap.groupattribute: memberOf``.
When using the ``ldap('DC=domain,DC=com')`` eauth operator, sometimes the records returned
from LDAP or Active Directory have fully-qualified domain names attached, while minion IDs
instead are simple hostnames. The parameter below allows the administrator to strip
off a certain set of domain names so the hostnames looked up in the directory service
@ -295,8 +312,9 @@ can match the minion IDs.
auth.ldap.minion_stripdomains: ['.external.bigcorp.com', '.internal.bigcorp.com']
Active Directory
----------------
Determining Group Memberships (Active Directory)
++++++++++++++++++++++++++++++++++++++++++++++++
Active Directory handles group membership differently, and does not utilize the
``groupou`` configuration variable. AD needs the following options in
@ -361,5 +379,5 @@ be part of the eAuth definition, they can be specified like this:
- ldap('DC=corp,DC=example,DC=com'):
- test.echo
The string inside `ldap()` above is any valid LDAP/AD tree limiter. `OU=` in
The string inside ``ldap()`` above is any valid LDAP/AD tree limiter. ``OU=`` in
particular is permitted as long as it would return a list of computer objects.
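A minimal sketch of the two group-lookup styles described above, assuming the python-ldap library; the server URI, bind credentials, base DNs, and the username jdoe are placeholders:

import ldap  # python-ldap, assumed to be installed

conn = ldap.initialize('ldap://ldap.example.com')
conn.simple_bind_s('cn=salt,dc=example,dc=com', 'bindpassword')

# Without the memberOf overlay (plain OpenLDAP): match posixGroup entries
# whose memberUid attribute lists the user.
posix_groups = conn.search_s(
    'ou=Groups,dc=example,dc=com',
    ldap.SCOPE_SUBTREE,
    '(&(objectClass=posixGroup)(memberUid=jdoe))',
    ['cn'],
)

# With the memberOf overlay: read the group DNs directly from the user entry.
user_entry = conn.search_s(
    'dc=example,dc=com',
    ldap.SCOPE_SUBTREE,
    '(uid=jdoe)',
    ['memberOf'],
)

The first query mirrors an ``auth.ldap.groupclass: posixGroup`` / ``auth.ldap.groupattribute: memberUid`` setup, the second a ``memberOf``-based one.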

View file

@ -4,6 +4,36 @@
Salt 2017.7.0 Release Notes - Codename Nitrogen
===============================================
========
Python 3
========
The 2017.7 Salt Release adds initial Python 3 support.
The default Python version of Salt will remain Python 2, although Python 3 packages will be supplied for users who want to help test this new feature.
======================
Python 2.6 Deprecation
======================
Salt will no longer support Python 2.6. We will provide python2.7 packages on our repo_ for RedHat and CentOS 6 to ensure users can still run Salt on these platforms.
.. _repo: https://repo.saltstack.com/
============
Known Issues
============
The following salt-cloud drivers have known issues running with Python 3. These drivers will not work with Python 3, and Python 2.7 should be used instead:
- Joyent
- Any driver that relies on the `apache-libcloud` library such as cloudstack, dimensiondata, gce, nova, and openstack
- When running under Python 3, users who require Unicode support should ensure that a locale is set on their machines.
Users using the `C` locale are advised to switch to a UTF-aware locale to ensure proper functionality with Salt under Python 3.
States Added for Management of systemd Unit Masking
===================================================
@ -161,6 +191,7 @@ Wildcard Versions in :py:func:`pkg.installed <salt.states.pkg.installed>` States
- The :py:func:`pkg.installed <salt.states.pkg.installed>` state now supports
wildcards in package versions, for the following platforms:
- SUSE/openSUSE Leap/Tumbleweed
- Debian/Ubuntu
- RHEL/CentOS
- Arch Linux

View file

@ -11,7 +11,7 @@ describes the package. An example of this file is:
name: apache
os: RedHat, Debian, Ubuntu, SUSE, FreeBSD
os_family: RedHat, Debian, SUSE, FreeBSD
os_family: RedHat, Debian, Suse, FreeBSD
version: 201506
release: 2
summary: Formula for installing Apache

View file

@ -91,8 +91,6 @@ firewall.
yast2 firewall
.. _linux-iptables:
Windows
=======
@ -137,6 +135,8 @@ following command from the command line or a run prompt:
netsh advfirewall firewall add rule name="Salt" dir=in action=allow protocol=TCP localport=4505-4506
.. _linux-iptables:
iptables
========

View file

@ -1,11 +1,11 @@
[Unit]
Description=The Salt API
Documentation=man:salt-api(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target
[Service]
User=salt
Type=simple
Environment=SHELL=/bin/bash
LimitNOFILE=8192
ExecStart=/usr/bin/salt-api
TimeoutStopSec=3

View file

@ -0,0 +1,25 @@
/var/log/salt/master {
su salt salt
weekly
missingok
rotate 7
compress
notifempty
}
/var/log/salt/minion {
weekly
missingok
rotate 7
compress
notifempty
}
/var/log/salt/key {
su salt salt
weekly
missingok
rotate 7
compress
notifempty
}

View file

@ -0,0 +1,13 @@
[Unit]
Description=The Salt Master Server
Documentation=man:salt-master(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target
[Service]
LimitNOFILE=16384
Type=simple
ExecStart=/usr/bin/salt-master
TasksMax=infinity
[Install]
WantedBy=multi-user.target

23 pkg/suse/salt-minion Normal file → Executable file
View file

@ -51,8 +51,23 @@ SERVICE=salt-minion
PROCESS=salt-minion
RETVAL=0
WATCHDOG_CRON="/etc/cron.d/salt-minion"
set_watchdog() {
if [ ! -f $WATCHDOG_CRON ]; then
echo -e '* * * * * root /usr/bin/salt-daemon-watcher --with-init\n' > $WATCHDOG_CRON
# Kick the watcher for 1 minute immediately, because cron will wake up only afterwards
/usr/bin/salt-daemon-watcher --with-init & disown
fi
}
remove_watchdog() {
rm $WATCHDOG_CRON 2>/dev/null || true
kill -9 $(ps uax | grep [s]alt-daemon-watcher | awk '{print $2}') 2>/dev/null
}
start() {
set_watchdog;
echo -n $"Starting salt-minion daemon: "
if [ -f $SUSE_RELEASE ]; then
startproc -p /var/run/$SERVICE.pid $SALTMINION -d $MINION_ARGS
@ -80,6 +95,10 @@ start() {
}
stop() {
IS_RESTARTING=$1
if [ -z $IS_RESTARTING ]; then
remove_watchdog;
fi
echo -n $"Stopping salt-minion daemon: "
if [ -f $SUSE_RELEASE ]; then
killproc -TERM $SALTMINION
@ -101,8 +120,8 @@ stop() {
}
restart() {
stop
start
stop 1;
start;
}
# See how we were called.

View file

@ -0,0 +1,14 @@
[Unit]
Description=The Salt Minion
After=network.target
[Service]
Type=simple
LimitNOFILE=8192
ExecStart=/usr/bin/salt-minion
KillMode=process
Restart=on-failure
RestartSec=15
[Install]
WantedBy=multi-user.target

View file

@ -6,7 +6,6 @@ boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
GitPython>=0.3
pytest
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View file

@ -11,6 +11,5 @@ moto>=0.3.6
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
GitPython>=0.3
pytest
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View file

@ -54,9 +54,7 @@ class Beacon(object):
current_beacon_config = {}
list(map(current_beacon_config.update, config[mod]))
elif isinstance(config[mod], dict):
raise CommandExecutionError(
'Beacon configuration should be a list instead of a dictionary.'
)
current_beacon_config = config[mod]
if 'enabled' in current_beacon_config:
if not current_beacon_config['enabled']:

View file

@ -296,7 +296,9 @@ class SaltCMD(parsers.SaltCMDOptionParser):
not_connected_minions = []
failed_minions = []
for each_minion in ret:
minion_ret = ret[each_minion].get('ret')
minion_ret = ret[each_minion]
if isinstance(minion_ret, dict) and 'ret' in minion_ret:
minion_ret = ret[each_minion].get('ret')
if (
isinstance(minion_ret, string_types)
and minion_ret.startswith("Minion did not return")

View file

@ -1103,7 +1103,7 @@ ARGS = {10}\n'''.format(self.minion_config,
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = '.{0}.{1}'.format(binascii.hexlify(os.urandom(6)), extension)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self)
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
@ -1119,7 +1119,7 @@ ARGS = {10}\n'''.format(self.minion_config,
if not self.winrm:
ret = self.shell.exec_cmd('/bin/sh \'$HOME/{0}\''.format(target_shim_file))
else:
ret = saltwinshell.call_python(self)
ret = saltwinshell.call_python(self, target_shim_file)
# Remove shim from target system
if not self.winrm:

View file

@ -925,7 +925,7 @@ VALID_OPTS = {
'queue_dirs': list,
# Instructs the minion to ping its master(s) ever n number of seconds. Used
# Instructs the minion to ping its master(s) every n number of seconds. Used
# primarily as a mitigation technique against minion disconnects.
'ping_interval': int,

View file

@ -714,7 +714,7 @@ class RemoteFuncs(object):
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions,
pillar=load.get('pillar_override', {}))
pillar_override=load.get('pillar_override', {}))
pillar_dirs = {}
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):

View file

@ -1119,8 +1119,8 @@ _OS_NAME_MAP = {
'manjaro': 'Manjaro',
'antergos': 'Antergos',
'sles': 'SUSE',
'slesexpand': 'RES',
'void': 'Void',
'slesexpand': 'RES',
'linuxmint': 'Mint',
'neon': 'KDE neon',
}

View file

@ -8,13 +8,13 @@ plugin interfaces used by Salt.
# Import python libs
from __future__ import absolute_import
import os
import imp
import sys
import time
import logging
import inspect
import tempfile
import functools
import types
from collections import MutableMapping
from zipimport import zipimporter
@ -33,6 +33,15 @@ from salt.utils import is_proxy
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import reload_module
if sys.version_info[:2] >= (3, 5):
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
USE_IMPORTLIB = True
else:
import imp
USE_IMPORTLIB = False
try:
import pkg_resources
HAS_PKG_RESOURCES = True
@ -44,17 +53,25 @@ log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = 'salt.loaded'
if six.PY3:
# pylint: disable=no-member,no-name-in-module,import-error
import importlib.machinery
if USE_IMPORTLIB:
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, 'rb', 3))
SUFFIXES.append((suffix, 'rb', MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', 2))
SUFFIXES.append((suffix, 'rb', MODULE_KIND_COMPILED))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', 1))
# pylint: enable=no-member,no-name-in-module,import-error
SUFFIXES.append((suffix, 'rb', MODULE_KIND_SOURCE))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader
}
# pylint: enable=no-member
else:
SUFFIXES = imp.get_suffixes()
@ -537,7 +554,7 @@ def ssh_wrapper(opts, functions=None, context=None):
)
def render(opts, functions, states=None):
def render(opts, functions, states=None, proxy=None):
'''
Returns the render modules
'''
@ -545,6 +562,7 @@ def render(opts, functions, states=None):
'__grains__': opts.get('grains', {})}
if states:
pack['__states__'] = states
pack['__proxy__'] = proxy or {}
ret = LazyLoader(
_module_dirs(
opts,
@ -959,7 +977,7 @@ def _generate_module(name):
return
code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
module = imp.new_module(name)
module = types.ModuleType(name)
exec(code, module.__dict__)
sys.modules[name] = module
@ -1176,7 +1194,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
if self.opts.get('enable_zip_modules', True) is True:
self.suffix_map['.zip'] = tuple()
# allow for module dirs
self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
if USE_IMPORTLIB:
self.suffix_map[''] = ('', '', MODULE_KIND_PKG_DIRECTORY)
else:
self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
@ -1341,14 +1362,43 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.tag,
name)
if suffix == '':
mod = imp.load_module(mod_namespace, None, fpath, desc)
if USE_IMPORTLIB:
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
(importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
]
file_finder = importlib.machinery.FileFinder(fpath, *loader_details)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
mod = imp.load_module(mod_namespace, None, fpath, desc)
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
with salt.utils.fopen(fpath, desc[1]) as fn_:
mod = imp.load_module(mod_namespace, fn_, fpath, desc)
if USE_IMPORTLIB:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
with salt.utils.fopen(fpath, desc[1]) as fn_:
mod = imp.load_module(mod_namespace, fn_, fpath, desc)
except IOError:
raise
except ImportError as exc:

View file

@ -1331,7 +1331,7 @@ class AESFuncs(object):
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar=load.get('pillar_override', {}),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'))
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
self.fs_.update_opts()

View file

@ -52,7 +52,7 @@ def _detect_os():
os_family = __grains__['os_family']
if os_family == 'RedHat':
return 'apachectl'
elif os_family == 'Debian' or os_family == 'SUSE':
elif os_family == 'Debian' or os_family == 'Suse':
return 'apache2ctl'
else:
return 'apachectl'

View file

@ -214,7 +214,7 @@ def _gather_pillar(pillarenv, pillar_override):
__grains__,
__opts__['id'],
__opts__['environment'],
pillar=pillar_override,
pillar_override=pillar_override,
pillarenv=pillarenv
)
ret = pillar.compile_pillar()

View file

@ -49,7 +49,7 @@ def _gather_pillar(pillarenv, pillar_override):
__grains__,
__opts__['id'],
__opts__['environment'],
pillar=pillar_override,
pillar_override=pillar_override,
pillarenv=pillarenv
)
ret = pillar.compile_pillar()

View file

@ -5135,7 +5135,7 @@ def _gather_pillar(pillarenv, pillar_override, **grains):
# Not sure if these two are correct
__opts__['id'],
__opts__['environment'],
pillar=pillar_override,
pillar_override=pillar_override,
pillarenv=pillarenv
)
ret = pillar.compile_pillar()

View file

@ -6,26 +6,25 @@ Return/control aspects of the grains data
# Import python libs
from __future__ import absolute_import, print_function
import os
import copy
import math
import random
import logging
import operator
import collections
import json
import math
from functools import reduce # pylint: disable=redefined-builtin
# Import 3rd-party libs
import yaml
import salt.utils.compat
from salt.utils.odict import OrderedDict
import salt.ext.six as six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
# Import salt libs
import salt.utils
import salt.utils.yamldumper
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
from salt.ext.six.moves import range
__proxyenabled__ = ['*']
@ -252,23 +251,7 @@ def setvals(grains, destructive=False):
else:
grains[key] = val
__grains__[key] = val
# Cast defaultdict to dict; is there a more central place to put this?
try:
yaml_reps = copy.deepcopy(yaml.representer.SafeRepresenter.yaml_representers)
yaml_multi_reps = copy.deepcopy(yaml.representer.SafeRepresenter.yaml_multi_representers)
except (TypeError, NameError):
# This likely means we are running under Python 2.6 which cannot deepcopy
# bound methods. Fallback to a modification of deepcopy which can support
# this behavior.
yaml_reps = salt.utils.compat.deepcopy_bound(yaml.representer.SafeRepresenter.yaml_representers)
yaml_multi_reps = salt.utils.compat.deepcopy_bound(yaml.representer.SafeRepresenter.yaml_multi_representers)
yaml.representer.SafeRepresenter.add_representer(collections.defaultdict,
yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict)
cstr = yaml.safe_dump(grains, default_flow_style=False)
yaml.representer.SafeRepresenter.yaml_representers = yaml_reps
yaml.representer.SafeRepresenter.yaml_multi_representers = yaml_multi_reps
cstr = salt.utils.yamldumper.safe_dump(grains, default_flow_style=False)
try:
with salt.utils.fopen(gfn, 'w+') as fp_:
fp_.write(cstr)
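The block removed above existed because PyYAML's ``safe_dump`` cannot serialize ``defaultdict`` (or ``OrderedDict``) without a registered representer; the new ``salt.utils.yamldumper.safe_dump`` call presumably centralizes that handling. A minimal sketch of the underlying PyYAML workaround, for illustration only:

import collections

import yaml

grains = collections.defaultdict(list)
grains['roles'].append('web')

# Teach the safe representer to treat defaultdict like a plain dict;
# without this, yaml.safe_dump() raises a RepresenterError.
yaml.representer.SafeRepresenter.add_representer(
    collections.defaultdict,
    yaml.representer.SafeRepresenter.represent_dict,
)
print(yaml.safe_dump(grains, default_flow_style=False))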

View file

@ -87,7 +87,7 @@ class Inspector(EnvLoader):
'''
if self.grains_core.os_data().get('os_family') == 'Debian':
return self.__get_cfg_pkgs_dpkg()
elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']:
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
return self.__get_cfg_pkgs_rpm()
else:
return dict()
@ -163,7 +163,7 @@ class Inspector(EnvLoader):
if self.grains_core.os_data().get('os_family') == 'Debian':
cfg_data = salt.utils.to_str(self._syscall("dpkg", None, None, '--verify',
pkg_name)[0]).split(os.linesep)
elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']:
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
cfg_data = salt.utils.to_str(self._syscall("rpm", None, None, '-V', '--nodeps', '--nodigest',
'--nosignature', '--nomtime', '--nolinkto',
pkg_name)[0]).split(os.linesep)
@ -240,7 +240,7 @@ class Inspector(EnvLoader):
'''
if self.grains_core.os_data().get('os_family') == 'Debian':
return self.__get_managed_files_dpkg()
elif self.grains_core.os_data().get('os_family') in ['SUSE', 'redhat']:
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
return self.__get_managed_files_rpm()
return list(), list(), list()

View file

@ -104,7 +104,7 @@ def _conf(family='ipv4'):
return '/var/lib/ip6tables/rules-save'
else:
return '/var/lib/iptables/rules-save'
elif __grains__['os_family'] == 'SUSE':
elif __grains__['os_family'] == 'Suse':
# SuSE does not seem to use separate files for IPv4 and IPv6
return '/etc/sysconfig/scripts/SuSEfirewall2-custom'
elif __grains__['os_family'] == 'Void':

1332 salt/modules/kubernetes.py Normal file

File diff suppressed because it is too large

View file

@ -139,6 +139,63 @@ def _netstat_linux():
return ret
def _ss_linux():
'''
Return ss information for Linux distros
(netstat is deprecated and may not be available)
'''
ret = []
cmd = 'ss -tulpnea'
out = __salt__['cmd.run'](cmd)
for line in out.splitlines():
comps = line.split()
ss_user = 0
ss_inode = 0
ss_program = ''
length = len(comps)
if line.startswith('tcp') or line.startswith('udp'):
i = 6
while i < (length - 1):
fields = comps[i].split(":")
if fields[0] == "users":
users = fields[1].split(",")
ss_program = users[0].split("\"")[1]
if fields[0] == "uid":
ss_user = fields[1]
if fields[0] == "ino":
ss_inode = fields[1]
i += 1
if line.startswith('tcp'):
ss_state = comps[1]
if ss_state == "ESTAB":
ss_state = "ESTABLISHED"
ret.append({
'proto': comps[0],
'recv-q': comps[2],
'send-q': comps[3],
'local-address': comps[4],
'remote-address': comps[5],
'state': ss_state,
'user': ss_user,
'inode': ss_inode,
'program': ss_program})
if line.startswith('udp'):
ret.append({
'proto': comps[0],
'recv-q': comps[2],
'send-q': comps[3],
'local-address': comps[4],
'remote-address': comps[5],
'user': ss_user,
'inode': ss_inode,
'program': ss_program})
return ret
def _netinfo_openbsd():
'''
Get process information for network connections using fstat
@ -409,7 +466,7 @@ def _netstat_route_linux():
'destination': comps[0],
'gateway': comps[1],
'netmask': '',
'flags': comps[3],
'flags': comps[2],
'interface': comps[5]})
elif len(comps) == 7:
ret.append({
@ -417,13 +474,109 @@ def _netstat_route_linux():
'destination': comps[0],
'gateway': comps[1],
'netmask': '',
'flags': comps[3],
'flags': comps[2],
'interface': comps[6]})
else:
continue
return ret
def _ip_route_linux():
'''
Return ip routing information for Linux distros
(netstat is deprecated and may not be available)
'''
# table main closest to old netstat inet output
ret = []
cmd = 'ip -4 route show table main'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
# need to fake similar output to that provided by netstat
# to maintain output format
if comps[0] == "unreachable":
continue
if comps[0] == "default":
ip_interface = ''
if comps[3] == "dev":
ip_interface = comps[4]
ret.append({
'addr_family': 'inet',
'destination': '0.0.0.0',
'gateway': comps[2],
'netmask': '0.0.0.0',
'flags': 'UG',
'interface': ip_interface})
else:
address_mask = convert_cidr(comps[0])
ip_interface = ''
if comps[1] == "dev":
ip_interface = comps[2]
ret.append({
'addr_family': 'inet',
'destination': address_mask['network'],
'gateway': '0.0.0.0',
'netmask': address_mask['netmask'],
'flags': 'U',
'interface': ip_interface})
# table all closest to old netstat inet6 output
cmd = 'ip -6 route show table all'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
# need to fake similar output to that provided by netstat
# to maintain output format
if comps[0] == "unreachable":
continue
if comps[0] == "default":
ip_interface = ''
if comps[3] == "dev":
ip_interface = comps[4]
ret.append({
'addr_family': 'inet6',
'destination': '::',
'gateway': comps[2],
'netmask': '',
'flags': 'UG',
'interface': ip_interface})
elif comps[0] == "local":
ip_interface = ''
if comps[2] == "dev":
ip_interface = comps[3]
local_address = comps[1] + "/128"
ret.append({
'addr_family': 'inet6',
'destination': local_address,
'gateway': '::',
'netmask': '',
'flags': 'U',
'interface': ip_interface})
else:
address_mask = convert_cidr(comps[0])
ip_interface = ''
if comps[1] == "dev":
ip_interface = comps[2]
ret.append({
'addr_family': 'inet6',
'destination': comps[0],
'gateway': '::',
'netmask': '',
'flags': 'U',
'interface': ip_interface})
return ret
def _netstat_route_freebsd():
'''
Return netstat routing information for FreeBSD and macOS
@ -607,7 +760,10 @@ def netstat():
salt '*' network.netstat
'''
if __grains__['kernel'] == 'Linux':
return _netstat_linux()
if not salt.utils.which('netstat'):
return _ss_linux()
else:
return _netstat_linux()
elif __grains__['kernel'] in ('OpenBSD', 'FreeBSD', 'NetBSD'):
return _netstat_bsd()
elif __grains__['kernel'] == 'SunOS':
@ -1445,7 +1601,10 @@ def routes(family=None):
raise CommandExecutionError('Invalid address family {0}'.format(family))
if __grains__['kernel'] == 'Linux':
routes_ = _netstat_route_linux()
if not salt.utils.which('netstat'):
routes_ = _ip_route_linux()
else:
routes_ = _netstat_route_linux()
elif __grains__['kernel'] == 'SunOS':
routes_ = _netstat_route_sunos()
elif __grains__['os'] in ['FreeBSD', 'MacOS', 'Darwin']:

View file

@ -188,7 +188,7 @@ def _extract_json(npm_output):
# macOS with fsevents includes the following line in the return
# when a new module is installed which is invalid JSON:
# [fsevents] Success: "..."
while lines and lines[0].startswith('[fsevents]'):
while lines and (lines[0].startswith('[fsevents]') or lines[0].startswith('Pass ')):
lines = lines[1:]
try:
return json.loads(''.join(lines))
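A self-contained sketch of the filtering idea above: drop the known non-JSON noise lines that npm can print before the JSON document, then parse the remainder (the sample output string is made up):

import json


def extract_json(npm_output):
    lines = npm_output.splitlines()
    # Skip noise such as '[fsevents] Success: ...' or 'Pass ...' lines that
    # some npm/fsevents versions emit before the JSON document.
    while lines and (lines[0].startswith('[fsevents]') or lines[0].startswith('Pass ')):
        lines = lines[1:]
    return json.loads('\n'.join(lines))


print(extract_json('[fsevents] Success: "built"\n{"added": 3}'))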

View file

@ -257,8 +257,8 @@ def items(*args, **kwargs):
__opts__,
__grains__,
__opts__['id'],
pillar=pillar_override,
pillarenv=pillarenv)
pillar_override=kwargs.get('pillar'),
pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv'])
return pillar.compile_pillar()
@ -465,7 +465,7 @@ def ext(external, pillar=None):
__opts__['id'],
__opts__['environment'],
ext=external,
pillar=pillar)
pillar_override=pillar)
ret = pillar_obj.compile_pillar()

View file

@ -17,6 +17,7 @@ import re
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
import salt.utils.decorators as decorators
import salt.ext.six as six
# pylint: disable=import-error
try:
@ -655,6 +656,7 @@ def lsof(name):
return ret
@decorators.which('netstat')
def netstat(name):
'''
Retrieve the netstat information of the given process name.
@ -676,6 +678,31 @@ def netstat(name):
return ret
@decorators.which('ss')
def ss(name):
'''
Retrieve the ss information of the given process name.
CLI Example:
.. code-block:: bash
salt '*' ps.ss apache2
.. versionadded:: 2016.11.6
'''
sanitize_name = str(name)
ss_infos = __salt__['cmd.run']("ss -neap")
found_infos = []
ret = []
for info in ss_infos.splitlines():
if info.find(sanitize_name) != -1:
found_infos.append(info)
ret.extend([sanitize_name, found_infos])
return ret
def psaux(name):
'''
Retrieve information corresponding to a "ps aux" filtered

View file

@ -78,7 +78,8 @@ def _get_top_file_envs():
return __context__['saltutil._top_file_envs']
except KeyError:
try:
st_ = salt.state.HighState(__opts__)
st_ = salt.state.HighState(__opts__,
initial_pillar=__pillar__)
top = st_.get_top()
if top:
envs = list(st_.top_matches(top).keys()) or 'base'
@ -189,10 +190,14 @@ def sync_beacons(saltenv=None, refresh=True, extmod_whitelist=None, extmod_black
Sync beacons from ``salt://_beacons`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for beacons to sync. If no top files are
found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available beacons on the minion. This refresh
will be performed even if no new beacons are synced. Set to ``False``
@ -224,10 +229,14 @@ def sync_sdb(saltenv=None, extmod_whitelist=None, extmod_blacklist=None):
Sync sdb modules from ``salt://_sdb`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for sdb modules to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : False
This argument has no effect and is included for consistency with the
other sync functions.
@ -256,10 +265,14 @@ def sync_modules(saltenv=None, refresh=True, extmod_whitelist=None, extmod_black
Sync execution modules from ``salt://_modules`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for execution modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new execution modules are
@ -308,10 +321,14 @@ def sync_states(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackl
Sync state modules from ``salt://_states`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for state modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available states on the minion. This refresh
will be performed even if no new state modules are synced. Set to
@ -376,10 +393,14 @@ def sync_grains(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackl
Sync grains modules from ``salt://_grains`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for grains modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules and recompile
pillar data for the minion. This refresh will be performed even if no
@ -413,10 +434,14 @@ def sync_renderers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_bla
Sync renderers from ``salt://_renderers`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for renderers to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new renderers are synced.
@ -449,10 +474,14 @@ def sync_returners(saltenv=None, refresh=True, extmod_whitelist=None, extmod_bla
Sync returners from ``salt://_returners`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for returners to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new returners are synced. Set
@ -483,10 +512,14 @@ def sync_proxymodules(saltenv=None, refresh=False, extmod_whitelist=None, extmod
Sync proxy modules from ``salt://_proxy`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for proxy modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new proxy modules are synced.
@ -518,10 +551,14 @@ def sync_engines(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blac
Sync engine modules from ``salt://_engines`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for engines to sync. If no top files are
found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new engine modules are synced.
@ -550,10 +587,14 @@ def sync_output(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackl
'''
Sync outputters from ``salt://_output`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for outputters to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new outputters are synced.
@ -622,10 +663,14 @@ def sync_utils(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackli
Sync utility modules from ``salt://_utils`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for utility modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new utility modules are
@ -681,10 +726,14 @@ def sync_log_handlers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_
Sync log handlers from ``salt://_log_handlers`` to the minion
saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for log handlers to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new log handlers are synced.

View file

@ -171,7 +171,7 @@ def _replace_auth_key(
)
def _validate_keys(key_file):
def _validate_keys(key_file, fingerprint_hash_type):
'''
Return a dict containing validated keys in the passed file
'''
@ -207,7 +207,7 @@ def _validate_keys(key_file):
enc = comps[0]
key = comps[1]
comment = ' '.join(comps[2:])
fingerprint = _fingerprint(key)
fingerprint = _fingerprint(key, fingerprint_hash_type)
if fingerprint is None:
continue
@ -223,7 +223,7 @@ def _validate_keys(key_file):
return ret
def _fingerprint(public_key, fingerprint_hash_type=None):
def _fingerprint(public_key, fingerprint_hash_type):
'''
Return a public key fingerprint based on its base64-encoded representation
@ -246,9 +246,6 @@ def _fingerprint(public_key, fingerprint_hash_type=None):
if fingerprint_hash_type:
hash_type = fingerprint_hash_type.lower()
else:
# Set fingerprint_hash_type to md5 as default
log.warning('Public Key hashing currently defaults to "md5". This will '
'change to "sha256" in the 2017.7.0 release.')
hash_type = 'sha256'
try:
@ -358,7 +355,9 @@ def host_keys(keydir=None, private=True, certs=True):
return keys
def auth_keys(user=None, config='.ssh/authorized_keys'):
def auth_keys(user=None,
config='.ssh/authorized_keys',
fingerprint_hash_type=None):
'''
Return the authorized keys for users
@ -388,7 +387,7 @@ def auth_keys(user=None, config='.ssh/authorized_keys'):
pass
if full and os.path.isfile(full):
keys[u] = _validate_keys(full)
keys[u] = _validate_keys(full, fingerprint_hash_type)
if old_output_when_one_user:
if user[0] in keys:
@ -402,7 +401,8 @@ def auth_keys(user=None, config='.ssh/authorized_keys'):
def check_key_file(user,
source,
config='.ssh/authorized_keys',
saltenv='base'):
saltenv='base',
fingerprint_hash_type=None):
'''
Check a keyfile from a source destination against the local keys and
return the keys to change
@ -416,7 +416,7 @@ def check_key_file(user,
keyfile = __salt__['cp.cache_file'](source, saltenv)
if not keyfile:
return {}
s_keys = _validate_keys(keyfile)
s_keys = _validate_keys(keyfile, fingerprint_hash_type)
if not s_keys:
err = 'No keys detected in {0}. Is file properly ' \
'formatted?'.format(source)
@ -432,12 +432,19 @@ def check_key_file(user,
s_keys[key]['enc'],
s_keys[key]['comment'],
s_keys[key]['options'],
config)
config=config,
fingerprint_hash_type=fingerprint_hash_type)
return ret
def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys',
cache_keys=None):
def check_key(user,
key,
enc,
comment,
options,
config='.ssh/authorized_keys',
cache_keys=None,
fingerprint_hash_type=None):
'''
Check to see if a key needs updating, returns "update", "add" or "exists"
@ -450,7 +457,9 @@ def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys',
if cache_keys is None:
cache_keys = []
enc = _refine_enc(enc)
current = auth_keys(user, config)
current = auth_keys(user,
config=config,
fingerprint_hash_type=fingerprint_hash_type)
nline = _format_auth_line(key, enc, comment, options)
# Removing existing keys from the auth_keys isn't really a good idea
@ -479,9 +488,10 @@ def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys',
def rm_auth_key_from_file(user,
source,
config='.ssh/authorized_keys',
saltenv='base'):
source,
config='.ssh/authorized_keys',
saltenv='base',
fingerprint_hash_type=None):
'''
Remove an authorized key from the specified user's authorized key file,
using a file as source
@ -498,7 +508,7 @@ def rm_auth_key_from_file(user,
'Failed to pull key file from salt file server'
)
s_keys = _validate_keys(lfile)
s_keys = _validate_keys(lfile, fingerprint_hash_type)
if not s_keys:
err = (
'No keys detected in {0}. Is file properly formatted?'.format(
@ -514,7 +524,8 @@ def rm_auth_key_from_file(user,
rval += rm_auth_key(
user,
key,
config
config=config,
fingerprint_hash_type=fingerprint_hash_type
)
# Due to the ability for a single file to have multiple keys, it's
# possible for a single call to this function to have both "replace"
@ -528,7 +539,10 @@ def rm_auth_key_from_file(user,
return 'Key not present'
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
def rm_auth_key(user,
key,
config='.ssh/authorized_keys',
fingerprint_hash_type=None):
'''
Remove an authorized key from the specified user's authorized key file
@ -538,7 +552,9 @@ def rm_auth_key(user, key, config='.ssh/authorized_keys'):
salt '*' ssh.rm_auth_key <user> <key>
'''
current = auth_keys(user, config)
current = auth_keys(user,
config=config,
fingerprint_hash_type=fingerprint_hash_type)
linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
if key in current:
# Remove the key
@ -596,7 +612,8 @@ def rm_auth_key(user, key, config='.ssh/authorized_keys'):
def set_auth_key_from_file(user,
source,
config='.ssh/authorized_keys',
saltenv='base'):
saltenv='base',
fingerprint_hash_type=None):
'''
Add a key to the authorized_keys file, using a file as the source.
@ -613,7 +630,7 @@ def set_auth_key_from_file(user,
'Failed to pull key file from salt file server'
)
s_keys = _validate_keys(lfile)
s_keys = _validate_keys(lfile, fingerprint_hash_type)
if not s_keys:
err = (
'No keys detected in {0}. Is file properly formatted?'.format(
@ -629,11 +646,12 @@ def set_auth_key_from_file(user,
rval += set_auth_key(
user,
key,
s_keys[key]['enc'],
s_keys[key]['comment'],
s_keys[key]['options'],
config,
list(s_keys.keys())
enc=s_keys[key]['enc'],
comment=s_keys[key]['comment'],
options=s_keys[key]['options'],
config=config,
cache_keys=list(s_keys.keys()),
fingerprint_hash_type=fingerprint_hash_type
)
# Due to the ability for a single file to have multiple keys, it's
# possible for a single call to this function to have both "replace"
@ -656,7 +674,8 @@ def set_auth_key(
comment='',
options=None,
config='.ssh/authorized_keys',
cache_keys=None):
cache_keys=None,
fingerprint_hash_type=None):
'''
Add a key to the authorized_keys file. The "key" parameter must only be the
string of text that is the encoded key. If the key begins with "ssh-rsa"
@ -683,11 +702,18 @@ def set_auth_key(
# the same filtering done when reading the authorized_keys file. Apply
# the same check to ensure we don't insert anything that will not
# subsequently be read)
key_is_valid = _fingerprint(key) is not None
key_is_valid = _fingerprint(key, fingerprint_hash_type) is not None
if not key_is_valid:
return 'Invalid public key'
status = check_key(user, key, enc, comment, options, config, cache_keys)
status = check_key(user,
key,
enc,
comment,
options,
config=config,
cache_keys=cache_keys,
fingerprint_hash_type=fingerprint_hash_type)
if status == 'update':
_replace_auth_key(user, key, enc, comment, options or [], config)
return 'replace'
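For context on the ``fingerprint_hash_type`` plumbing above (the default hash is now sha256 and the old md5 warning is gone), a rough sketch of deriving a colon-separated digest from the base64 body of a public key with only the standard library; the key bytes are made up and the exact formatting of Salt's ``_fingerprint`` may differ:

import base64
import hashlib

# Placeholder bytes standing in for the base64 field of an authorized_keys entry.
key_b64 = base64.b64encode(b'not a real ssh key, just example bytes').decode()

digest = hashlib.new('sha256', base64.b64decode(key_b64)).hexdigest()
fingerprint = ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
print(fingerprint)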

View file

@ -253,16 +253,28 @@ def _check_queue(queue, kwargs):
return conflict
def _get_opts(localconfig=None):
def _get_opts(**kwargs):
'''
Return a copy of the opts for use, optionally load a local config on top
'''
opts = copy.deepcopy(__opts__)
if localconfig:
opts = salt.config.minion_config(localconfig, defaults=opts)
if 'localconfig' in kwargs:
opts = salt.config.minion_config(kwargs['localconfig'], defaults=opts)
else:
if 'saltenv' in kwargs:
opts['environment'] = kwargs['saltenv']
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
return opts
def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None
def low(data, queue=False, **kwargs):
'''
Execute a single low data call
@ -326,24 +338,31 @@ def high(data, test=None, queue=False, **kwargs):
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc, proxy=__proxy__,
context=__context__)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
ret = st_.call_high(data)
_set_retcode(ret, highstate=data)
@ -372,19 +391,20 @@ def template(tem, queue=False, **kwargs):
)
kwargs.pop('env')
if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
else:
saltenv = ''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = _get_opts(**kwargs)
try:
st_ = salt.state.HighState(__opts__, context=__context__,
proxy=__proxy__)
st_ = salt.state.HighState(opts,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(__opts__, context=__context__)
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -393,7 +413,11 @@ def template(tem, queue=False, **kwargs):
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem, saltenv, '', None, local=True)
high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors:
__context__['retcode'] = 1
return errors
@ -415,10 +439,15 @@ def template_str(tem, queue=False, **kwargs):
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = _get_opts(**kwargs)
try:
st_ = salt.state.State(__opts__, proxy=__proxy__)
st_ = salt.state.State(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(__opts__)
st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
ret = st_.call_template_str(tem)
_set_retcode(ret)
return ret
@ -696,9 +725,7 @@ def run_request(name='default', **kwargs):
return {}
def highstate(test=None,
queue=False,
**kwargs):
def highstate(test=None, queue=False, **kwargs):
'''
Retrieve the state data from the salt master for this minion and execute it
@ -760,7 +787,7 @@ def highstate(test=None,
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock:
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
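
As a toy illustration of what mocking means here (this is not Salt's implementation), the chunks are still ordered and validated, but each state call is replaced by a canned result:

def run_chunks(chunks, mock=False):
    results = []
    for chunk in chunks:                      # chunks arrive already ordered
        if mock:
            results.append({'name': chunk['name'], 'result': True,
                            'comment': 'Not called, mocked'})
        else:
            results.append(chunk['func'](chunk['name']))
    return results

chunks = [{'name': 'install-pkg', 'func': lambda n: {'name': n, 'result': True}},
          {'name': 'start-service', 'func': lambda n: {'name': n, 'result': True}}]
print(run_chunks(chunks, mock=True))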
@ -793,7 +820,7 @@ def highstate(test=None,
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
@ -813,10 +840,11 @@ def highstate(test=None,
opts['pillarenv'] = kwargs['pillarenv']
pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
@ -824,18 +852,20 @@ def highstate(test=None,
try:
st_ = salt.state.HighState(opts,
pillar,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False))
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False))
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -872,13 +902,7 @@ def highstate(test=None,
return ret
def sls(mods,
saltenv=None,
test=None,
exclude=None,
queue=False,
pillarenv=None,
**kwargs):
def sls(mods, test=None, exclude=None, queue=False, **kwargs):
'''
Execute the states in one or more SLS files
@ -976,16 +1000,6 @@ def sls(mods,
)
kwargs.pop('env')
if saltenv is None:
if __opts__.get('environment', None):
saltenv = __opts__['environment']
else:
saltenv = 'base'
if not pillarenv:
if __opts__.get('pillarenv', None):
pillarenv = __opts__['pillarenv']
# Modification to __opts__ lost after this if-else
if queue:
_wait(kwargs.get('__pub_jid'))
@ -995,10 +1009,6 @@ def sls(mods,
__context__['retcode'] = 1
return conflict
# Ensure desired environment
__opts__['environment'] = saltenv
__opts__['pillarenv'] = pillarenv
if isinstance(mods, list):
disabled = _disabled(mods)
else:
@ -1006,20 +1016,28 @@ def sls(mods,
if disabled:
for state in disabled:
log.debug('Salt state {0} run is disabled. To re-enable, run state.enable {0}'.format(state))
log.debug(
'Salt state %s is disabled. To re-enable, run '
'state.enable %s', state, state
)
__context__['retcode'] = 1
return disabled
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar')
# Since this is running a specific SLS file (or files), fall back to the
# 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
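
The fall-back just above reduces to a small precedence rule; a standalone sketch (names are illustrative):

def resolve_saltenv(configured=None, **kwargs):
    env = kwargs.get('saltenv', configured)    # per-call kwarg wins
    return env if env is not None else 'base'  # otherwise default to 'base'

print(resolve_saltenv())                     # 'base'
print(resolve_saltenv(configured='prod'))    # 'prod'
print(resolve_saltenv(saltenv='dev'))        # 'dev'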
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
@ -1030,20 +1048,23 @@ def sls(mods,
__opts__['cachedir'],
'{0}.cache.p'.format(kwargs.get('cache_name', 'highstate'))
)
try:
st_ = salt.state.HighState(opts,
pillar,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False))
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False))
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1066,7 +1087,7 @@ def sls(mods,
st_.push_active()
ret = {}
try:
high_, errors = st_.render_highstate({saltenv: mods})
high_, errors = st_.render_highstate({opts['environment']: mods})
if errors:
__context__['retcode'] = 1
@ -1116,12 +1137,7 @@ def sls(mods,
return ret
def top(topfn,
test=None,
queue=False,
saltenv=None,
pillarenv=None,
**kwargs):
def top(topfn, test=None, queue=False, **kwargs):
'''
Execute a specific top file instead of the default. This is useful to apply
configurations from a different environment (for example, dev or prod), without
@ -1158,31 +1174,31 @@ def top(topfn,
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
if saltenv is not None:
opts['environment'] = saltenv
if pillarenv is not None:
opts['pillarenv'] = pillarenv
pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
context=__context__, proxy=__proxy__)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
context=__context__)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
@ -1193,8 +1209,8 @@ def top(topfn,
st_.opts['state_top'] = salt.utils.url.create(topfn)
ret = {}
orchestration_jid = kwargs.get('orchestration_jid')
if saltenv:
st_.opts['state_top_saltenv'] = saltenv
if 'saltenv' in kwargs:
st_.opts['state_top_saltenv'] = kwargs['saltenv']
try:
snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_highstate(
@ -1228,21 +1244,28 @@ def show_highstate(queue=False, **kwargs):
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
opts = _get_opts(**kwargs)
try:
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc,
proxy=__proxy__)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1272,10 +1295,15 @@ def show_lowstate(queue=False, **kwargs):
if conflict is not None:
assert False
return conflict
opts = _get_opts(**kwargs)
try:
st_ = salt.state.HighState(__opts__, proxy=__proxy__)
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(__opts__)
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1326,14 +1354,7 @@ def show_state_usage(queue=False, **kwargs):
return ret
def sls_id(
id_,
mods,
saltenv='base',
pillarenv=None,
test=None,
queue=False,
**kwargs):
def sls_id(id_, mods, test=None, queue=False, **kwargs):
'''
Call a single ID from the named module(s) and handle all requisites
@ -1369,26 +1390,21 @@ def sls_id(
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
opts['environment'] = saltenv
if pillarenv is not None:
opts['pillarenv'] = pillarenv
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
try:
st_ = salt.state.HighState(opts, pillar=pillar, pillar_enc=pillar_enc, proxy=__proxy__)
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts)
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1400,7 +1416,7 @@ def sls_id(
split_mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: split_mods})
high_, errors = st_.render_highstate({opts['environment']: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1420,17 +1436,12 @@ def sls_id(
if not ret:
raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, saltenv)
'\'{2}\''.format(id_, mods, opts['environment'])
)
return ret
def show_low_sls(mods,
saltenv='base',
pillarenv=None,
test=None,
queue=False,
**kwargs):
def show_low_sls(mods, test=None, queue=False, **kwargs):
'''
Display the low data from a specific sls. The default environment is
``base``, use ``saltenv`` to specify a different environment.
@ -1450,6 +1461,7 @@ def show_low_sls(mods,
.. code-block:: bash
salt '*' state.show_low_sls foo
salt '*' state.show_low_sls foo saltenv=dev
'''
if 'env' in kwargs:
salt.utils.warn_until(
@ -1464,15 +1476,19 @@ def show_low_sls(mods,
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
opts['environment'] = saltenv
if pillarenv is not None:
opts['pillarenv'] = pillarenv
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
try:
st_ = salt.state.HighState(opts, proxy=__proxy__)
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts)
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1483,7 +1499,7 @@ def show_low_sls(mods,
mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: mods})
high_, errors = st_.render_highstate({opts['environment']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1497,7 +1513,7 @@ def show_low_sls(mods,
return ret
def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
def show_sls(mods, test=None, queue=False, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master. The default environment is ``base``, use ``saltenv`` to specify a
@ -1537,28 +1553,36 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar')
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
try:
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
proxy=__proxy__)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1569,7 +1593,7 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: mods})
high_, errors = st_.render_highstate({opts['environment']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1592,8 +1616,6 @@ def show_top(queue=False, **kwargs):
salt '*' state.show_top
'''
opts = copy.deepcopy(__opts__)
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
@ -1603,15 +1625,17 @@ def show_top(queue=False, **kwargs):
)
kwargs.pop('env')
if 'saltenv' in kwargs:
opts['environment'] = kwargs['saltenv']
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = _get_opts(**kwargs)
try:
st_ = salt.state.HighState(opts, proxy=__proxy__)
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts)
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1657,23 +1681,30 @@ def single(fun, name, test=None, queue=False, **kwargs):
'__id__': name,
'name': name})
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc, proxy=__proxy__)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
@ -1716,7 +1747,11 @@ def clear_cache():
return ret
def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
'''
Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state
@ -1754,15 +1789,16 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json):
with salt.utils.fopen(pillar_json, 'r') as fp_:
pillar = json.load(fp_)
pillar_override = json.load(fp_)
else:
pillar = None
pillar_override = None
roster_grains_json = os.path.join(root, 'roster_grains.json')
if os.path.isfile(roster_grains_json):
with salt.utils.fopen(roster_grains_json, 'r') as fp_:
roster_grains = json.load(fp_, object_hook=salt.utils.decode_dict)
popts = _get_opts(kwargs.get('localconfig'))
popts = _get_opts(**kwargs)
if os.path.isfile(roster_grains_json):
popts['grains'] = roster_grains
popts['fileclient'] = 'local'
@ -1774,7 +1810,7 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
if not os.path.isdir(full):
continue
popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar=pillar)
st_ = salt.state.State(popts, pillar_override=pillar_override)
snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_chunks(lowstate)
ret = st_.call_listen(lowstate, ret)

View file

@ -42,9 +42,10 @@ def get_cert_serial(cert_file):
salt '*' certutil.get_cert_serial <certificate name>
'''
cmd = "certutil.exe -verify {0}".format(cert_file)
cmd = "certutil.exe -silent -verify {0}".format(cert_file)
out = __salt__['cmd.run'](cmd)
matches = re.search(r"Serial: (.*)", out)
# match serial number by paragraph to work with multiple languages
matches = re.search(r":\s*(\w*)\r\n\r\n", out)
if matches is not None:
return matches.groups()[0].strip()
else:
@ -66,7 +67,8 @@ def get_stored_cert_serials(store):
'''
cmd = "certutil.exe -store {0}".format(store)
out = __salt__['cmd.run'](cmd)
matches = re.findall(r"Serial Number: (.*)\r", out)
# match serial numbers by header position to work with multiple languages
matches = re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", out)
return matches
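
To see why the language-independent patterns work, here is a small standalone check against hypothetical certutil output (the field labels vary with the Windows display language, which is exactly why only the colon and the surrounding layout are matched):

import re

verify_out = "Issuer: CN=Example\r\nSerial: 1a2b3c\r\n\r\nSignature matches Public Key\r\n"
m = re.search(r":\s*(\w*)\r\n\r\n", verify_out)
print(m.groups()[0])   # '1a2b3c' -- the value followed by a blank line

store_out = "=" * 16 + " Certificate 0 " + "=" * 16 + "\r\n" + "Serial Number: deadbeef\r\n"
print(re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", store_out))   # ['deadbeef']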

View file

@ -155,7 +155,7 @@ def get_rule(name='all'):
salt '*' firewall.get_rule 'MyAppPort'
'''
cmd = ['netsh', 'advfirewall', 'firewall', 'show', 'rule',
'name="{0}"'.format(name)]
'name={0}'.format(name)]
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
@ -245,7 +245,7 @@ def add_rule(name, localport, protocol='tcp', action='allow', dir='in',
return True
def delete_rule(name,
def delete_rule(name=None,
localport=None,
protocol=None,
dir=None,
@ -261,10 +261,11 @@ def delete_rule(name,
name (str): The name of the rule to delete. If the name ``all`` is used
you must specify additional parameters.
localport (Optional[str]): The port of the rule. Must specify a
protocol.
localport (Optional[str]): The port of the rule. If protocol is not
specified, protocol will be set to ``tcp``
protocol (Optional[str]): The protocol of the rule.
protocol (Optional[str]): The protocol of the rule. Default is ``tcp``
when ``localport`` is specified
dir (Optional[str]): The direction of the rule.
@ -293,8 +294,9 @@ def delete_rule(name,
# Delete a rule called 'allow80':
salt '*' firewall.delete_rule allow80
'''
cmd = ['netsh', 'advfirewall', 'firewall', 'delete', 'rule',
'name={0}'.format(name)]
cmd = ['netsh', 'advfirewall', 'firewall', 'delete', 'rule']
if name:
cmd.append('name={0}'.format(name))
if protocol:
cmd.append('protocol={0}'.format(protocol))
if dir:
@ -305,6 +307,8 @@ def delete_rule(name,
if protocol is None \
or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
if localport:
if not protocol:
cmd.append('protocol=tcp')
cmd.append('localport={0}'.format(localport))
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
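
A condensed, standalone sketch of the command assembly shown above (only the parameters visible in this hunk are modelled): deleting by localport without a protocol now implies protocol=tcp, and the name clause is only emitted when a name was given.

def build_delete_cmd(name=None, localport=None, protocol=None, dir=None):
    cmd = ['netsh', 'advfirewall', 'firewall', 'delete', 'rule']
    if name:
        cmd.append('name={0}'.format(name))
    if protocol:
        cmd.append('protocol={0}'.format(protocol))
    if dir:
        cmd.append('dir={0}'.format(dir))
    if protocol is None \
            or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
        if localport:
            if not protocol:
                cmd.append('protocol=tcp')
            cmd.append('localport={0}'.format(localport))
    return cmd

print(build_delete_cmd(localport='8080', dir='in'))
# ['netsh', 'advfirewall', 'firewall', 'delete', 'rule', 'dir=in',
#  'protocol=tcp', 'localport=8080']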

View file

@ -927,7 +927,7 @@ list_updates = salt.utils.alias_function(list_upgrades, 'list_updates')
def list_downloaded():
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List prefetched packages downloaded by Yum in the local disk.
@ -3029,7 +3029,7 @@ def _get_patches(installed_only=False):
def list_patches(refresh=False):
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List all known advisory patches from available repos.
@ -3052,7 +3052,7 @@ def list_patches(refresh=False):
def list_installed_patches():
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List installed advisory patches on the system.

View file

@ -450,9 +450,9 @@ def info_installed(*names, **kwargs):
summary, description.
:param errors:
Handle RPM field errors (true|false). By default, various mistakes in the textual fields are simply ignored and
omitted from the data. Otherwise a field with a mistake is not returned, instead a 'N/A (bad UTF-8)'
(not available, broken) text is returned.
Handle RPM field errors. If 'ignore' is chosen, then various mistakes are simply ignored and omitted
from the texts or strings. If 'report' is chosen, then a field with a mistake is not returned; instead
a 'N/A (broken)' (not available, broken) text is placed.
Valid attributes are:
ignore, report
@ -465,7 +465,8 @@ def info_installed(*names, **kwargs):
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=true
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=ignore
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=report
'''
ret = dict()
for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
@ -479,7 +480,7 @@ def info_installed(*names, **kwargs):
else:
value_ = value.decode('UTF-8', 'ignore').encode('UTF-8', 'ignore')
if value != value_:
value = kwargs.get('errors') and value_ or 'N/A (invalid UTF-8)'
value = kwargs.get('errors', 'ignore') == 'ignore' and value_ or 'N/A (invalid UTF-8)'
log.error('Package {0} has bad UTF-8 code in {1}: {2}'.format(pkg_name, key, value))
if key == 'source_rpm':
t_nfo['source'] = value
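
The effect of the errors parameter on a field with broken UTF-8 can be reproduced in isolation (the bytes input below is purely illustrative):

def clean_field(value, errors='ignore'):
    value_ = value.decode('UTF-8', 'ignore').encode('UTF-8', 'ignore')
    if value != value_:
        # 'ignore' keeps the cleaned value, 'report' flags the field instead
        return value_ if errors == 'ignore' else b'N/A (invalid UTF-8)'
    return value

raw = b'caf\xe9 description'                 # latin-1 byte, invalid as UTF-8
print(clean_field(raw, errors='ignore'))     # b'caf description'
print(clean_field(raw, errors='report'))     # b'N/A (invalid UTF-8)'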
@ -1151,15 +1152,18 @@ def install(name=None,
# Handle packages which report multiple new versions
# (affects only kernel packages at this point)
for pkg in new:
if isinstance(new[pkg], six.string_types):
new[pkg] = new[pkg].split(',')[-1]
for pkg_name in new:
pkg_data = new[pkg_name]
if isinstance(pkg_data, six.string_types):
new[pkg_name] = pkg_data.split(',')[-1]
ret = salt.utils.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
'Problem encountered {0} package(s)'.format(
'downloading' if downloadonly else 'installing'
),
info={'errors': errors, 'changes': ret}
)
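
The multi-version handling a few lines up is easy to see with a toy dict (package names and versions are made up):

new = {'kernel-default': '4.4.73-5.1,4.4.90-28.1', 'zsh': '5.3-2.1'}
for pkg_name, pkg_data in new.items():
    if isinstance(pkg_data, str):
        new[pkg_name] = pkg_data.split(',')[-1]   # keep only the newest entry
print(new)   # {'kernel-default': '4.4.90-28.1', 'zsh': '5.3-2.1'}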
@ -1864,7 +1868,7 @@ def download(*packages, **kwargs):
def list_downloaded():
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List prefetched packages downloaded by Zypper in the local disk.
@ -1948,7 +1952,7 @@ def _get_patches(installed_only=False):
def list_patches(refresh=False):
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List all known advisory patches from available repos.
@ -1971,7 +1975,7 @@ def list_patches(refresh=False):
def list_installed_patches():
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
List installed advisory patches on the system.

View file

@ -37,7 +37,7 @@ log = logging.getLogger(__name__)
def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
'''
Return the correct pillar driver based on the file_client option
'''
@ -54,14 +54,14 @@ def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
log.info('Compiling pillar from cache')
log.debug('get_pillar using pillar cache with ext: {0}'.format(ext))
return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs,
pillar=pillar, pillarenv=pillarenv)
pillar_override=pillar_override, pillarenv=pillarenv)
return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,
pillar=pillar, pillarenv=pillarenv)
pillar_override=pillar_override, pillarenv=pillarenv)
# TODO: migrate everyone to this one!
def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
'''
Return the correct pillar driver based on the file_client option
'''
@ -73,7 +73,7 @@ def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None
'local': AsyncPillar,
}.get(file_client, AsyncPillar)
return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,
pillar=pillar, pillarenv=pillarenv)
pillar_override=pillar_override, pillarenv=pillarenv)
class AsyncRemotePillar(object):
@ -81,7 +81,7 @@ class AsyncRemotePillar(object):
Get the pillar from the master
'''
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
self.opts = opts
self.opts['environment'] = saltenv
self.ext = ext
@ -90,16 +90,10 @@ class AsyncRemotePillar(object):
self.channel = salt.transport.client.AsyncReqChannel.factory(opts)
if pillarenv is not None:
self.opts['pillarenv'] = pillarenv
elif self.opts.get('pillarenv_from_saltenv', False):
self.opts['pillarenv'] = saltenv
elif 'pillarenv' not in self.opts:
self.opts['pillarenv'] = None
self.pillar_override = {}
if pillar is not None:
if isinstance(pillar, dict):
self.pillar_override = pillar
else:
log.error('Pillar data must be a dictionary')
self.pillar_override = pillar_override or {}
if not isinstance(self.pillar_override, dict):
self.pillar_override = {}
log.error('Pillar data must be a dictionary')
@tornado.gen.coroutine
def compile_pillar(self):
@ -138,7 +132,7 @@ class RemotePillar(object):
Get the pillar from the master
'''
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
self.opts = opts
self.opts['environment'] = saltenv
self.ext = ext
@ -147,16 +141,10 @@ class RemotePillar(object):
self.channel = salt.transport.Channel.factory(opts)
if pillarenv is not None:
self.opts['pillarenv'] = pillarenv
elif self.opts.get('pillarenv_from_saltenv', False):
self.opts['pillarenv'] = saltenv
elif 'pillarenv' not in self.opts:
self.opts['pillarenv'] = None
self.pillar_override = {}
if pillar is not None:
if isinstance(pillar, dict):
self.pillar_override = pillar
else:
log.error('Pillar data must be a dictionary')
self.pillar_override = pillar_override or {}
if not isinstance(self.pillar_override, dict):
self.pillar_override = {}
log.error('Pillar data must be a dictionary')
def compile_pillar(self):
'''
@ -188,8 +176,8 @@ class PillarCache(object):
'''
Return a cached pillar if it exists, otherwise cache it.
Pillar caches are structed in two diminensions: minion_id with a dict of saltenvs.
Each saltenv contains a pillar dict
Pillar caches are structured in two dimensions: minion_id with a dict of
saltenvs. Each saltenv contains a pillar dict
Example data structure:
@ -200,7 +188,7 @@ class PillarCache(object):
'''
# TODO ABC?
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
# Yes, we need all of these because we need to route to the Pillar object
# if we have no cache. This is another refactor target.
@ -210,7 +198,7 @@ class PillarCache(object):
self.minion_id = minion_id
self.ext = ext
self.functions = functions
self.pillar = pillar
self.pillar_override = pillar_override
self.pillarenv = pillarenv
if saltenv is None:
@ -239,13 +227,13 @@ class PillarCache(object):
'''
log.debug('Pillar cache getting external pillar with ext: {0}'.format(self.ext))
fresh_pillar = Pillar(self.opts,
self.grains,
self.minion_id,
self.saltenv,
ext=self.ext,
functions=self.functions,
pillar=self.pillar,
pillarenv=self.pillarenv)
self.grains,
self.minion_id,
self.saltenv,
ext=self.ext,
functions=self.functions,
pillar_override=self.pillar_override,
pillarenv=self.pillarenv)
return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here
def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
@ -278,7 +266,7 @@ class Pillar(object):
Read over the pillar top files and render the pillar data
'''
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
pillar=None, pillarenv=None):
pillar_override=None, pillarenv=None):
self.minion_id = minion_id
self.ext = ext
if pillarenv is None:
@ -320,12 +308,10 @@ class Pillar(object):
self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions)
self.ignored_pillars = {}
self.pillar_override = {}
if pillar is not None:
if isinstance(pillar, dict):
self.pillar_override = pillar
else:
log.error('Pillar data must be a dictionary')
self.pillar_override = pillar_override or {}
if not isinstance(self.pillar_override, dict):
self.pillar_override = {}
log.error('Pillar data must be a dictionary')
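
The normalisation now shared by AsyncRemotePillar, RemotePillar and Pillar can be reduced to a few lines (standalone sketch, not the Salt classes):

import logging
log = logging.getLogger(__name__)

def normalize_override(pillar_override):
    override = pillar_override or {}
    if not isinstance(override, dict):
        override = {}
        log.error('Pillar data must be a dictionary')
    return override

print(normalize_override(None))                   # {}
print(normalize_override({'role': 'web'}))        # {'role': 'web'}
print(normalize_override(['not', 'a', 'dict']))   # {} (and an error is logged)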
def __valid_on_demand_ext_pillar(self, opts):
'''
@ -835,7 +821,7 @@ class Pillar(object):
return pillar, errors
ext = None
# Bring in CLI pillar data
if self.pillar_override and isinstance(self.pillar_override, dict):
if self.pillar_override:
pillar = merge(pillar,
self.pillar_override,
self.merge_strategy,
@ -922,7 +908,7 @@ class Pillar(object):
log.critical('Pillar render error: {0}'.format(error))
pillar['_errors'] = errors
if self.pillar_override and isinstance(self.pillar_override, dict):
if self.pillar_override:
pillar = merge(pillar,
self.pillar_override,
self.merge_strategy,
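
To illustrate what happens when the override is finally applied, a toy recursive merge (Salt's real merge honours the configured merge_strategy and renderer):

def merge(dest, upd):
    for key, val in upd.items():
        if isinstance(val, dict) and isinstance(dest.get(key), dict):
            merge(dest[key], val)                 # recurse into nested dicts
        else:
            dest[key] = val                       # the override wins at the leaf
    return dest

compiled = {'app': {'port': 80, 'workers': 4}}
override = {'app': {'port': 8080}}
print(merge(compiled, override))   # {'app': {'port': 8080, 'workers': 4}}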

View file

@ -66,6 +66,7 @@ def render(template_file, saltenv='base', sls='', argline='',
sls=sls,
context=context,
tmplpath=tmplpath,
proxy=__proxy__,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(

View file

@ -336,9 +336,9 @@ For example:
'''
from __future__ import absolute_import
import imp
import types
from salt.ext.six import exec_
from salt.utils import pydsl
from salt.utils import pydsl, to_str
from salt.utils.pydsl import PyDslError
from salt.exceptions import SaltRenderError
@ -346,7 +346,8 @@ __all__ = ['render']
def render(template, saltenv='base', sls='', tmplpath=None, rendered_sls=None, **kws):
mod = imp.new_module(sls)
sls = to_str(sls)
mod = types.ModuleType(sls)
# Note: mod object is transient. Its existence only lasts as long as
# the lowstate data structure that the highstate in the sls file
# is compiled to.
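
A minimal demonstration of the replacement: types.ModuleType creates the same kind of transient module object the renderer needs, without the deprecated imp API (the attribute names below are made up):

import types

mod = types.ModuleType(str('webserver.nginx'))    # salt runs the name through to_str
mod.__dict__['answer'] = 42
exec('double = answer * 2', mod.__dict__)
print(mod.double)   # 84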

View file

@ -72,26 +72,35 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
fun = low.pop('fun')
verify_fun(self.functions, fun)
reserved_kwargs = dict([(i, low.pop(i)) for i in [
eauth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client', 'user', 'key',
'__current_eauth_groups', '__current_eauth_user',
] if i in low])
# Run name=value args through parse_input. We don't need to run kwargs
# through because there is no way to send name=value strings in the low
# dict other than by including an `arg` array.
arg, kwarg = salt.utils.args.parse_input(
low.pop('arg', []),
condition=False,
no_parse=self.opts.get('no_parse', []))
kwarg.update(low.pop('kwarg', {}))
_arg, _kwarg = salt.utils.args.parse_input(
low.pop('arg', []), condition=False)
_kwarg.update(low.pop('kwarg', {}))
# If anything hasn't been pop()'ed out of low by this point it must be
# an old-style kwarg.
kwarg.update(low)
_kwarg.update(low)
# Finally, mung our kwargs to a format suitable for the byzantine
# load_args_and_kwargs so that we can introspect the function being
# called and fish for invalid kwargs.
munged = []
munged.extend(_arg)
munged.append(dict(__kwarg__=True, **_kwarg))
arg, kwarg = salt.minion.load_args_and_kwargs(
self.functions[fun],
munged,
self.opts,
ignore_invalid=True)
return dict(fun=fun, kwarg={'kwarg': kwarg, 'arg': arg},
**reserved_kwargs)
**eauth_creds)
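
The "munged" structure handed to load_args_and_kwargs is simply the positional args followed by one dict flagged with __kwarg__=True; a standalone sketch with made-up runner arguments:

def munge(arg, kwarg):
    munged = list(arg)
    munged.append(dict(__kwarg__=True, **kwarg))
    return munged

print(munge(['20171130110407769519'], {'ext_source': None, 'display_progress': True}))
# ['20171130110407769519',
#  {'__kwarg__': True, 'ext_source': None, 'display_progress': True}]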
def cmd_async(self, low):
'''

View file

@ -73,10 +73,10 @@ def orchestrate(mods,
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions['state.sls'](
mods,
saltenv,
test,
exclude,
pillar=pillar,
saltenv=saltenv,
pillarenv=pillarenv,
pillar_enc=pillar_enc,
orchestration_jid=orchestration_jid)
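
Why the call switched to keyword arguments: state.sls no longer takes saltenv as its second positional parameter, so passing it positionally would silently bind it to test. A self-contained illustration with a stub signature:

def sls(mods, test=None, exclude=None, queue=False, **kwargs):
    return {'mods': mods, 'test': test, 'saltenv': kwargs.get('saltenv')}

print(sls('web', 'dev'))            # {'mods': 'web', 'test': 'dev', 'saltenv': None}  <- wrong
print(sls('web', saltenv='dev'))    # {'mods': 'web', 'test': None, 'saltenv': 'dev'}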

View file

@ -37,6 +37,18 @@ def raw_arg(*args, **kwargs):
return ret
def metasyntactic(locality='us'):
'''
Return common metasyntactic variables for the given locality
'''
lookup = {
'us': ['foo', 'bar', 'baz', 'qux', 'quux', 'quuz', 'corge', 'grault',
'garply', 'waldo', 'fred', 'plugh', 'xyzzy', 'thud'],
'uk': ['wibble', 'wobble', 'wubble', 'flob'],
}
return lookup.get(locality, None)
def stdout_print():
'''
Print 'foo' and return 'bar'

View file

@ -657,26 +657,28 @@ class State(object):
def __init__(
self,
opts,
pillar=None,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader='states'):
loader='states',
initial_pillar=None):
self.states_loader = loader
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar
self._pillar_override = pillar_override
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
self.opts['pillar'] = self._gather_pillar()
self.opts['pillar'] = initial_pillar if initial_pillar is not None \
else self._gather_pillar()
self.state_con = context or {}
self.load_modules()
self.active = set()
@ -727,7 +729,7 @@ class State(object):
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillar=self._pillar_override,
pillar_override=self._pillar_override,
pillarenv=self.opts.get('pillarenv'))
return pillar.compile_pillar()
@ -890,7 +892,8 @@ class State(object):
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self._load_states()
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
self.rend = salt.loader.render(self.opts, self.functions,
states=self.states, proxy=self.proxy)
def module_refresh(self):
'''
@ -2801,9 +2804,11 @@ class BaseHighState(object):
)
if found == 0:
log.error('No contents found in top file. Please verify '
'that the \'file_roots\' specified in \'etc/master\' are '
'accessible: {0}'.format(repr(self.state.opts['file_roots']))
log.debug(
'No contents found in top file. If this is not expected, '
'verify that the \'file_roots\' specified in \'etc/master\' '
'are accessible. The \'file_roots\' configuration is: %s',
repr(self.state.opts['file_roots'])
)
# Search initial top files for includes
@ -3627,7 +3632,7 @@ class BaseHighState(object):
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file or external nodes data matches found.'
msg = 'No Top file or master_tops data matches found.'
ret[tag_name]['comment'] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
@ -3761,24 +3766,26 @@ class HighState(BaseHighState):
def __init__(
self,
opts,
pillar=None,
pillar_override=None,
jid=None,
pillar_enc=None,
proxy=None,
context=None,
mocked=False,
loader='states'):
loader='states',
initial_pillar=None):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(self.opts,
pillar,
pillar_override,
jid,
pillar_enc,
proxy=proxy,
context=context,
mocked=mocked,
loader=loader)
loader=loader,
initial_pillar=initial_pillar)
self.matcher = salt.minion.Matcher(self.opts)
self.proxy = proxy

View file

@ -25,6 +25,11 @@ Manage Kubernetes
- node: myothernodename
- apiserver: http://mykubeapiserer:8080
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
__virtualname__ = 'k8s'
@ -42,6 +47,10 @@ def label_present(
node=None,
apiserver=None):
'''
.. deprecated:: Nitrogen
This state has been moved to :py:func:`kubernetes.node_label_present
<salt.states.kubernetes.node_label_present>`.
Ensure the label exists on the kube node.
name
@ -60,6 +69,14 @@ def label_present(
# Use salt k8s module to set label
ret = __salt__['k8s.label_present'](name, value, node, apiserver)
msg = (
'The k8s.label_present state has been replaced by '
'kubernetes.node_label_present. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
@ -68,6 +85,10 @@ def label_absent(
node=None,
apiserver=None):
'''
.. deprecated:: Nitrogen
This state has been moved to :py:func:`kubernetes.node_label_absent
<salt.states.kubernetes.node_label_absent>`.
Ensure the label doesn't exist on the kube node.
name
@ -83,6 +104,14 @@ def label_absent(
# Use salt k8s module to set label
ret = __salt__['k8s.label_absent'](name, node, apiserver)
msg = (
'The k8s.label_absent state has been replaced by '
'kubernetes.node_label_absent. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
@ -91,6 +120,10 @@ def label_folder_absent(
node=None,
apiserver=None):
'''
.. deprecated:: Nitrogen
This state has been moved to :py:func:`kubernetes.node_label_folder_absent
<salt.states.kubernetes.node_label_folder_absent>`.
Ensure the label folder doesn't exist on the kube node.
name
@ -106,4 +139,13 @@ def label_folder_absent(
# Use salt k8s module to set label
ret = __salt__['k8s.folder_absent'](name, node, apiserver)
msg = (
'The k8s.label_folder_absent state has been replaced by '
'kubernetes.node_label_folder_absent. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret

salt/states/kubernetes.py Normal file
View file

@ -0,0 +1,989 @@
# -*- coding: utf-8 -*-
'''
Manage kubernetes resources as salt states
==========================================
NOTE: This module requires the proper pillar values set. See
salt.modules.kubernetes for more information.
The kubernetes module is used to manage different kubernetes resources.
.. code-block:: yaml
my-nginx:
kubernetes.deployment_present:
- namespace: default
metadata:
app: frontend
spec:
replicas: 1
template:
metadata:
labels:
run: my-nginx
spec:
containers:
- name: my-nginx
image: nginx
ports:
- containerPort: 80
my-mariadb:
kubernetes.deployment_absent:
- namespace: default
# kubernetes deployment as specified inside of
# a file containing the definition of the
# deployment using the official kubernetes format
redis-master-deployment:
kubernetes.deployment_present:
- name: redis-master
- source: salt://k8s/redis-master-deployment.yml
require:
- pip: kubernetes-python-module
# kubernetes service as specified inside of
# a file containing the definition of the
# service using the official kubernetes format
redis-master-service:
kubernetes.service_present:
- name: redis-master
- source: salt://k8s/redis-master-service.yml
require:
- kubernetes.deployment_present: redis-master
# kubernetes deployment as specified inside of
# a file containing the definition of the
# deployment using the official kubernetes format
# plus some jinja directives
nginx-source-template:
kubernetes.deployment_present:
- source: salt://k8s/nginx.yml.jinja
- template: jinja
require:
- pip: kubernetes-python-module
# Kubernetes secret
k8s-secret:
kubernetes.secret_present:
- name: top-secret
data:
key1: value1
key2: value2
key3: value3
'''
from __future__ import absolute_import
import copy
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if the kubernetes module is available in __salt__
'''
return 'kubernetes.ping' in __salt__
def _error(ret, err_msg):
'''
Helper function to propagate errors to
the end user.
'''
ret['result'] = False
ret['comment'] = err_msg
return ret
def deployment_absent(name, namespace='default', **kwargs):
'''
Ensures that the named deployment is absent from the given namespace.
name
The name of the deployment
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
deployment = __salt__['kubernetes.show_deployment'](name, namespace, **kwargs)
if deployment is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The deployment does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The deployment is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_deployment'](name, namespace, **kwargs)
if res['code'] == 200:
ret['result'] = True
ret['changes'] = {
'kubernetes.deployment': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = res['message']
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret
def deployment_present(
name,
namespace='default',
metadata=None,
spec=None,
source='',
template='',
**kwargs):
'''
Ensures that the named deployment is present inside of the specified
namespace with the given metadata and spec.
If the deployment exists it will be replaced.
name
The name of the deployment.
namespace
The namespace holding the deployment. The 'default' one is going to be
used unless a different one is specified.
metadata
The metadata of the deployment object.
spec
The spec of the deployment object.
source
A file containing the definition of the deployment (metadata and
spec) in the official kubernetes format.
template
Template engine to be used to render the source file.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if (metadata or spec) and source:
return _error(
ret,
'\'source\' cannot be used in combination with \'metadata\' or '
'\'spec\''
)
if metadata is None:
metadata = {}
if spec is None:
spec = {}
deployment = __salt__['kubernetes.show_deployment'](name, namespace, **kwargs)
if deployment is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The deployment is going to be created'
return ret
res = __salt__['kubernetes.create_deployment'](name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes']['{0}.{1}'.format(namespace, name)] = {
'old': {},
'new': res}
else:
if __opts__['test']:
ret['result'] = None
return ret
# TODO: improve checks # pylint: disable=fixme
log.info('Forcing the recreation of the deployment')
ret['comment'] = 'The deployment is already present. Forcing recreation'
res = __salt__['kubernetes.replace_deployment'](
name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes'] = {
'metadata': metadata,
'spec': spec
}
ret['result'] = True
return ret
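
deployment_present, service_present and pod_present all share the same guard on their inputs; extracted as a standalone sketch:

def validate_definition(metadata, spec, source):
    if (metadata or spec) and source:
        raise ValueError("'source' cannot be used in combination with "
                         "'metadata' or 'spec'")

validate_definition({}, {}, 'salt://k8s/redis-master-deployment.yml')   # ok
validate_definition({'labels': {'app': 'redis'}}, {}, '')               # ok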
def service_present(
name,
namespace='default',
metadata=None,
spec=None,
source='',
template='',
**kwargs):
'''
Ensures that the named service is present inside of the specified namespace
with the given metadata and spec.
If the deployment exists it will be replaced.
name
The name of the service.
namespace
The namespace holding the service. The 'default' one is going to be
used unless a different one is specified.
metadata
The metadata of the service object.
spec
The spec of the service object.
source
A file containing the definition of the service (metadata and
spec) in the official kubernetes format.
template
Template engine to be used to render the source file.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if (metadata or spec) and source:
return _error(
ret,
'\'source\' cannot be used in combination with \'metadata\' or '
'\'spec\''
)
if metadata is None:
metadata = {}
if spec is None:
spec = {}
service = __salt__['kubernetes.show_service'](name, namespace, **kwargs)
if service is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The service is going to be created'
return ret
res = __salt__['kubernetes.create_service'](name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes']['{0}.{1}'.format(namespace, name)] = {
'old': {},
'new': res}
else:
if __opts__['test']:
ret['result'] = None
return ret
# TODO: improve checks # pylint: disable=fixme
log.info('Forcing the recreation of the service')
ret['comment'] = 'The service is already present. Forcing recreation'
res = __salt__['kubernetes.replace_service'](
name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
old_service=service,
saltenv=__env__,
**kwargs)
ret['changes'] = {
'metadata': metadata,
'spec': spec
}
ret['result'] = True
return ret
def service_absent(name, namespace='default', **kwargs):
'''
Ensures that the named service is absent from the given namespace.
name
The name of the service
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
service = __salt__['kubernetes.show_service'](name, namespace, **kwargs)
if service is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The service does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The service is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_service'](name, namespace, **kwargs)
if res['code'] == 200:
ret['result'] = True
ret['changes'] = {
'kubernetes.service': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = res['message']
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret
def namespace_absent(name, **kwargs):
'''
Ensures that the named namespace is absent.
name
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
namespace = __salt__['kubernetes.show_namespace'](name, **kwargs)
if namespace is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The namespace does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The namespace is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_namespace'](name, **kwargs)
if (
res['code'] == 200 or
(
isinstance(res['status'], str) and
'Terminating' in res['status']
) or
(
isinstance(res['status'], dict) and
res['status']['phase'] == 'Terminating'
)):
ret['result'] = True
ret['changes'] = {
'kubernetes.namespace': {
'new': 'absent', 'old': 'present'}}
if res['message']:
ret['comment'] = res['message']
else:
ret['comment'] = 'Terminating'
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret
def namespace_present(name, **kwargs):
'''
Ensures that the named namespace is present.
name
The name of the deployment.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
namespace = __salt__['kubernetes.show_namespace'](name, **kwargs)
if namespace is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The namespace is going to be created'
return ret
res = __salt__['kubernetes.create_namespace'](name, **kwargs)
ret['changes']['namespace'] = {
'old': {},
'new': res}
else:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The namespace already exists'
return ret
def secret_absent(name, namespace='default', **kwargs):
'''
Ensures that the named secret is absent from the given namespace.
name
The name of the secret
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
secret = __salt__['kubernetes.show_secret'](name, namespace, **kwargs)
if secret is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The secret does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The secret is going to be deleted'
ret['result'] = None
return ret
__salt__['kubernetes.delete_secret'](name, namespace, **kwargs)
# As of kubernetes 1.6.4, the API does not set a code when deleting a secret.
# The kubernetes module will raise an exception if the kubernetes
# server returns an error
ret['result'] = True
ret['changes'] = {
'kubernetes.secret': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = 'Secret deleted'
return ret
def secret_present(
name,
namespace='default',
data=None,
source='',
template='',
**kwargs):
'''
Ensures that the named secret is present inside of the specified namespace
with the given data.
If the secret exists it will be replaced.
name
The name of the secret.
namespace
The namespace holding the secret. The 'default' one is going to be
used unless a different one is specified.
data
The dictionary holding the secrets.
source
A file containing the data of the secret in plain format.
template
Template engine to be used to render the source file.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if data and source:
return _error(
ret,
'\'source\' cannot be used in combination with \'data\''
)
secret = __salt__['kubernetes.show_secret'](name, namespace, **kwargs)
if secret is None:
if data is None:
data = {}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The secret is going to be created'
return ret
res = __salt__['kubernetes.create_secret'](name=name,
namespace=namespace,
data=data,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes']['{0}.{1}'.format(namespace, name)] = {
'old': {},
'new': res}
else:
if __opts__['test']:
ret['result'] = None
return ret
# TODO: improve checks # pylint: disable=fixme
log.info('Forcing the recreation of the secret')
ret['comment'] = 'The secret is already present. Forcing recreation'
res = __salt__['kubernetes.replace_secret'](
name=name,
namespace=namespace,
data=data,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes'] = {
# Omit values from the return. They are unencrypted
# and can contain sensitive data.
'data': res['data'].keys()
}
ret['result'] = True
return ret
def configmap_absent(name, namespace='default', **kwargs):
'''
Ensures that the named configmap is absent from the given namespace.
name
The name of the configmap
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
configmap = __salt__['kubernetes.show_configmap'](name, namespace, **kwargs)
if configmap is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The configmap does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The configmap is going to be deleted'
ret['result'] = None
return ret
__salt__['kubernetes.delete_configmap'](name, namespace, **kwargs)
# As of kubernetes 1.6.4, the API does not set a code when deleting a configmap.
# The kubernetes module will raise an exception if the kubernetes
# server returns an error
ret['result'] = True
ret['changes'] = {
'kubernetes.configmap': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = 'ConfigMap deleted'
return ret
def configmap_present(
name,
namespace='default',
data=None,
source='',
template='',
**kwargs):
'''
Ensures that the named configmap is present inside of the specified namespace
with the given data.
If the configmap exists it will be replaced.
name
The name of the configmap.
namespace
The namespace holding the configmap. The 'default' one is going to be
used unless a different one is specified.
data
The dictionary holding the configmaps.
source
A file containing the data of the configmap in plain format.
template
Template engine to be used to render the source file.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if data and source:
return _error(
ret,
'\'source\' cannot be used in combination with \'data\''
)
configmap = __salt__['kubernetes.show_configmap'](name, namespace, **kwargs)
if configmap is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The configmap is going to be created'
return ret
res = __salt__['kubernetes.create_configmap'](name=name,
namespace=namespace,
data=data,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes']['{0}.{1}'.format(namespace, name)] = {
'old': {},
'new': res}
else:
if __opts__['test']:
ret['result'] = None
return ret
# TODO: improve checks # pylint: disable=fixme
log.info('Forcing the recreation of the configmap')
ret['comment'] = 'The configmap is already present. Forcing recreation'
res = __salt__['kubernetes.replace_configmap'](
name=name,
namespace=namespace,
data=data,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes'] = {
'data': res['data']
}
ret['result'] = True
return ret
def pod_absent(name, namespace='default', **kwargs):
'''
Ensures that the named pod is absent from the given namespace.
name
The name of the pod
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
pod = __salt__['kubernetes.show_pod'](name, namespace, **kwargs)
if pod is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The pod does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The pod is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_pod'](name, namespace, **kwargs)
if res['code'] == 200 or res['code'] is None:
ret['result'] = True
ret['changes'] = {
'kubernetes.pod': {
'new': 'absent', 'old': 'present'}}
if res['code'] is None:
ret['comment'] = 'In progress'
else:
ret['comment'] = res['message']
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret
def pod_present(
name,
namespace='default',
metadata=None,
spec=None,
source='',
template='',
**kwargs):
'''
Ensures that the named pod is present inside of the specified
namespace with the given metadata and spec.
If the pod exists it will be replaced.
name
The name of the pod.
namespace
The namespace holding the pod. The 'default' one is going to be
used unless a different one is specified.
metadata
The metadata of the pod object.
spec
The spec of the pod object.
source
A file containing the definition of the pod (metadata and
spec) in the official kubernetes format.
template
Template engine to be used to render the source file.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if (metadata or spec) and source:
return _error(
ret,
'\'source\' cannot be used in combination with \'metadata\' or '
'\'spec\''
)
if metadata is None:
metadata = {}
if spec is None:
spec = {}
pod = __salt__['kubernetes.show_pod'](name, namespace, **kwargs)
if pod is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The pod is going to be created'
return ret
res = __salt__['kubernetes.create_pod'](name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=__env__,
**kwargs)
ret['changes']['{0}.{1}'.format(namespace, name)] = {
'old': {},
'new': res}
else:
if __opts__['test']:
ret['result'] = None
return ret
# TODO: fix replace_namespaced_pod validation issues
ret['comment'] = 'salt is currently unable to replace a pod without ' \
'deleting it. Please perform the removal of the pod using ' \
'the \'pod_absent\' state if this is the desired behaviour.'
ret['result'] = False
return ret
ret['changes'] = {
'metadata': metadata,
'spec': spec
}
ret['result'] = True
return ret
def node_label_absent(name, node, **kwargs):
'''
Ensures that the named label is absent from the node.
name
The name of the label
node
The name of the node
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
labels = __salt__['kubernetes.node_labels'](node, **kwargs)
if name not in labels:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The label does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The label is going to be deleted'
ret['result'] = None
return ret
__salt__['kubernetes.node_remove_label'](
node_name=node,
label_name=name,
**kwargs)
ret['result'] = True
ret['changes'] = {
'kubernetes.node_label': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = 'Label removed from node'
return ret
def node_label_folder_absent(name, node, **kwargs):
'''
Ensures the label folder doesn't exist on the specified node.
name
The name of label folder
node
The name of the node
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
labels = __salt__['kubernetes.node_labels'](node, **kwargs)
folder = name.strip("/") + "/"
labels_to_drop = []
new_labels = []
for label in labels:
if label.startswith(folder):
labels_to_drop.append(label)
else:
new_labels.append(label)
if not labels_to_drop:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The label folder does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The label folder is going to be deleted'
ret['result'] = None
return ret
for label in labels_to_drop:
__salt__['kubernetes.node_remove_label'](
node_name=node,
label_name=label,
**kwargs)
ret['result'] = True
ret['changes'] = {
'kubernetes.node_label_folder_absent': {
'new': new_labels, 'old': labels.keys()}}
ret['comment'] = 'Label folder removed from node'
return ret
def node_label_present(
name,
node,
value,
**kwargs):
'''
Ensures that the named label is set on the named node
with the given value.
If the label exists it will be replaced.
name
The name of the label.
value
Value of the label.
node
Node to change.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
labels = __salt__['kubernetes.node_labels'](node, **kwargs)
if name not in labels:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The label is going to be set'
return ret
__salt__['kubernetes.node_add_label'](label_name=name,
label_value=value,
node_name=node,
**kwargs)
elif labels[name] == value:
ret['result'] = True
ret['comment'] = 'The label is already set and has the specified value'
return ret
else:
if __opts__['test']:
ret['result'] = None
return ret
ret['comment'] = 'The label is already set, changing the value'
__salt__['kubernetes.node_add_label'](
node_name=node,
label_name=name,
label_value=value,
**kwargs)
old_labels = copy.copy(labels)
labels[name] = value
ret['changes']['{0}.{1}'.format(node, name)] = {
'old': old_labels,
'new': labels}
ret['result'] = True
return ret
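A minimal sketch of combining the label states in an SLS file; the node name, label keys, and values are made up for illustration:

    # hypothetical SLS snippet for the node label states
    label-minion-1:
      kubernetes.node_label_present:
        - name: example.com/zone        # label key
        - node: minion-1                # node to modify
        - value: eu-west-1

    drop-legacy-labels:
      kubernetes.node_label_folder_absent:
        - name: legacy.example.com      # strips every label under this prefix
        - node: minion-1

If the label already exists with a different value, node_label_present overwrites it, as the 'changing the value' branch above does.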

View file

@ -163,10 +163,11 @@ functions at once the following way:
- user: myuser
- opts: '--all'
By default this behaviour is not turned on. In ordder to do so, please add the following
By default this behaviour is not turned on. In order to do so, please add the following
configuration to the minion:
.. code-block:: yaml
use_superseded:
- module.run
@ -214,11 +215,12 @@ def wait(name, **kwargs):
watch = salt.utils.alias_function(wait, 'watch')
@with_deprecated(globals(), "Fluorine", policy=with_deprecated.OPT_IN)
@with_deprecated(globals(), "Sodium", policy=with_deprecated.OPT_IN)
def run(**kwargs):
'''
Run a single module function or a range of module functions in a batch.
Supersedes `module.run` function, which requires `m_` prefix to function-specific parameters.
Supersedes ``module.run`` function, which requires ``m_`` prefix to
function-specific parameters.
:param returner:
Specify a common returner for the whole batch to send the return data
@ -227,8 +229,9 @@ def run(**kwargs):
Pass any arguments needed to execute the function(s)
.. code-block:: yaml
some_id_of_state:
module.xrun:
module.run:
- network.ip_addrs:
- interface: eth0
- cloud.create:
@ -424,16 +427,30 @@ def _run(name, **kwargs):
ret['result'] = False
return ret
if aspec.varargs and aspec.varargs in kwargs:
varargs = kwargs.pop(aspec.varargs)
if aspec.varargs:
if aspec.varargs == 'name':
rarg = 'm_name'
elif aspec.varargs == 'fun':
rarg = 'm_fun'
elif aspec.varargs == 'names':
rarg = 'm_names'
elif aspec.varargs == 'state':
rarg = 'm_state'
elif aspec.varargs == 'saltenv':
rarg = 'm_saltenv'
else:
rarg = aspec.varargs
if not isinstance(varargs, list):
msg = "'{0}' must be a list."
ret['comment'] = msg.format(aspec.varargs)
ret['result'] = False
return ret
if rarg in kwargs:
varargs = kwargs.pop(rarg)
args.extend(varargs)
if not isinstance(varargs, list):
msg = "'{0}' must be a list."
ret['comment'] = msg.format(aspec.varargs)
ret['result'] = False
return ret
args.extend(varargs)
nkwargs = {}
if aspec.keywords and aspec.keywords in kwargs:
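The change above extends the legacy state's ``m_`` prefix convention to varargs, so a wrapped function whose own parameter would collide with the state system (``name``, ``names``, ``fun``, and so on) can still receive it. As a hedged sketch, legacy module.run usage with the prefix looks roughly like this (function and values are illustrative):

    # hypothetical legacy-style module.run call; the wrapped function's own
    # ``name`` argument collides with the state's ``name``, so it is passed
    # with the ``m_`` prefix and remapped inside _run()
    add_mgmt_user:
      module.run:
        - name: user.add
        - m_name: mgmt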

View file

@ -1871,7 +1871,7 @@ def downloaded(name,
ignore_epoch=None,
**kwargs):
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
Ensure that the package is downloaded, and that it is the correct version
(if specified).
@ -2008,7 +2008,7 @@ def downloaded(name,
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
Ensure that packages related to certain advisory ids are installed.
@ -2088,7 +2088,7 @@ def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
def patch_downloaded(name, advisory_ids=None, **kwargs):
'''
.. versionadded:: Oxygen
.. versionadded:: 2017.7.0
Ensure that packages related to certain advisory ids are downloaded.
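For context, the states whose ``versionadded`` tags are corrected in this hunk are typically driven from SLS along these lines (the package version and advisory id are placeholders):

    # hypothetical usage of the download/patch states referenced above
    zsh:
      pkg.downloaded:
        - version: 5.0.5-4.63

    issue-foo-fixed:
      pkg.patch_installed:
        - advisory_ids:
          - SUSE-SLE-SERVER-12-SP2-2017-185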

View file

@ -55,7 +55,7 @@ import sys
import salt.ext.six as six
def _present_test(user, name, enc, comment, options, source, config):
def _present_test(user, name, enc, comment, options, source, config, fingerprint_hash_type):
'''
Run checks for "present"
'''
@ -65,7 +65,8 @@ def _present_test(user, name, enc, comment, options, source, config):
user,
source,
config,
saltenv=__env__)
saltenv=__env__,
fingerprint_hash_type=fingerprint_hash_type)
if keys:
comment = ''
for key, status in six.iteritems(keys):
@ -111,7 +112,8 @@ def _present_test(user, name, enc, comment, options, source, config):
enc,
comment,
options,
config)
config=config,
fingerprint_hash_type=fingerprint_hash_type)
if check == 'update':
comment = (
'Key {0} for user {1} is set to be updated'
@ -128,7 +130,7 @@ def _present_test(user, name, enc, comment, options, source, config):
return result, comment
def _absent_test(user, name, enc, comment, options, source, config):
def _absent_test(user, name, enc, comment, options, source, config, fingerprint_hash_type):
'''
Run checks for "absent"
'''
@ -138,7 +140,8 @@ def _absent_test(user, name, enc, comment, options, source, config):
user,
source,
config,
saltenv=__env__)
saltenv=__env__,
fingerprint_hash_type=fingerprint_hash_type)
if keys:
comment = ''
for key, status in list(keys.items()):
@ -184,7 +187,8 @@ def _absent_test(user, name, enc, comment, options, source, config):
enc,
comment,
options,
config)
config=config,
fingerprint_hash_type=fingerprint_hash_type)
if check == 'update' or check == 'exists':
comment = ('Key {0} for user {1} is set for removal').format(name, user)
else:
@ -202,6 +206,7 @@ def present(
source='',
options=None,
config='.ssh/authorized_keys',
fingerprint_hash_type=None,
**kwargs):
'''
Verifies that the specified SSH key is present for the specified user
@ -243,6 +248,17 @@ def present(
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys". Token expansion %u and
%h for username and home path supported.
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``md5`` if not specified.
.. versionadded:: 2016.11.7
.. note::
The default value of the ``fingerprint_hash_type`` will change to
``sha256`` in Salt Nitrogen.
'''
ret = {'name': name,
'changes': {},
@ -279,7 +295,7 @@ def present(
options or [],
source,
config,
)
fingerprint_hash_type)
return ret
    # Get only the path to the file without env references to check if it exists
@ -305,10 +321,11 @@ def present(
data = __salt__['ssh.set_auth_key_from_file'](
user,
source,
config,
saltenv=__env__)
config=config,
saltenv=__env__,
fingerprint_hash_type=fingerprint_hash_type)
else:
# Split keyline to get key und commen
# Split keyline to get key und comment
keyline = keyline.split(' ')
key_type = keyline[0]
key_value = keyline[1]
@ -316,18 +333,20 @@ def present(
data = __salt__['ssh.set_auth_key'](
user,
key_value,
key_type,
key_comment,
options or [],
config)
enc=key_type,
comment=key_comment,
options=options or [],
config=config,
fingerprint_hash_type=fingerprint_hash_type)
else:
data = __salt__['ssh.set_auth_key'](
user,
name,
enc,
comment,
options or [],
config)
enc=enc,
comment=comment,
options=options or [],
config=config,
fingerprint_hash_type=fingerprint_hash_type)
if data == 'replace':
ret['changes'][name] = 'Updated'
@ -369,7 +388,8 @@ def absent(name,
comment='',
source='',
options=None,
config='.ssh/authorized_keys'):
config='.ssh/authorized_keys',
fingerprint_hash_type=None):
'''
Verifies that the specified SSH key is absent
@ -401,6 +421,17 @@ def absent(name,
directory, defaults to ".ssh/authorized_keys". Token expansion %u and
%h for username and home path supported.
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``md5`` if not specified.
.. versionadded:: 2016.11.7
.. note::
The default value of the ``fingerprint_hash_type`` will change to
``sha256`` in Salt Nitrogen.
'''
ret = {'name': name,
'changes': {},
@ -416,7 +447,7 @@ def absent(name,
options or [],
source,
config,
)
fingerprint_hash_type)
return ret
# Extract Key from file if source is present
@ -434,13 +465,15 @@ def absent(name,
ret['comment'] = __salt__['ssh.rm_auth_key_from_file'](user,
source,
config,
saltenv=__env__)
saltenv=__env__,
fingerprint_hash_type=fingerprint_hash_type)
else:
# Split keyline to get key
keyline = keyline.split(' ')
ret['comment'] = __salt__['ssh.rm_auth_key'](user,
keyline[1],
config)
config=config,
fingerprint_hash_type=fingerprint_hash_type)
else:
# Get just the key
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
@ -461,7 +494,10 @@ def absent(name,
name = comps[1]
if len(comps) == 3:
comment = comps[2]
ret['comment'] = __salt__['ssh.rm_auth_key'](user, name, config)
ret['comment'] = __salt__['ssh.rm_auth_key'](user,
name,
config=config,
fingerprint_hash_type=fingerprint_hash_type)
if ret['comment'] == 'User authorized keys file not present':
ret['result'] = False
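A short sketch of feeding the new ``fingerprint_hash_type`` argument from an SLS file; the user and (truncated) key below are placeholders:

    # hypothetical ssh_auth.present call using the new argument
    AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==:
      ssh_auth.present:
        - user: root
        - enc: ssh-dss
        - fingerprint_hash_type: sha256

The same keyword is accepted by ssh_auth.absent; both states simply pass it through to the ssh execution module calls shown above.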

View file

@ -30,8 +30,8 @@ try:
# installation time.
import salt._syspaths as __generated_syspaths # pylint: disable=no-name-in-module
except ImportError:
import imp
__generated_syspaths = imp.new_module('salt._syspaths')
import types
__generated_syspaths = types.ModuleType('salt._syspaths')
for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR',
'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR',
'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR',

View file

@ -611,49 +611,61 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra
        Handle incoming messages from underlying tcp streams
'''
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error('Unexpected exception occurred: {0}'.format(exc), exc_info=True)
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
raise tornado.gen.Return()

View file

@ -12,7 +12,6 @@ import datetime
import errno
import fnmatch
import hashlib
import imp
import json
import logging
import numbers
@ -44,6 +43,10 @@ from salt.ext.six.moves import zip
from stat import S_IMODE
# pylint: enable=import-error,redefined-builtin
if six.PY3:
import importlib.util # pylint: disable=no-name-in-module,import-error
else:
import imp
try:
import cProfile
@ -808,8 +811,8 @@ def dns_check(addr, port, safe=False, ipv6=None):
if h[0] != socket.AF_INET6 or ipv6 is not None:
candidates.append(candidate_addr)
s = socket.socket(h[0], socket.SOCK_STREAM)
try:
s = socket.socket(h[0], socket.SOCK_STREAM)
s.connect((candidate_addr.strip('[]'), port))
s.close()
@ -852,7 +855,11 @@ def required_module_list(docstring=None):
modules = parse_docstring(docstring).get('deps', [])
for mod in modules:
try:
imp.find_module(mod)
if six.PY3:
if importlib.util.find_spec(mod) is None: # pylint: disable=no-member
ret.append(mod)
else:
imp.find_module(mod)
except ImportError:
ret.append(mod)
return ret

View file

@ -98,5 +98,7 @@ class SyncWrapper(object):
# Other things should be deallocated after the io_loop closes.
# See Issue #26889.
del self.async
else:
del self.io_loop
elif hasattr(self, 'io_loop'):
self.io_loop.close()
del self.io_loop

View file

@ -179,8 +179,9 @@ def get_connection(service, module=None, region=None, key=None, keyid=None,
'''
module = module or service
module, submodule = ('boto.' + module).rsplit('.', 1)
svc_mod = __import__('boto.' + module, fromlist=[module])
svc_mod = getattr(__import__(module, fromlist=[submodule]), submodule)
cxkey, region, key, keyid = _get_profile(service, region, key,
keyid, profile)

View file

@ -125,6 +125,22 @@ class ContextDict(collections.MutableMapping):
else:
return iter(self.global_data)
def __copy__(self):
new_obj = type(self)(threadsafe=self._threadsafe)
if self.active:
new_obj.global_data = copy.copy(self._state.data)
else:
new_obj.global_data = copy.copy(self.global_data)
return new_obj
def __deepcopy__(self, memo):
new_obj = type(self)(threadsafe=self._threadsafe)
if self.active:
new_obj.global_data = copy.deepcopy(self._state.data, memo)
else:
new_obj.global_data = copy.deepcopy(self.global_data, memo)
return new_obj
class ChildContextDict(collections.MutableMapping):
'''An overrideable child of ContextDict

View file

@ -245,6 +245,14 @@ def memoize(func):
@wraps(func)
def _memoize(*args, **kwargs):
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(str(arg))
else:
str_args.append(arg)
args = str_args
args_ = ','.join(list(args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)])
if args_ not in cache:
cache[args_] = func(*args, **kwargs)

View file

@ -164,6 +164,9 @@ class EtcdClient(object):
except ValueError:
return {}
if result is None:
return {}
if recurse:
ret['key'] = getattr(result, 'key', None)
ret['value'] = getattr(result, 'value', None)

View file

@ -10,6 +10,7 @@ import inspect
import logging
from salt.utils.odict import OrderedDict
from salt.utils.schema import Prepareable
import salt.ext.six as six
REQUISITES = ('listen', 'onchanges', 'onfail', 'require', 'watch', 'use', 'listen_in', 'onchanges_in', 'onfail_in', 'require_in', 'watch_in', 'use_in')
@ -288,20 +289,30 @@ class SaltObject(object):
return __wrapper__()
class MapMeta(type):
class MapMeta(six.with_metaclass(Prepareable, type)):
'''
This is the metaclass for our Map class, used for building data maps based
off of grain data.
'''
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
def __new__(cls, name, bases, attrs):
c = type.__new__(cls, name, bases, attrs)
c.__ordered_attrs__ = attrs.keys()
return c
def __init__(cls, name, bases, nmspc):
cls.__set_attributes__()
super(MapMeta, cls).__init__(name, bases, nmspc)
def __set_attributes__(cls):
match_groups = OrderedDict([])
match_info = []
grain_targets = set()
# find all of our filters
for item in cls.__dict__:
for item in cls.__ordered_attrs__:
if item[0] == '_':
continue
@ -313,8 +324,7 @@ class MapMeta(type):
# which grain are we filtering on
grain = getattr(filt, '__grain__', 'os_family')
if grain not in match_groups:
match_groups[grain] = OrderedDict([])
grain_targets.add(grain)
# does the object pointed to have a __match__ attribute?
# if so use it, otherwise use the name of the object
@ -325,19 +335,22 @@ class MapMeta(type):
else:
match = item
match_groups[grain][match] = OrderedDict([])
match_attrs = {}
for name in filt.__dict__:
if name[0] == '_':
continue
if name[0] != '_':
match_attrs[name] = filt.__dict__[name]
match_groups[grain][match][name] = filt.__dict__[name]
match_info.append((grain, match, match_attrs))
# Check for matches and update the attrs dict accordingly
attrs = {}
for grain in match_groups:
filtered = Map.__salt__['grains.filter_by'](match_groups[grain],
grain=grain)
if filtered:
attrs.update(filtered)
if match_info:
grain_vals = Map.__salt__['grains.item'](*grain_targets)
for grain, match, match_attrs in match_info:
if grain not in grain_vals:
continue
if grain_vals[grain] == match:
attrs.update(match_attrs)
if hasattr(cls, 'merge'):
pillar = Map.__salt__['pillar.get'](cls.merge)

View file

@ -885,9 +885,10 @@ class Schedule(object):
ret['success'] = False
ret['retcode'] = 254
finally:
# Only attempt to return data to the master
# if the scheduled job is running on a minion.
if '__role' in self.opts and self.opts['__role'] == 'minion':
# Only attempt to return data to the master if the scheduled job is running
# on a master itself or a minion.
if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
# The 'return_job' option is enabled by default even if not set
if 'return_job' in data and not data['return_job']:
pass
else:
@ -915,6 +916,7 @@ class Schedule(object):
log.exception("Unhandled exception firing event: {0}".format(exc))
log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
try:
os.unlink(proc_fn)
except OSError as exc:

View file

@ -8,6 +8,7 @@ Utility functions for SMB connections
from __future__ import absolute_import
# Import python libs
import salt.utils
import logging
log = logging.getLogger(__name__)
@ -96,3 +97,24 @@ def put_str(content, path, share='C$', conn=None, host=None, username=None, pass
fh_ = StrHandle(content)
conn.putFile(share, path, fh_.string)
def put_file(local_path, path, share='C$', conn=None, host=None, username=None, password=None):
'''
Wrapper around impacket.smbconnection.putFile() that allows a file to be
uploaded
Example usage:
import salt.utils.smb
smb_conn = salt.utils.smb.get_conn('10.0.0.45', 'vagrant', 'vagrant')
salt.utils.smb.put_file('/root/test.pdf', 'temp\\myfiles\\test1.pdf', conn=smb_conn)
'''
if conn is None:
conn = get_conn(host, username, password)
if conn is False:
return False
with salt.utils.fopen(local_path, 'rb') as fh_:
conn.putFile(share, path, fh_.read)

View file

@ -8,7 +8,6 @@ from __future__ import absolute_import
# Import python libs
import codecs
import os
import imp
import logging
import tempfile
import traceback
@ -19,6 +18,14 @@ import jinja2
import jinja2.ext
import salt.ext.six as six
if sys.version_info[:2] >= (3, 5):
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
USE_IMPORTLIB = True
else:
import imp
USE_IMPORTLIB = False
# Import salt libs
import salt.utils
import salt.utils.http
@ -483,10 +490,22 @@ def py(sfn, string=False, **kwargs): # pylint: disable=C0103
if not os.path.isfile(sfn):
return {}
mod = imp.load_source(
os.path.basename(sfn).split('.')[0],
sfn
)
base_fname = os.path.basename(sfn)
name = base_fname.split('.')[0]
if USE_IMPORTLIB:
# pylint: disable=no-member
loader = importlib.machinery.SourceFileLoader(name, sfn)
spec = importlib.util.spec_from_file_location(name, sfn, loader=loader)
if spec is None:
raise ImportError()
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[name] = mod
else:
mod = imp.load_source(name, sfn)
# File templates need these set as __var__
if '__env__' not in kwargs and 'saltenv' in kwargs:
setattr(mod, '__env__', kwargs['saltenv'])

View file

@ -15,6 +15,8 @@ except ImportError:
from yaml import Dumper
from yaml import SafeDumper
import yaml
import collections
from salt.utils.odict import OrderedDict
try:
@ -44,6 +46,23 @@ def represent_ordereddict(dumper, data):
OrderedDumper.add_representer(OrderedDict, represent_ordereddict)
SafeOrderedDumper.add_representer(OrderedDict, represent_ordereddict)
OrderedDumper.add_representer(
collections.defaultdict,
yaml.representer.SafeRepresenter.represent_dict
)
SafeOrderedDumper.add_representer(
collections.defaultdict,
yaml.representer.SafeRepresenter.represent_dict
)
if HAS_IOFLO:
OrderedDumper.add_representer(odict, represent_ordereddict)
SafeOrderedDumper.add_representer(odict, represent_ordereddict)
def safe_dump(data, stream=None, **kwargs):
'''
Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly
'''
return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)

View file

@ -7,6 +7,7 @@ Set up the version of Salt
from __future__ import absolute_import, print_function
import re
import sys
import locale
import platform
# linux_distribution deprecated in py3.7
@ -97,6 +98,7 @@ class SaltStackVersion(object):
'Oxygen' : (MAX_SIZE - 101, 0),
'Fluorine' : (MAX_SIZE - 100, 0),
'Neon' : (MAX_SIZE - 99, 0),
'Sodium' : (MAX_SIZE - 98, 0),
# pylint: disable=E8265
#'Sodium' : (MAX_SIZE - 98, 0),
#'Magnesium' : (MAX_SIZE - 97, 0),
@ -672,6 +674,7 @@ def system_information():
('release', release),
('machine', platform.machine()),
('version', version),
('locale', locale.getpreferredencoding()),
]
for name, attr in system:

View file

@ -0,0 +1,26 @@
#!/bin/bash
#
# Author: Bo Maryniuk <bo@suse.de>
# Requires: yum install procps
#
# Runs every minute from crontab,
# checks salt-minion every 10 seconds.
#
# Use this with a following crontab:
# * * * * * /path/to/this/script
if [ "$1" != "--with-init" ]; then
echo "This command is not used directly."
exit 1;
fi
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
for iter in {1..5}; do
if [[ $(pgrep salt-minion) == "" ]]; then
service salt-minion restart
fi
sleep 10;
done
true

View file

@ -0,0 +1,20 @@
## What it is
A plugin that provides a notification mechanism to Salt when Yum is
used outside of it.
## Installation
The configuration file goes to:
`/etc/yum/pluginconf.d/[name].conf`
The plugin itself goes to:
`/usr/share/yum-plugins/[name].py`
## Permissions
User: root
Group: root
Mode: 644
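One way to lay these files down with Salt itself is a pair of file.managed states, sketched below. The plugin name `yumnotify` and the `salt://` source paths are assumptions for illustration only.

    # hypothetical SLS deploying the plugin and its configuration
    /usr/share/yum-plugins/yumnotify.py:
      file.managed:
        - source: salt://yum-plugins/yumnotify.py
        - user: root
        - group: root
        - mode: 644

    /etc/yum/pluginconf.d/yumnotify.conf:
      file.managed:
        - source: salt://yum-plugins/yumnotify.conf
        - user: root
        - group: root
        - mode: 644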

View file

@ -0,0 +1,2 @@
[main]
enabled=1

View file

@ -0,0 +1,55 @@
# Copyright (c) 2016 SUSE Linux LLC
# All Rights Reserved.
#
# Author: Bo Maryniuk <bo@suse.de>
from yum.plugins import TYPE_CORE
from yum import config
import os
import hashlib
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = '2.5'
plugin_type = TYPE_CORE
def _get_mtime():
"""
Get the modified time of the RPM Database.
Returns:
Unix ticks
"""
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
"""
Get the checksum of the RPM Database.
Returns:
hexdigest
"""
digest = hashlib.md5()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
"""
Hook after the package installation transaction.
:param conduit:
:return:
"""
# Integrate Yum with Salt
if 'SALT_RUNNING' not in os.environ:
with open(CK_PATH, 'w') as ck_fh:
ck_fh.write('{chksum} {mtime}\n'.format(chksum=_get_checksum(), mtime=_get_mtime()))

View file

@ -0,0 +1,3 @@
# Zypper plugins
Plugins here are required to interact with SUSE Manager in conjunction with SaltStack and Zypper.

View file

@ -0,0 +1,59 @@
#!/usr/bin/python
#
# Copyright (c) 2016 SUSE Linux LLC
# All Rights Reserved.
#
# Author: Bo Maryniuk <bo@suse.de>
import sys
import os
import hashlib
from zypp_plugin import Plugin
class DriftDetector(Plugin):
"""
    Return a diff of the packages installed outside of Salt.
"""
def __init__(self):
Plugin.__init__(self)
self.ck_path = "/var/cache/salt/minion/rpmdb.cookie"
self.rpm_path = "/var/lib/rpm/Packages"
def _get_mtime(self):
'''
Get the modified time of the RPM Database.
Returns:
Unix ticks
'''
return os.path.exists(self.rpm_path) and int(os.path.getmtime(self.rpm_path)) or 0
def _get_checksum(self):
'''
Get the checksum of the RPM Database.
Returns:
hexdigest
'''
digest = hashlib.md5()
with open(self.rpm_path, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def PLUGINEND(self, headers, body):
"""
Hook when plugin closes Zypper's transaction.
"""
if 'SALT_RUNNING' not in os.environ:
with open(self.ck_path, 'w') as ck_fh:
ck_fh.write('{chksum} {mtime}\n'.format(chksum=self._get_checksum(), mtime=self._get_mtime()))
self.ack()
DriftDetector().main()

View file

@ -697,8 +697,7 @@ class Clean(clean):
INSTALL_VERSION_TEMPLATE = '''\
# This file was auto-generated by salt's setup on \
{date:%A, %d %B %Y @ %H:%m:%S UTC}.
# This file was auto-generated by salt's setup
from salt.version import SaltStackVersion

View file

@ -133,3 +133,14 @@ class RunnerModuleTest(TestCase, AdaptedConfigurationTestCaseMixin):
'quuz': 'on',
},
})
def test_invalid_kwargs_are_ignored(self):
low = {
'client': 'runner',
'fun': 'test.metasyntactic',
'thiskwargisbad': 'justpretendimnothere',
}
low.update(self.eauth_creds)
ret = self.runner.cmd_sync(low)
self.assertEqual(ret[0], 'foo')

View file

@ -10,7 +10,7 @@ import random
import string
# Import Salt Libs
from salt.config import cloud_providers_config
from salt.config import cloud_providers_config, cloud_config
# Import Salt Testing LIbs
from tests.support.case import ShellCase
@ -90,12 +90,25 @@ class VMWareTest(ShellCase):
Tests creating and deleting an instance on vmware and installing salt
'''
# create the instance
profile = os.path.join(
FILES,
'conf',
'cloud.profiles.d',
PROVIDER_NAME + '.conf'
)
profile_config = cloud_config(profile)
disk_datastore = profile_config['vmware-test']['devices']['disk']['Hard disk 2']['datastore']
instance = self.run_cloud('-p vmware-test {0}'.format(INSTANCE_NAME), timeout=TIMEOUT)
ret_str = '{0}:'.format(INSTANCE_NAME)
disk_datastore_str = ' [{0}] {1}/Hard disk 2-flat.vmdk'.format(disk_datastore, INSTANCE_NAME)
# check if instance returned with salt installed
try:
self.assertIn(ret_str, instance)
self.assertIn(disk_datastore_str, instance,
msg='Hard Disk 2 did not use the Datastore {0} '.format(disk_datastore))
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT)
raise

View file

@ -7,6 +7,8 @@ vmware-test:
disk:
Hard disk 1:
size: 30
Hard disk 2:
size: 5
datastore: ''
resourcepool: ''
datastore: ''

View file

@ -23,6 +23,7 @@ from multiprocessing import Queue
import msgpack
# Import Salt libs
import salt.ext.six as six
import salt.log.setup
log = logging.getLogger(__name__)
@ -33,6 +34,8 @@ __virtualname__ = 'runtests_log_handler'
def __virtual__():
if 'runtests_log_port' not in __opts__:
return False, "'runtests_log_port' not in options"
if six.PY3:
return False, "runtests external logging handler is temporarily disabled for Python 3 tests"
return True

View file

@ -58,7 +58,7 @@ class BeaconsTest(ModuleCase):
@classmethod
def tearDownClass(cls):
if os.path.isfile(cls.beacons_config_file_path):
if cls.beacons_config_file_path and os.path.isfile(cls.beacons_config_file_path):
os.unlink(cls.beacons_config_file_path)
def setUp(self):

View file

@ -258,7 +258,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
keys = ret.keys()
self.assertIn('rpm', keys)
self.assertIn('yum', keys)
elif os_family == 'SUSE':
elif os_family == 'Suse':
ret = self.run_function(func, ['less', 'zypper'])
keys = ret.keys()
self.assertIn('less', keys)

View file

@ -448,7 +448,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
if self.options.coverage and any((
self.options.name,
is_admin,
not is_admin,
not self.options.run_destructive)) \
and self._check_enabled_suites(include_unit=True):
self.error(

View file

@ -177,6 +177,7 @@ class TestCase(_TestCase):
def run(self, result=None):
self._prerun_instance_attributes = dir(self)
self.maxDiff = None
outcome = super(TestCase, self).run(result=result)
for attr in dir(self):
if attr == '_prerun_instance_attributes':

View file

@ -18,12 +18,19 @@ from tests.support.mock import (
# Import Salt libs
import salt.config
import salt.utils
MOCK_MASTER_DEFAULT_OPTS = {
'log_file': '/var/log/salt/master',
'pidfile': '/var/run/salt-master.pid',
'root_dir': '/'
}
if salt.utils.is_windows():
MOCK_MASTER_DEFAULT_OPTS = {
'log_file': 'c:\\salt\\var\\log\\salt\\master',
'pidfile': 'c:\\salt\\var\\run\\salt-master.pid',
'root_dir': 'c:\\salt'
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@ -46,8 +53,13 @@ class APIConfigTestCase(TestCase):
the DEFAULT_API_OPTS 'api_logfile' value.
'''
with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)):
expected = '/var/log/salt/api'
if salt.utils.is_windows():
expected = 'c:\\salt\\var\\log\\salt\\api'
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['log_file'], '/var/log/salt/api')
self.assertEqual(ret['log_file'], expected)
def test_api_config_pidfile_values(self):
'''
@ -56,8 +68,13 @@ class APIConfigTestCase(TestCase):
the DEFAULT_API_OPTS 'api_pidfile' value.
'''
with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)):
expected = '/var/run/salt-api.pid'
if salt.utils.is_windows():
expected = 'c:\\salt\\var\\run\\salt-api.pid'
ret = salt.config.api_config('/some/fake/path')
self.assertEqual(ret['pidfile'], '/var/run/salt-api.pid')
self.assertEqual(ret['pidfile'], expected)
@destructiveTest
def test_master_config_file_overrides_defaults(self):
@ -68,6 +85,10 @@ class APIConfigTestCase(TestCase):
'''
foo_dir = '/foo/bar/baz'
hello_dir = '/hello/world'
if salt.utils.is_windows():
foo_dir = 'c:\\foo\\bar\\baz'
hello_dir = 'c:\\hello\\world'
mock_master_config = {
'api_pidfile': foo_dir,
'api_logfile': hello_dir,
@ -98,6 +119,11 @@ class APIConfigTestCase(TestCase):
mock_master_config = MOCK_MASTER_DEFAULT_OPTS.copy()
mock_master_config['root_dir'] = '/mock/root/'
if salt.utils.is_windows():
mock_log = 'c:\\mock\\root\\var\\log\\salt\\api'
mock_pid = 'c:\\mock\\root\\var\\run\\salt-api.pid'
mock_master_config['root_dir'] = 'c:\\mock\\root'
with patch('salt.config.client_config',
MagicMock(return_value=mock_master_config)):
ret = salt.config.api_config('/some/fake/path')

View file

@ -106,17 +106,26 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
def test_proper_path_joining(self):
fpath = tempfile.mktemp()
temp_config = 'root_dir: /\n'\
'key_logfile: key\n'
if salt.utils.is_windows():
temp_config = 'root_dir: c:\\\n'\
'key_logfile: key\n'
try:
with salt.utils.fopen(fpath, 'w') as fp_:
fp_.write(
'root_dir: /\n'
'key_logfile: key\n'
)
fp_.write(temp_config)
config = sconfig.master_config(fpath)
expect_path_join = os.path.join('/', 'key')
expect_sep_join = '//key'
if salt.utils.is_windows():
expect_path_join = os.path.join('c:\\', 'key')
expect_sep_join = 'c:\\\\key'
# os.path.join behavior
self.assertEqual(config['key_logfile'], os.path.join('/', 'key'))
self.assertEqual(config['key_logfile'], expect_path_join)
# os.sep.join behavior
self.assertNotEqual(config['key_logfile'], '//key')
self.assertNotEqual(config['key_logfile'], expect_sep_join)
finally:
if os.path.isfile(fpath):
os.unlink(fpath)
@ -157,6 +166,9 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
@skipIf(
salt.utils.is_windows(),
'You can\'t set an environment dynamically in Windows')
def test_load_master_config_from_environ_var(self):
original_environ = os.environ.copy()
@ -201,6 +213,9 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
@skipIf(
salt.utils.is_windows(),
'You can\'t set an environment dynamically in Windows')
def test_load_minion_config_from_environ_var(self):
original_environ = os.environ.copy()
@ -589,6 +604,9 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
search_paths = sconfig.cloud_config('/etc/salt/cloud').get('deploy_scripts_search_path')
etc_deploy_path = '/salt/cloud.deploy.d'
deploy_path = '/salt/cloud/deploy'
if salt.utils.is_windows():
etc_deploy_path = '/salt\\cloud.deploy.d'
deploy_path = '\\salt\\cloud\\deploy'
# Check cloud.deploy.d path is the first element in the search_paths tuple
self.assertTrue(search_paths[0].endswith(etc_deploy_path))
@ -1007,6 +1025,9 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
# other cloud configuration tests
@skipIf(
salt.utils.is_windows(),
'You can\'t set an environment dynamically in Windows')
def test_load_cloud_config_from_environ_var(self):
original_environ = os.environ.copy()

View file

@ -15,6 +15,7 @@ from tests.support.mock import (
# Import Salt Libs
import salt.modules.augeas_cfg as augeas_cfg
from salt.exceptions import SaltInvocationError
import salt.ext.six as six
# Make sure augeas python interface is installed
if augeas_cfg.HAS_AUGEAS:
from augeas import Augeas as _Augeas
@ -26,7 +27,7 @@ class AugeasCfgTestCase(TestCase):
Test cases for salt.modules.augeas_cfg
'''
# 'execute' function tests: 3
@skipIf(six.PY3, 'Disabled pending https://github.com/hercules-team/python-augeas/issues/30')
def test_execute(self):
'''
        Test if it executes Augeas commands

View file

@ -45,7 +45,6 @@ except ImportError:
# Import Salt Libs
import salt.config
import salt.ext.six as six
import salt.loader
import salt.modules.boto_elb as boto_elb
@ -67,7 +66,6 @@ instance_parameters = {'instance_type': 't1.micro'}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(six.PY3, 'Running tests with Python 3. These tests need to be rewritten to support Py3.')
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
class BotoElbTestCase(TestCase, LoaderModuleMockMixin):

View file

@ -124,12 +124,22 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(disk.__salt__, {'cmd.run': mock}):
mock_dump = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch('salt.modules.disk.dump', mock_dump):
kwargs = {'read-ahead': 512, 'filesystem-read-ahead': 512}
kwargs = {'read-ahead': 512, 'filesystem-read-ahead': 1024}
disk.tune('/dev/sda', **kwargs)
mock.assert_called_once_with(
'blockdev --setra 512 --setfra 512 /dev/sda',
python_shell=False
)
mock.assert_called_once()
args, kwargs = mock.call_args
# Assert called once with either 'blockdev --setra 512 --setfra 512 /dev/sda' or
# 'blockdev --setfra 512 --setra 512 /dev/sda' and python_shell=False kwarg.
self.assertEqual(len(args), 1)
self.assertTrue(args[0].startswith('blockdev '))
self.assertTrue(args[0].endswith(' /dev/sda'))
self.assertIn(' --setra 512 ', args[0])
self.assertIn(' --setfra 1024 ', args[0])
self.assertEqual(len(args[0].split()), 6)
self.assertEqual(kwargs, {'python_shell': False})
@skipIf(not salt.utils.which('sync'), 'sync not found')
@skipIf(not salt.utils.which('mkfs'), 'mkfs not found')

View file

@ -189,7 +189,7 @@ class DjangomodCliCommandTestCase(TestCase, LoaderModuleMockMixin):
djangomod.createsuperuser(
'settings.py', 'testuser', 'user@example.com'
)
mock.assert_called_once()
self.assertEqual(mock.call_count, 1)
args, kwargs = mock.call_args
# cmdline arguments are extracted from a kwargs dict so order isn't guaranteed.
self.assertEqual(len(args), 1)

View file

@ -126,7 +126,7 @@ gcc-6-base:i386
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='SUSE')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')

View file

@ -28,7 +28,7 @@ except ImportError:
import salt.modules.junos as junos
@skipIf(not HAS_JUNOS, 'Missing dependencies')
@skipIf(not HAS_JUNOS, 'Install junos-eznc to be able to run this test.')
class Test_Junos_Module(TestCase, LoaderModuleMockMixin, XMLEqualityMixin):
def setup_loader_modules(self):

View file

@ -18,9 +18,6 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import salt libs
import salt.modules.mdadm as mdadm
# Import 3rd-party libs
import salt.ext.six as six
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MdadmTestCase(TestCase, LoaderModuleMockMixin):
@ -40,32 +37,29 @@ class MdadmTestCase(TestCase, LoaderModuleMockMixin):
chunk=256
)
self.assertEqual('salt', ret)
if six.PY2:
expected_args = [
'mdadm',
'-C', '/dev/md0',
'-R',
'-v',
'--chunk', '256',
'--force',
'-l', '5',
'-e', 'default',
'-n', '3',
'/dev/sdb1', '/dev/sdc1', '/dev/sdd1']
else:
expected_args = [
'mdadm',
'-C', '/dev/md0',
'-R',
'-v',
'--force',
'--chunk', '256',
'-l', '5',
'-e', 'default',
'-n', '3',
'/dev/sdb1', '/dev/sdc1', '/dev/sdd1'
]
mock.assert_called_once_with(expected_args, python_shell=False)
# Only available in 3.6 and above on py3
if hasattr(mock, 'assert_called_once'):
mock.assert_called_once()
args, kwargs = mock.call_args
# expected cmd is
# mdadm -C /dev/md0 -R -v --chunk 256 --force -l 5 -e default -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
# where args between -v and -l could be in any order
self.assertEqual(len(args), 1)
self.assertEqual(len(args[0]), 17)
self.assertEqual(args[0][:5], [
'mdadm',
'-C', '/dev/md0',
'-R',
'-v'])
self.assertEqual(args[0][8:], [
'-l', '5',
'-e', 'default',
'-n', '3',
'/dev/sdb1', '/dev/sdc1', '/dev/sdd1'])
self.assertEqual(sorted(args[0][5:8]), sorted(['--chunk', '256', '--force']))
self.assertIn('--chunk 256', ' '.join(args[0][5:8]))
self.assertEqual(kwargs, {'python_shell': False})
def test_create_test_mode(self):
mock = MagicMock()

View file

@ -92,7 +92,7 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
empty_line = '\n'
comment_line = '# this is a comment \n'
# Write out the authorized key to a temporary file
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
temp_file = tempfile.NamedTemporaryFile(delete=False)
# Add comment
temp_file.write(comment_line)
# Add empty line for #41335

View file

@ -20,6 +20,7 @@ from tests.support.mock import (
# Import Salt Libs
import salt.utils
import salt.utils.odict
import salt.modules.state as state
from salt.exceptions import SaltInvocationError
@ -37,7 +38,11 @@ class MockState(object):
'''
flag = None
def __init__(self, opts, pillar=False, pillar_enc=None):
def __init__(self,
opts,
pillar_override=False,
pillar_enc=None,
initial_pillar=None):
pass
def verify_data(self, data):
@ -135,10 +140,10 @@ class MockState(object):
opts = {'state_top': '',
'pillar': {}}
def __init__(self, opts, pillar=None, *args, **kwargs):
self.building_highstate = {}
def __init__(self, opts, pillar_override=None, *args, **kwargs):
self.building_highstate = salt.utils.odict.OrderedDict
self.state = MockState.State(opts,
pillar=pillar)
pillar_override=pillar_override)
def render_state(self, sls, saltenv, mods, matches, local=False):
'''
@ -342,7 +347,15 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
patcher = patch('salt.modules.state.salt.state', MockState())
patcher.start()
self.addCleanup(patcher.stop)
return {state: {'__opts__': {'cachedir': '/D'}}}
return {
state: {
'__opts__': {
'cachedir': '/D',
'environment': None,
'__cli': 'salt',
},
},
}
def test_running(self):
'''
@ -605,7 +618,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(state.sls_id("apache", "http"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
@ -629,7 +645,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(state.show_low_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
MockState.State.flag = True
MockState.HighState.flag = True
@ -648,7 +667,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(state.show_sls("foo"), "A")
with patch.dict(state.__opts__, {"test": "A"}):
mock = MagicMock(return_value={'test': True})
mock = MagicMock(
return_value={'test': True,
'environment': None}
)
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils, 'test_mode', mock):
@ -827,7 +849,6 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(state.__context__, {"retcode": 1}):
self.assertEqual(
state.sls("core,edit.vim dev",
None,
None,
None,
True),
@ -842,13 +863,13 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(state.__context__, {"retcode": 5}):
with patch.dict(state.__pillar__, {"_errors": "E1"}):
self.assertListEqual(state.sls("core,edit.vim dev",
None,
None,
None,
True), ret)
with patch.dict(state.__opts__, {"test": None}):
mock = MagicMock(return_value={"test": ""})
mock = MagicMock(return_value={"test": "",
"environment": None})
with patch.object(state, '_get_opts', mock):
mock = MagicMock(return_value=True)
with patch.object(salt.utils,
@ -860,7 +881,6 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
"core,edit.vim dev",
None,
None,
None,
True,
pillar="A")
@ -877,7 +897,6 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
mock_open()):
self.assertTrue(
state.sls(arg,
None,
None,
None,
True,
@ -891,7 +910,6 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
".vim dev",
None,
None,
None,
True)
)
@ -928,7 +946,6 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
".vim dev",
None,
None,
None,
True))
def test_pkg(self):

View file

@ -24,34 +24,54 @@ class CertUtilTestCase(TestCase, LoaderModuleMockMixin):
'''
Test getting the serial number from a certificate
'''
expected = 'XYZABC'
mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
expected = '180720d39cd2db3244ba037417241e90'
mock = MagicMock(return_value=(
'CertInfo\r\n'
'Cert Serial Number: 180720d39cd2db3244ba037417241e90\r\n'
'\r\n'
'OtherStuff'))
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_cert_serial('/path/to/cert.cer')
mock.assert_called_once_with('certutil.exe -verify /path/to/cert.cer')
mock.assert_called_once_with(
'certutil.exe -silent -verify /path/to/cert.cer')
self.assertEqual(expected, out)
def test_get_serials(self):
'''
Test getting the all the serial numbers from a store
Test getting all the serial numbers from a store
'''
expected = ['XYZABC', '123456']
mock = MagicMock(return_value='CertInfo\r\nSerial Number: XYZABC\r\nSerial Number: 123456\r\n')
expected = ['180720d39cd2db3244ba037417241e90',
'1768ac4e5b72bf1d0df0df118b34b959']
mock = MagicMock(return_value=(
'CertInfo\r\n'
'================ Certificate 0 ================\r\n'
'Serial Number: 180720d39cd2db3244ba037417241e90\r\n'
'OtherStuff\r\n'
'\r\n'
'================ Certificate 1 ================\r\n'
'Serial Number: 1768ac4e5b72bf1d0df0df118b34b959\r\n'
'OtherStuff'))
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_stored_cert_serials('TrustedPublisher')
mock.assert_called_once_with('certutil.exe -store TrustedPublisher')
mock.assert_called_once_with(
'certutil.exe -store TrustedPublisher')
self.assertEqual(expected, out)
def test_add_store(self):
'''
Test adding a certificate to a specific store
'''
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cmd_mock = MagicMock(return_value=(
'CertInfo\r\n'
'================ Certificate 0 ================\r\n'
'Serial Number: 180720d39cd2db3244ba037417241e90\r\n'
'OtherStuff'))
cache_mock = MagicMock(return_value='/tmp/cert.cer')
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.add_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -addstore TrustedPublisher /tmp/cert.cer')
cmd_mock.assert_called_once_with(
'certutil.exe -addstore TrustedPublisher /tmp/cert.cer')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')
def test_del_store(self):
@ -59,11 +79,16 @@ class CertUtilTestCase(TestCase, LoaderModuleMockMixin):
        Test removing a certificate from a specific store
'''
with patch('salt.modules.win_certutil.get_cert_serial') as cert_serial_mock:
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cmd_mock = MagicMock(return_value=(
'CertInfo\r\n'
'================ Certificate 0 ================\r\n'
'Serial Number: 180720d39cd2db3244ba037417241e90\r\n'
'OtherStuff'))
cache_mock = MagicMock(return_value='/tmp/cert.cer')
cert_serial_mock.return_value = "ABCDEF"
cert_serial_mock.return_value = 'ABCDEF'
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.del_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -delstore TrustedPublisher ABCDEF')
cmd_mock.assert_called_once_with(
'certutil.exe -delstore TrustedPublisher ABCDEF')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')

View file

@ -10,7 +10,7 @@ import salt.ext.six as six
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, Mock, patch, ANY
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch, ANY
# wmi and pythoncom modules are platform specific...
wmi = types.ModuleType('wmi')
@ -29,7 +29,9 @@ if NO_MOCK is False:
import salt.modules.win_status as status
@skipIf(NO_MOCK or sys.stdin.encoding != 'UTF8', 'Mock is not installed or encoding not supported')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(sys.stdin.encoding != 'UTF-8', 'UTF-8 encoding required for this test is not supported')
@skipIf(status.HAS_WMI is False, 'This test requires Windows')
class TestProcsBase(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)

View file

@ -391,17 +391,23 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
with patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
ret = zypper.upgrade(dist_upgrade=True, dryrun=True)
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run')
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run', '--debug-solver')
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses',
'--dry-run', '--debug-solver')
with patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}])):
ret = zypper.upgrade(dist_upgrade=True, dryrun=True, fromrepo=["Dummy", "Dummy2"], novendorchange=True)
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run', '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change')
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run', '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change', '--debug-solver')
ret = zypper.upgrade(dist_upgrade=True, dryrun=True,
fromrepo=["Dummy", "Dummy2"], novendorchange=True)
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run',
'--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change')
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--dry-run',
'--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change',
'--debug-solver')
with patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}])):
ret = zypper.upgrade(dist_upgrade=True, fromrepo=["Dummy", "Dummy2"], novendorchange=True)
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--from', "Dummy", '--from', 'Dummy2', '--no-allow-vendor-change')
zypper_mock.assert_any_call('dist-upgrade', '--auto-agree-with-licenses', '--from', "Dummy",
'--from', 'Dummy2', '--no-allow-vendor-change')
def test_upgrade_kernel(self):
'''
@ -412,7 +418,8 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(zypper.__grains__, {'osrelease_info': [12, 1]}), \
patch('salt.modules.zypper.refresh_db', MagicMock(return_value=True)), \
patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False)):
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=(['kernel-default'], None))}):
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=(['kernel-default'],
None))}):
with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()):
with patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[
{"kernel-default": "3.12.49-11.1"}, {"kernel-default": "3.12.49-11.1,3.12.51-60.20.2"}])):
@ -455,7 +462,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
ret = zypper.upgrade(dist_upgrade=True, fromrepo=["DUMMY"])
self.assertEqual(cmd_exc.exception.info['changes'], {})
self.assertEqual(cmd_exc.exception.info['result']['stdout'], zypper_out)
zypper_mock.noraise.call.assert_called_with('dist-upgrade', '--auto-agree-with-licenses', '--from', 'DUMMY')
zypper_mock.noraise.call.assert_called_with('dist-upgrade', '--auto-agree-with-licenses',
'--from', 'DUMMY')
def test_upgrade_available(self):
'''
@ -548,7 +556,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
}
}
with patch.dict(zypper.__salt__, {'lowpkg.bin_pkg_info': MagicMock(return_value={'name': 'test-package', 'version': '1.0'})}):
with patch.dict(zypper.__salt__, {'lowpkg.bin_pkg_info': MagicMock(return_value={'name': 'test-package',
'version': '1.0'})}):
list_downloaded = zypper.list_downloaded()
self.assertEqual(len(list_downloaded), 1)
self.assertDictEqual(list_downloaded, DOWNLOADED_RET)
@ -579,14 +588,19 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.assertEqual(zypper.download("nmap", "foo"), test_out)
@patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
@patch('salt.modules.zypper.list_downloaded', MagicMock(side_effect=[{}, {'vim': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2009-02-13T23:31:30'}}}]))
@patch('salt.modules.zypper.list_downloaded',
MagicMock(side_effect=[{}, {'vim': {'1.1': {'path': '/foo/bar/test.rpm',
'size': 1234,
'creation_date_time_t': 1234567890,
'creation_date_time': '2009-02-13T23:31:30'}}}]))
def test_install_with_downloadonly(self):
'''
Test a package installation with downloadonly=True.
:return:
'''
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None}, 'repository'))}):
with patch.dict(zypper.__salt__,
{'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None}, 'repository'))}):
with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
ret = zypper.install(pkgs=['vim'], downloadonly=True)
zypper_mock.assert_called_once_with(
@ -597,17 +611,27 @@ Repository 'DUMMY' not found by its alias, number, or URI.
'--download-only',
'vim'
)
self.assertDictEqual(ret, {'vim': {'new': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2009-02-13T23:31:30'}}, 'old': ''}})
self.assertDictEqual(ret, {'vim': {'new': {'1.1': {'path': '/foo/bar/test.rpm',
'size': 1234,
'creation_date_time_t': 1234567890,
'creation_date_time': '2009-02-13T23:31:30'}},
'old': ''}})
@patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
@patch('salt.modules.zypper.list_downloaded', MagicMock(return_value={'vim': {'1.1': {'path': '/foo/bar/test.rpm', 'size': 1234, 'creation_date_time_t': 1234567890, 'creation_date_time': '2017-01-01T11:00:00'}}}))
@patch('salt.modules.zypper.list_downloaded',
MagicMock(return_value={'vim': {'1.1': {'path': '/foo/bar/test.rpm',
'size': 1234,
'creation_date_time_t': 1234567890,
'creation_date_time': '2017-01-01T11:00:00'}}}))
def test_install_with_downloadonly_already_downloaded(self):
'''
Test a package installation with downloadonly=True when package is already downloaded.
:return:
'''
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None}, 'repository'))}):
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'vim': None},
'repository'))}):
with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
ret = zypper.install(pkgs=['vim'], downloadonly=True)
zypper_mock.assert_called_once_with(
@ -621,7 +645,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.assertDictEqual(ret, {})
@patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
@patch('salt.modules.zypper._get_patches', MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
@patch('salt.modules.zypper._get_patches',
MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
@patch('salt.modules.zypper.list_pkgs', MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]))
def test_install_advisory_patch_ok(self):
'''
@ -629,7 +654,9 @@ Repository 'DUMMY' not found by its alias, number, or URI.
:return:
'''
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-1234': None}, 'advisory'))}):
with patch.dict(zypper.__salt__,
{'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-1234': None},
'advisory'))}):
with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
ret = zypper.install(advisory_ids=['SUSE-PATCH-1234'])
zypper_mock.assert_called_once_with(
@ -642,7 +669,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
@patch('salt.modules.zypper._systemd_scope', MagicMock(return_value=False))
@patch('salt.modules.zypper._get_patches', MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
@patch('salt.modules.zypper._get_patches',
MagicMock(return_value={'SUSE-PATCH-1234': {'installed': False, 'summary': 'test'}}))
@patch('salt.modules.zypper.list_pkgs', MagicMock(return_value={"vim": "1.1"}))
def test_install_advisory_patch_failure(self):
'''
@ -650,7 +678,8 @@ Repository 'DUMMY' not found by its alias, number, or URI.
:return:
'''
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-XXX': None}, 'advisory'))}):
with patch.dict(zypper.__salt__,
{'pkg_resource.parse_targets': MagicMock(return_value=({'SUSE-PATCH-XXX': None}, 'advisory'))}):
with patch('salt.modules.zypper.__zypper__.noraise.call', MagicMock()) as zypper_mock:
with self.assertRaisesRegex(CommandExecutionError, '^Advisory id "SUSE-PATCH-XXX" not found$'):
zypper.install(advisory_ids=['SUSE-PATCH-XXX'])

View file

@ -9,11 +9,10 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON
# Import Salt Libs
import salt.pillar.mysql as mysql
from salt.ext.six import PY3
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(PY3, 'MySQL-python is not compatible with python3')
@skipIf(not mysql.HAS_MYSQL, 'Install MySQL bindings before running MySQL unit tests.')
class MysqlPillarTestCase(TestCase):
maxDiff = None

View file

@ -73,9 +73,15 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
keywords=None,
defaults=False)
cls.bspec = ArgSpec(args=[],
varargs='names',
keywords='kwargs',
defaults=None)
@classmethod
def tearDownClass(cls):
del cls.aspec
del cls.bspec
def test_run_module_not_available(self):
'''
@ -88,6 +94,16 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin):
assert ret['comment'] == "Unavailable function: {0}.".format(CMD)
assert not ret['result']
def test_module_run_hidden_varargs(self):
'''
        Tests the return of the module.run state when hidden varargs are used with
        the wrong type.
'''
with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=self.bspec)):
ret = module._run(CMD, m_names='anyname')
comment = "'names' must be a list."
self.assertEqual(ret['comment'], comment)
def test_run_testmode(self):
'''
Tests the return of the module.run state when test=True is passed.

Some files were not shown because too many files have changed in this diff.