Merge branch 'develop' into shade

Daniel Wallace 2017-12-11 08:53:13 -07:00 committed by GitHub
commit 9e5d6c03f8
192 changed files with 18108 additions and 10568 deletions

View file

@ -36,7 +36,7 @@
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
# key_logfile, pidfile, autosign_grains_dir:
#root_dir: /
# The path to the master's configuration file.
@ -351,6 +351,11 @@
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
@ -1297,4 +1302,3 @@
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1

View file

@ -666,6 +666,12 @@
# certfile: <path_to_certfile>
# ssl_version: PROTOCOL_TLSv1_2
# Grains to be sent to the master on authentication to check if the minion's key
# will be accepted automatically. Needs to be configured on the master.
#autosign_grains:
# - uuid
# - server_id
###### Reactor Settings #####
###########################################

View file

@ -37,7 +37,7 @@ syndic_user: salt
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
# key_logfile, pidfile, autosign_grains_dir:
#root_dir: /
# The path to the master's configuration file.
@ -320,6 +320,11 @@ syndic_user: salt
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
@ -1248,4 +1253,3 @@ syndic_user: salt
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1

View file

@ -140,7 +140,8 @@ an alternative root.
This directory is prepended to the following options:
:conf_master:`pki_dir`, :conf_master:`cachedir`, :conf_master:`sock_dir`,
:conf_master:`log_file`, :conf_master:`autosign_file`,
:conf_master:`autoreject_file`, :conf_master:`pidfile`.
:conf_master:`autoreject_file`, :conf_master:`pidfile`,
:conf_master:`autosign_grains_dir`.
.. conf_master:: conf_file
@ -1321,6 +1322,32 @@ minion IDs for which keys will automatically be rejected. Will override both
membership in the :conf_master:`autosign_file` and the
:conf_master:`auto_accept` setting.
.. conf_master:: autosign_grains_dir
``autosign_grains_dir``
-----------------------
.. versionadded:: Oxygen
Default: ``not defined``
If the ``autosign_grains_dir`` is specified, incoming keys from minions with
grain values that match those defined in files in the autosign_grains_dir
will be accepted automatically. Grain values that should be accepted automatically
can be defined by creating a file named like the corresponding grain in the
autosign_grains_dir and writing the values into that file, one value per line.
Lines starting with a ``#`` will be ignored.
Minions must be configured to send the corresponding grains on authentication.
This should still be considered a less-than-secure option, because trust is
based solely on the grains reported by the requesting minion.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more information.
.. code-block:: yaml
autosign_grains_dir: /etc/salt/autosign_grains
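The matching logic itself is straightforward. The following is a minimal,
illustrative sketch only (not Salt's actual implementation), showing how a
reported grain value could be checked against the corresponding file using the
one-value-per-line and ``#``-comment rules described above:

.. code-block:: python

    import os

    def grain_autosign_ok(autosign_grains_dir, grain, value):
        # Hypothetical helper: accept the key if the reported grain value
        # matches any non-comment line in <autosign_grains_dir>/<grain>.
        path = os.path.join(autosign_grains_dir, grain)
        if not os.path.isfile(path):
            return False
        with open(path) as handle:
            for line in handle:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue  # blank lines and comments are ignored
                if line == value:
                    return True
        return False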
.. conf_master:: permissive_pki_access
``permissive_pki_access``

View file

@ -321,6 +321,117 @@ option on the Salt master.
master_port: 4506
.. conf_minion:: source_interface_name
``source_interface_name``
-------------------------
.. versionadded:: Oxygen
The name of the interface to use when establishing the connection to the Master.
.. note::
If multiple IP addresses are configured on the named interface,
the first one will be selected. In that case, for a better selection,
consider using the :conf_minion:`source_address` option.
.. note::
To use an IPv6 address from the named interface, make sure the option
:conf_minion:`ipv6` is enabled, i.e., ``ipv6: true``.
.. note::
If the interface is down, it will not be used; the Minion
will instead bind to ``0.0.0.0`` (all interfaces).
.. warning::
This option requires a modern version of the underlying libraries used by
the selected transport:
- ``zeromq`` requires ``pyzmq`` >= 16.0.1 and ``libzmq`` >= 4.1.6
- ``tcp`` requires ``tornado`` >= 4.5
Configuration example:
.. code-block:: yaml
source_interface_name: bond0.1234
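For illustration only, the sketch below (not Salt's implementation, and
assuming the third-party ``psutil`` library is available) shows what selecting
the first address configured on a named interface could look like:

.. code-block:: python

    import socket

    import psutil  # assumption: psutil is installed

    def first_address(interface, ipv6=False):
        # Return the first IPv4 (or IPv6) address configured on the interface.
        family = socket.AF_INET6 if ipv6 else socket.AF_INET
        for addr in psutil.net_if_addrs().get(interface, []):
            if addr.family == family:
                return addr.address
        # Interface missing, down, or without a usable address: the Minion
        # would fall back to binding on 0.0.0.0 (all interfaces).
        return None

    print(first_address('bond0.1234'))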
.. conf_minion:: source_address
``source_address``
------------------
.. versionadded:: Oxygen
The source IP address or the domain name to be used when connecting the Minion
to the Master.
See :conf_minion:`ipv6` for IPv6 connections to the Master.
.. warning::
This option requires a modern version of the underlying libraries used by
the selected transport:
- ``zeromq`` requires ``pyzmq`` >= 16.0.1 and ``libzmq`` >= 4.1.6
- ``tcp`` requires ``tornado`` >= 4.5
Configuration example:
.. code-block:: yaml
source_address: if-bond0-1234.sjc.us-west.internal
.. conf_minion:: source_ret_port
``source_ret_port``
-------------------
.. versionadded:: Oxygen
The source port to be used when connecting the Minion to the Master ret server.
.. warning::
This option requires a modern version of the underlying libraries used by
the selected transport:
- ``zeromq`` requires ``pyzmq`` >= 16.0.1 and ``libzmq`` >= 4.1.6
- ``tcp`` requires ``tornado`` >= 4.5
Configuration example:
.. code-block:: yaml
source_ret_port: 49017
.. conf_minion:: source_publish_port
``source_publish_port``
-----------------------
.. versionadded:: Oxygen
The source port to be used when connecting the Minion to the Master publish
server.
.. warning::
This option requires a modern version of the underlying libraries used by
the selected transport:
- ``zeromq`` requires ``pyzmq`` >= 16.0.1 and ``libzmq`` >= 4.1.6
- ``tcp`` requires ``tornado`` >= 4.5
Configuration example:
.. code-block:: yaml
source_publish_port: 49018
.. conf_minion:: user
``user``
@ -1181,8 +1292,42 @@ The password used for HTTP proxy access.
proxy_password: obolus
.. conf_minion:: docker.compare_container_networks
``docker.compare_container_networks``
-------------------------------------
.. versionadded:: Oxygen
Default: ``{'static': ['Aliases', 'Links', 'IPAMConfig'], 'automatic': ['IPAddress', 'Gateway', 'GlobalIPv6Address', 'IPv6Gateway']}``
Specifies which keys are examined by
:py:func:`docker.compare_container_networks
<salt.modules.dockermod.compare_container_networks>`.
.. note::
This should not need to be modified unless new features added to Docker
result in new keys added to the network configuration which must be
compared to determine if two containers have different network configs.
This config option exists solely as a way to allow users to continue using
Salt to manage their containers after an API change, without waiting for a
new Salt release to catch up to the changes in the Docker API.
.. code-block:: yaml
docker.compare_container_networks:
static:
- Aliases
- Links
- IPAMConfig
automatic:
- IPAddress
- Gateway
- GlobalIPv6Address
- IPv6Gateway
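As a rough illustration (this is not the dockermod implementation, which
applies additional handling, e.g. for the ``automatic`` keys), "keys examined"
simply means that only the configured keys are consulted when deciding whether
two network configurations differ:

.. code-block:: python

    def changed_keys(old_net, new_net, config):
        # Report differences only for the keys listed under 'static' and
        # 'automatic'; any other keys in the network config are ignored.
        examined = config['static'] + config['automatic']
        return {key: {'old': old_net.get(key), 'new': new_net.get(key)}
                for key in examined
                if old_net.get(key) != new_net.get(key)}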
Minion Execution Module Management
========================
==================================
.. conf_minion:: disable_modules
@ -1192,7 +1337,7 @@ Minion Execution Module Management
Default: ``[]`` (all execution modules are enabled by default)
The event may occur in which the administrator desires that a minion should not
be able to execute a certain module.
be able to execute a certain module.
However, the ``sys`` module is built into the minion and cannot be disabled.
@ -2278,6 +2423,27 @@ minion's pki directory.
master_sign_key_name: <filename_without_suffix>
.. conf_minion:: autosign_grains
``autosign_grains``
-------------------
.. versionadded:: Oxygen
Default: ``not defined``
The grains that should be sent to the master on authentication to decide if
the minion's key should be accepted automatically.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more information.
.. code-block:: yaml
autosign_grains:
- uuid
- server_id
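Conceptually (this sketch is not Salt's code), only the grains named in
``autosign_grains`` are picked out of the minion's grains and sent along with
the authentication request:

.. code-block:: python

    def autosign_payload(grains, autosign_grains):
        # Select just the configured grains; missing grains come back as None.
        return {name: grains.get(name) for name in autosign_grains}

    print(autosign_payload({'uuid': '8f7d68e2', 'os': 'Ubuntu'},
                           ['uuid', 'server_id']))
    # {'uuid': '8f7d68e2', 'server_id': None}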
.. conf_minion:: always_verify_signature
``always_verify_signature``

View file

@ -4,6 +4,109 @@
Salt Release Notes - Codename Oxygen
====================================
Lots of Docker Improvements
---------------------------
Much Improved Support for Docker Networking
===========================================
The :py:func:`docker_network.present <salt.states.docker_network.present>`
state has undergone a full rewrite, which includes the following improvements:
Full API Support for Network Management
---------------------------------------
The improvements made to input handling in the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state for 2017.7.0 have now been expanded to :py:func:`docker_network.present
<salt.states.docker_network.present>`. This brings with it full support for all
tunable configuration arguments.
Custom Subnets
--------------
Custom subnets can now be configured. Both IPv4 and mixed IPv4/IPv6 networks
are supported. See :ref:`here <salt-states-docker-network-present-ipam>` for
more information.
Network Configuration in :py:func:`docker_container.running` States
-------------------------------------------------------------------
A long-requested feature has finally been added! It is now possible to
configure static IPv4/IPv6 addresses, as well as links and labels. See
:ref:`here <salt-states-docker-container-network-management>` for more
information.
.. note::
While the ``containers`` argument to :py:func:`docker_network.present`
will continue to be supported, it will no longer be the recommended way of
ensuring that a container is attached to a network.
Improved Handling of Images from Custom Registries
==================================================
Rather than attempting to parse the tag from the passed image name, Salt will
now resolve that tag down to an image ID and use that ID instead.
.. important::
Due to this change, there are some backward-incompatible changes to image
management. See below for a full list of these changes.
Backward-incompatible Changes to Docker Image Management
********************************************************
Passing image names to the following functions must now be done using separate
``repository`` and ``tag`` arguments:
- :py:func:`docker.build <salt.modules.dockermod.build>`
- :py:func:`docker.commit <salt.modules.dockermod.commit>`
- :py:func:`docker.import <salt.modules.dockermod.import_>`
- :py:func:`docker.load <salt.modules.dockermod.load>`
- :py:func:`docker.tag <salt.modules.dockermod.tag_>`
- :py:func:`docker.sls_build <salt.modules.dockermod.sls_build>`
Additionally, the ``tag`` argument must now be explicitly passed to the
:py:func:`docker_image.present <salt.states.docker_image.present>` state,
unless the image is being pulled from a docker registry.
State and Execution Module Support for ``docker run`` Functionality
===================================================================
The :py:func:`docker_container.running <salt.states.docker_container.running>`
state is good for containers which run services, but it is not as useful for
cases in which the container only needs to run once. The ``start`` argument to
:py:func:`docker_container.running <salt.states.docker_container.running>` can
be set to ``False`` to prevent the container from being started again on a
subsequent run, but for many use cases this is not sufficient. Therefore, the
:py:func:`docker.run_container <salt.modules.dockermod.run_container>`
remote-execution function was added. When used on the Salt CLI, it will return
information about the container, such as its name, ID, exit code, and any
output it produces.
State support has also been added via the :py:func:`docker_container.run
<salt.states.docker_container.run>` state. This state is modeled after the
:py:func:`cmd.run <salt.states.cmd.run>` state, and includes arguments like
``onlyif``, ``unless``, and ``creates`` to control whether or not the container
is run.
Full API Support for :py:func:`docker.logs <salt.modules.dockermod.logs>`
=========================================================================
This function now supports all of the options that its Docker API counterpart
does, allowing you to, for example, include timestamps or suppress
stdout/stderr in the return.
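The expanded options can also be passed when calling the function from the
Python API via ``LocalClient``. The keyword names below (``timestamps``,
``tail``) mirror the Docker API and are shown as an illustration rather than
an exhaustive reference:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    # Fetch the last 50 log lines of a container, with timestamps included.
    ret = local.cmd('myminion', 'docker.logs',
                    ['mycontainer'],
                    kwarg={'timestamps': True, 'tail': 50})
    print(ret)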
`start` Argument Added to :py:func:`docker.create <salt.modules.dockermod.create>` Function
===========================================================================================
This removes the need to run :py:func:`docker.start
<salt.modules.dockermod.start_>` separately when creating containers on the
Salt CLI.
.. code-block:: bash
salt myminion docker.create image=foo/bar:baz command=/path/to/command start=True
Comparison Operators in Package Installation
--------------------------------------------
@ -65,6 +168,18 @@ noon PST so the Stormpath external authentication module has been removed.
https://stormpath.com/oktaplusstormpath
New (Proxy) Minion Configuration Options
----------------------------------------
To be able to connect the Minion to the Master using a certain source IP address
or port, the following options have been added:
- :conf_minion:`source_interface_name`
- :conf_minion:`source_address`
- :conf_minion:`source_ret_port`
- :conf_minion:`source_publish_port`
:conf_minion:`environment` config option renamed to :conf_minion:`saltenv`
--------------------------------------------------------------------------
@ -161,7 +276,7 @@ environments (i.e. ``saltenvs``) have been added:
available as saltenvs.
Additional output modes
------------------
-----------------------
The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``terse_id``.
Just like ``mixed_id``, these use the state ID as name in the highstate output.

View file

@ -0,0 +1,44 @@
.. _tutorial-autoaccept-grains:
==============================
Autoaccept minions from Grains
==============================
.. versionadded:: Oxygen
To automatically accept minions based on certain characteristics, e.g. the ``uuid``,
you can specify certain grain values on the salt master. Minions with matching grains
will have their keys automatically accepted.
1. Configure the autosign_grains_dir in the master config file:
.. code-block:: yaml
autosign_grains_dir: /etc/salt/autosign_grains
2. Configure the grain values to be accepted
Place a file named like the grain in the autosign_grains_dir and write the values that
should be accepted automatically inside that file. For example, to automatically
accept minions based on their ``uuid``, create a file named ``/etc/salt/autosign_grains/uuid``:
.. code-block:: none
8f7d68e2-30c5-40c6-b84a-df7e978a03ee
1d3c5473-1fbc-479e-b0c7-877705a0730f
The master is now set up to accept minions with either of the two specified uuids.
Multiple values must always be written on separate lines.
Lines starting with a ``#`` are ignored.
3. Configure the minion to send the specific grains to the master in the minion config file:
.. code-block:: yaml
autosign_grains:
- uuid
Now you should be able to start salt-minion and run ``salt-call
state.apply`` or any other salt commands that require master authentication.

View file

@ -35,3 +35,4 @@ Tutorials Index
* :ref:`Multi-cloud orchestration with Apache Libcloud <tutorial-libcloud>`
* :ref:`Running Salt States and Commands in Docker Containers <docker-sls>`
* :ref:`Preseed Minion with Accepted Key <tutorial-preseed-key>`
* :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`

View file

@ -31,11 +31,20 @@
# ./build.sh v2015.8.3 2 /tmp/custom_pkg
#
############################################################################
echo -n -e "\033]0;Build: Variables\007"
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Check passed parameters, set defaults
############################################################################
echo -n -e "\033]0;Build: Variables\007"
if [ "$1" == "" ]; then
VERSION=`git describe`
else
@ -80,24 +89,24 @@ fi
# Create the Build Environment
############################################################################
echo -n -e "\033]0;Build: Build Environment\007"
sudo $PKGRESOURCES/build_env.sh $PYVER
$PKGRESOURCES/build_env.sh $PYVER
############################################################################
# Install Salt
############################################################################
echo -n -e "\033]0;Build: Install Salt\007"
sudo rm -rf $SRCDIR/build
sudo rm -rf $SRCDIR/dist
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
sudo $PYTHON $SRCDIR/setup.py install
rm -rf $SRCDIR/build
rm -rf $SRCDIR/dist
$PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
$PYTHON $SRCDIR/setup.py install
############################################################################
# Build Package
############################################################################
echo -n -e "\033]0;Build: Package Salt\007"
sudo $PKGRESOURCES/build_pkg.sh $VERSION $PYVER $PKGDIR
$PKGRESOURCES/build_pkg.sh $VERSION $PYVER $PKGDIR
############################################################################
# Sign Package
############################################################################
sudo $PKGRESOURCES/build_sig.sh salt-$VERSION-py$PYVER-$CPUARCH.pkg salt-$VERSION-py$PYVER-$CPUARCH-signed.pkg
$PKGRESOURCES/build_sig.sh salt-$VERSION-py$PYVER-$CPUARCH.pkg salt-$VERSION-py$PYVER-$CPUARCH-signed.pkg

View file

@ -24,6 +24,14 @@
#
############################################################################
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Set to Exit on all Errors
############################################################################
@ -54,17 +62,18 @@ ulimit -n 1200
SRCDIR=`git rev-parse --show-toplevel`
SCRIPTDIR=`pwd`
SHADIR=$SCRIPTDIR/shasums
PKG_CONFIG_PATH=/opt/salt/lib/pkgconfig
CFLAGS="-I/opt/salt/include"
LDFLAGS="-L/opt/salt/lib"
INSTALL_DIR=/opt/salt
PKG_CONFIG_PATH=$INSTALL_DIR/lib/pkgconfig
CFLAGS="-I$INSTALL_DIR/include"
LDFLAGS="-L$INSTALL_DIR/lib"
if [ "$PYVER" == "2" ]; then
PYDIR=/opt/salt/lib/python2.7
PYTHON=/opt/salt/bin/python
PIP=/opt/salt/bin/pip
PYDIR=$INSTALL_DIR/lib/python2.7
PYTHON=$INSTALL_DIR/bin/python
PIP=$INSTALL_DIR/bin/pip
else
PYDIR=/opt/salt/lib/python3.5
PYTHON=/opt/salt/bin/python3
PIP=/opt/salt/bin/pip3
PYDIR=$INSTALL_DIR/lib/python3.5
PYTHON=$INSTALL_DIR/bin/python3
PIP=$INSTALL_DIR/bin/pip3
fi
############################################################################
@ -74,10 +83,10 @@ fi
# Fink, Brew)
# Check for Xcode Command Line Tools first
if [ -d '/Library/Developer/CommandLineTools/usr/bin' ]; then
PATH=/Library/Developer/CommandLineTools/usr/bin:/opt/salt/bin:$PATH
PATH=/Library/Developer/CommandLineTools/usr/bin:$INSTALL_DIR/bin:$PATH
MAKE=/Library/Developer/CommandLineTools/usr/bin/make
elif [ -d '/Applications/Xcode.app/Contents/Developer/usr/bin' ]; then
PATH=/Applications/Xcode.app/Contents/Developer/usr/bin:/opt/salt/bin:$PATH
PATH=/Applications/Xcode.app/Contents/Developer/usr/bin:$INSTALL_DIR/bin:$PATH
MAKE=/Applications/Xcode.app/Contents/Developer/usr/bin/make
else
echo "No installation of XCode found. This script requires XCode."
@ -125,12 +134,15 @@ download(){
############################################################################
# Ensure Paths are present and clean
############################################################################
echo "################################################################################"
echo "Ensure Paths are present and clean"
echo "################################################################################"
echo -n -e "\033]0;Build_Env: Clean\007"
# Make sure /opt/salt is clean
sudo rm -rf /opt/salt
sudo mkdir -p /opt/salt
sudo chown $USER:staff /opt/salt
# Make sure $INSTALL_DIR is clean
rm -rf $INSTALL_DIR
mkdir -p $INSTALL_DIR
chown $USER:staff $INSTALL_DIR
# Make sure build staging is clean
rm -rf build
@ -140,7 +152,7 @@ BUILDDIR=$SCRIPTDIR/build
############################################################################
# Download and install pkg-config
############################################################################
echo -n -e "\033]0;Build_Env: pkg-config\007"
echo -n -e "\033]0;Build_Env: pkg-config: download\007"
PKGURL="http://pkgconfig.freedesktop.org/releases/pkg-config-0.29.2.tar.gz"
PKGDIR="pkg-config-0.29.2"
@ -151,18 +163,22 @@ echo "##########################################################################
echo "Building pkg-config"
echo "################################################################################"
cd $PKGDIR
env LDFLAGS="-framework CoreFoundation -framework Carbon" ./configure --prefix=/opt/salt --with-internal-glib
echo -n -e "\033]0;Build_Env: pkg-config: configure\007"
env LDFLAGS="-framework CoreFoundation -framework Carbon" ./configure --prefix=$INSTALL_DIR --with-internal-glib
echo -n -e "\033]0;Build_Env: pkg-config: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: pkg-config: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: pkg-config: make install\007"
$MAKE install
############################################################################
# Download and install libsodium
############################################################################
echo -n -e "\033]0;Build_Env: libsodium\007"
echo -n -e "\033]0;Build_Env: libsodium: download\007"
PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.13.tar.gz"
PKGDIR="libsodium-1.0.13"
PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.15.tar.gz"
PKGDIR="libsodium-1.0.15"
download $PKGURL
@ -170,15 +186,19 @@ echo "##########################################################################
echo "Building libsodium"
echo "################################################################################"
cd $PKGDIR
./configure --prefix=/opt/salt
echo -n -e "\033]0;Build_Env: libsodium: configure\007"
./configure --prefix=$INSTALL_DIR
echo -n -e "\033]0;Build_Env: libsodium: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: libsodium: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: libsodium: make install\007"
$MAKE install
############################################################################
# Download and install zeromq
############################################################################
echo -n -e "\033]0;Build_Env: zeromq\007"
echo -n -e "\033]0;Build_Env: zeromq: download\007"
PKGURL="http://download.zeromq.org/zeromq-4.1.4.tar.gz"
PKGDIR="zeromq-4.1.4"
@ -189,18 +209,22 @@ echo "##########################################################################
echo "Building zeromq"
echo "################################################################################"
cd $PKGDIR
./configure --prefix=/opt/salt
echo -n -e "\033]0;Build_Env: zeromq: configure\007"
./configure --prefix=$INSTALL_DIR
echo -n -e "\033]0;Build_Env: zeromq: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: zeromq: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: zeromq: make install\007"
$MAKE install
############################################################################
# Download and install OpenSSL
############################################################################
echo -n -e "\033]0;Build_Env: OpenSSL\007"
echo -n -e "\033]0;Build_Env: OpenSSL: download\007"
PKGURL="http://openssl.org/source/openssl-1.0.2l.tar.gz"
PKGDIR="openssl-1.0.2l"
PKGURL="http://openssl.org/source/openssl-1.0.2n.tar.gz"
PKGDIR="openssl-1.0.2n"
download $PKGURL
@ -208,19 +232,23 @@ echo "##########################################################################
echo "Building OpenSSL"
echo "################################################################################"
cd $PKGDIR
./Configure darwin64-x86_64-cc --prefix=/opt/salt --openssldir=/opt/salt/openssl
echo -n -e "\033]0;Build_Env: OpenSSL: configure\007"
./Configure darwin64-x86_64-cc --prefix=$INSTALL_DIR --openssldir=$INSTALL_DIR/openssl
echo -n -e "\033]0;Build_Env: OpenSSL: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: OpenSSL: make test\007"
$MAKE test
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: OpenSSL: make install\007"
$MAKE install
############################################################################
# Download and install Python
############################################################################
echo -n -e "\033]0;Build_Env: Python\007"
echo -n -e "\033]0;Build_Env: Python: download\007"
if [ "$PYVER" == "2" ]; then
PKGURL="https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tar.xz"
PKGDIR="Python-2.7.13"
PKGURL="https://www.python.org/ftp/python/2.7.14/Python-2.7.14.tar.xz"
PKGDIR="Python-2.7.14"
else
PKGURL="https://www.python.org/ftp/python/3.5.3/Python-3.5.3.tar.xz"
PKGDIR="Python-3.5.3"
@ -233,15 +261,17 @@ echo "Building Python"
echo "################################################################################"
echo "Note there are some test failures"
cd $PKGDIR
./configure --prefix=/opt/salt --enable-shared --enable-toolbox-glue --with-ensurepip=install
echo -n -e "\033]0;Build_Env: Python: configure\007"
./configure --prefix=$INSTALL_DIR --enable-shared --enable-toolbox-glue --with-ensurepip=install
echo -n -e "\033]0;Build_Env: Python: make\007"
$MAKE
# $MAKE test
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: Python: make install\007"
$MAKE install
############################################################################
# upgrade pip
############################################################################
sudo -H $PIP install --upgrade pip
$PIP install --upgrade pip
############################################################################
# Download and install salt python dependencies
@ -253,23 +283,21 @@ cd $BUILDDIR
echo "################################################################################"
echo "Installing Salt Dependencies with pip (normal)"
echo "################################################################################"
sudo -H $PIP install \
-r $SRCDIR/pkg/osx/req.txt \
--no-cache-dir
$PIP install -r $SRCDIR/pkg/osx/req.txt \
--no-cache-dir
echo "################################################################################"
echo "Installing Salt Dependencies with pip (build_ext)"
echo "################################################################################"
sudo -H $PIP install \
-r $SRCDIR/pkg/osx/req_ext.txt \
--global-option=build_ext \
--global-option="-I/opt/salt/include" \
--no-cache-dir
$PIP install -r $SRCDIR/pkg/osx/req_ext.txt \
--global-option=build_ext \
--global-option="-I$INSTALL_DIR/include" \
--no-cache-dir
echo "--------------------------------------------------------------------------------"
echo "Create Symlink to certifi for openssl"
echo "--------------------------------------------------------------------------------"
sudo ln -s $PYDIR/site-packages/certifi/cacert.pem /opt/salt/openssl/cert.pem
ln -s $PYDIR/site-packages/certifi/cacert.pem $INSTALL_DIR/openssl/cert.pem
echo -n -e "\033]0;Build_Env: Finished\007"

View file

@ -28,6 +28,14 @@
#
############################################################################
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Set to Exit on all Errors
############################################################################
@ -96,8 +104,8 @@ mkdir -p $PKGDIR
############################################################################
echo -n -e "\033]0;Build_Pkg: Copy Start Scripts\007"
sudo cp $PKGRESOURCES/scripts/start-*.sh /opt/salt/bin/
sudo cp $PKGRESOURCES/scripts/salt-config.sh /opt/salt/bin
cp $PKGRESOURCES/scripts/start-*.sh /opt/salt/bin/
cp $PKGRESOURCES/scripts/salt-config.sh /opt/salt/bin
############################################################################
# Copy Service Definitions from Salt Repo to the Package Directory
@ -118,20 +126,20 @@ cp $PKGRESOURCES/scripts/com.saltstack.salt.api.plist $PKGDIR/Library/LaunchDaem
############################################################################
echo -n -e "\033]0;Build_Pkg: Trim unneeded files\007"
sudo rm -rdf $PKGDIR/opt/salt/bin/pkg-config
sudo rm -rdf $PKGDIR/opt/salt/lib/pkgconfig
sudo rm -rdf $PKGDIR/opt/salt/lib/engines
sudo rm -rdf $PKGDIR/opt/salt/share/aclocal
sudo rm -rdf $PKGDIR/opt/salt/share/doc
sudo rm -rdf $PKGDIR/opt/salt/share/man/man1/pkg-config.1
rm -rdf $PKGDIR/opt/salt/bin/pkg-config
rm -rdf $PKGDIR/opt/salt/lib/pkgconfig
rm -rdf $PKGDIR/opt/salt/lib/engines
rm -rdf $PKGDIR/opt/salt/share/aclocal
rm -rdf $PKGDIR/opt/salt/share/doc
rm -rdf $PKGDIR/opt/salt/share/man/man1/pkg-config.1
if [ "$PYVER" == "2" ]; then
sudo rm -rdf $PKGDIR/opt/salt/lib/python2.7/test
rm -rdf $PKGDIR/opt/salt/lib/python2.7/test
else
sudo rm -rdf $PKGDIR/opt/salt/lib/python3.5/test
rm -rdf $PKGDIR/opt/salt/lib/python3.5/test
fi
echo -n -e "\033]0;Build_Pkg: Remove compiled python files\007"
sudo find $PKGDIR/opt/salt -name '*.pyc' -type f -delete
find $PKGDIR/opt/salt -name '*.pyc' -type f -delete
############################################################################
# Copy Config Files from Salt Repo to the Package Directory

View file

@ -1,31 +1,31 @@
apache-libcloud==2.1.0
apache-libcloud==2.2.1
backports.ssl_match_hostname==3.5.0.1
backports_abc==0.5
certifi
cffi==1.10.0
CherryPy==11.0.0
cffi==1.11.2
CherryPy==13.0.0
click==6.7
enum34==1.1.6
gitdb==0.6.4
GitPython==2.1.1
idna==2.5
GitPython==2.1.7
idna==2.6
ipaddress==1.0.18
Jinja2==2.9.6
Jinja2==2.10
linode-python==1.1.1
Mako==1.0.7
MarkupSafe==1.0
msgpack-python==0.4.8
pyasn1==0.2.3
pyasn1==0.4.2
pycparser==2.18
pycrypto==2.6.1
python-dateutil==2.6.1
python-gnupg==0.4.1
PyYAML==3.12
pyzmq==16.0.2
requests==2.18.1
pyzmq==17.0.0b3
requests==2.18.4
singledispatch==3.4.0.3
six==1.10.0
six==1.11.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.1
tornado==4.5.2
vultr==1.0rc1

View file

@ -1,2 +1,2 @@
cryptography==2.0
pyOpenSSL==17.1.0
cryptography==2.1.4
pyOpenSSL==17.5.0

View file

@ -1 +0,0 @@
f37c9a28ce129d01e63c84d7db627a06402854578f62d17927334ea21ede318e04bbf66e890e3f47c85333e6b19f6e5581fb3f3e27efd24be27017d1b6529c4b ./Python-2.7.13.tar.xz

View file

@ -0,0 +1 @@
78310b0be6388ffa15f29a80afb9ab3c03a572cb094e9da00cfe391afadb51696e41f592eb658d6a31a2f422fdac8a55214a382cbb8cfb43d4a127d5b35ea7f9 ./Python-2.7.14.tar.xz

View file

@ -1 +0,0 @@
c619b12fdf0b2e59174b6e383a62d5499ebcd720fdbb2c1a41a98a46c285df075202423454b294fefee185432441e943805397d7656f7cd7837de425da623929 ./libsodium-1.0.13.tar.gz

View file

@ -0,0 +1 @@
299a208f8342793d13498e95b23f1749f5b5b13ec276db3ec401130615e837ef475b6a1283b6e87a5f8227d23e70e38ca721073dadd5dc88fe4aff342aa64adc ./libsodium-1.0.15.tar.gz

View file

@ -1 +0,0 @@
047d964508ad6025c79caabd8965efd2416dc026a56183d0ef4de7a0a6769ce8e0b4608a3f8393d326f6d03b26a2b067e6e0c750f35b20be190e595e8290c0e3 ./openssl-1.0.2l.tar.gz

View file

@ -0,0 +1 @@
144bf0d6aa27b4af01df0b7b734c39962649e1711554247d42e05e14d8945742b18745aefdba162e2dfc762b941fd7d3b2d5dc6a781ae4ba10a6f5a3cadb0687 ./openssl-1.0.2n.tar.gz

View file

@ -218,24 +218,40 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req_2.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_2.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip download"
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
# Move PyWin32 DLL's to site-packages\win32
# Install PyWin32 from wheel file
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyWin32 . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyWin322'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
# Create gen_py directory
New-Item -Path "$($ini['Settings']['SitePkgs2Dir'])\win32com\gen_py" -ItemType Directory -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
@ -248,37 +264,6 @@ Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
#==============================================================================
# Install PyYAML with CLoader
# This has to be a compiled binary to get the CLoader
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyYAML . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyYAML2'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\easy_install.exe" "-Z $file " "easy_install PyYAML"
#==============================================================================
# Install PyCrypto from wheel file
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyCrypto . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyCrypto2'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install --no-index --find-links=$($ini['Settings']['DownloadDir']) $file " "pip install PyCrypto"
#==============================================================================
# Copy DLLs to Python Directory
#==============================================================================

View file

@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req_3.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_3.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_3.txt" "pip download"
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_3.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
@ -243,12 +243,15 @@ $file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install --no-index --find-links=$($ini['Settings']['DownloadDir']) $file " "pip install PyWin32"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
# Create gen_py directory
New-Item -Path "$($ini['Settings']['SitePkgs3Dir'])\win32com\gen_py" -ItemType Directory -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"

View file

@ -33,6 +33,10 @@ goto CheckPython3
MsiExec.exe /X {4A656C6C-D24A-473F-9747-3A8D00907A03} /QN
echo %0 :: - 2.7.13 (64 bit)
MsiExec.exe /X {4A656C6C-D24A-473F-9747-3A8D00907A04} /QN
echo %0 :: - 2.7.14 (32 bit)
MsiExec.exe /X {0398A685-FD8D-46B3-9816-C47319B0CF5E} /QN
echo %0 :: - 2.7.14 (64 bit)
MsiExec.exe /X {0398A685-FD8D-46B3-9816-C47319B0CF5F} /QN
echo.

View file

@ -60,20 +60,20 @@ Function Get-Settings {
# Filenames for 64 bit Windows
$64bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win_amd64.whl"
"Python2" = "python-2.7.13.amd64.msi"
"PyYAML2" = "PyYAML-3.11.win-amd64-py2.7.exe"
"Python2" = "python-2.7.14.amd64.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win_amd64.whl"
"Python3" = "python-3.5.3-amd64.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win_amd64.whl"
"PyWin323" = "pywin32-221-cp35-cp35m-win_amd64.whl"
}
$ini.Add("64bitPrograms", $64bitPrograms)
# Filenames for 32 bit Windows
$32bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win32.whl"
"Python2" = "python-2.7.13.msi"
"PyYAML2" = "PyYAML-3.11.win32-py2.7.exe"
"Python2" = "python-2.7.14.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win32.whl"
"Python3" = "python-3.5.3.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win32.whl"
"PyWin323" = "pywin32-221-cp35-cp35m-win32.whl"
}
$ini.Add("32bitPrograms", $32bitPrograms)

pkg/windows/req.txt (new file, 36 lines)
View file

@ -0,0 +1,36 @@
backports-abc==0.5
backports.ssl-match-hostname==3.5.0.1
certifi
cffi==1.11.2
cryptography==2.1.4
enum34==1.1.6
futures==3.1.1
gitdb==0.6.4
GitPython==2.1.7
idna==2.6
ioloop==0.1a0
ipaddress==1.0.18
Jinja2==2.10
lxml==4.1.1
Mako==1.0.7
MarkupSafe==1.0
msgpack-python==0.4.8
psutil==5.4.1
pyasn1==0.4.2
pycparser==2.18
pycrypto==2.6.1
pycurl==7.43.0
PyMySQL==0.7.11
pyOpenSSL==17.5.0
python-dateutil==2.6.1
python-gnupg==0.4.1
pyyaml==3.12
pyzmq==17.0.0b3
requests==2.18.4
singledispatch==3.4.0.3
six==1.11.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.2
wheel==0.30.0
WMI==1.4.9

View file

@ -1,4 +0,0 @@
-r req_base.txt
lxml==3.6.0
pypiwin32==219

View file

@ -1,5 +0,0 @@
-r req_base.txt
lxml==3.7.3
pycrypto==2.6.1
PyYAML==3.12

View file

@ -1,34 +0,0 @@
backports-abc==0.5
backports.ssl-match-hostname==3.5.0.1
certifi
cffi==1.10.0
CherryPy==10.2.1
cryptography==1.8.1
enum34==1.1.6
futures==3.1.1
gitdb==0.6.4
GitPython==2.1.3
idna==2.5
ioloop==0.1a0
ipaddress==1.0.18
Jinja2==2.9.6
Mako==1.0.6
MarkupSafe==1.0
msgpack-python==0.4.8
psutil==5.2.2
pyasn1==0.2.3
pycparser==2.17
pycurl==7.43.0
PyMySQL==0.7.11
pyOpenSSL==17.0.0
python-dateutil==2.6.0
python-gnupg==0.4.0
pyzmq==16.0.2
requests==2.13.0
singledispatch==3.4.0.3
six==1.10.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.1
wheel==0.30.0a0
WMI==1.4.9

View file

@ -1,2 +1,2 @@
pip==9.0.1
setuptools==35.0.2
setuptools==38.2.4

View file

@ -2,7 +2,7 @@ mock
boto
boto3
moto
SaltPyLint>=v2017.3.6
SaltPyLint>=v2017.6.22
apache-libcloud
virtualenv

View file

@ -7,7 +7,6 @@ Salt package
from __future__ import absolute_import
import warnings
# future lint: disable=non-unicode-string
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
'once', # Show once
@ -15,19 +14,18 @@ warnings.filterwarnings(
DeprecationWarning, # This filter is for DeprecationWarnings
r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.<whatever>'
)
# future lint: enable=non-unicode-string
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
u'ignore',
u'With-statements now directly support multiple context managers',
'ignore',
'With-statements now directly support multiple context managers',
DeprecationWarning
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
u'ignore',
u'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
'ignore',
'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
UserWarning
)
@ -39,7 +37,7 @@ def __define_global_system_encoding_variable__():
# and reset to None
encoding = None
if not sys.platform.startswith(u'win') and sys.stdin is not None:
if not sys.platform.startswith('win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -65,16 +63,16 @@ def __define_global_system_encoding_variable__():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith(u'darwin'):
if sys.platform.startswith('darwin'):
# Mac OS X uses UTF-8
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
encoding = 'utf-8'
elif sys.platform.startswith('win'):
# Windows uses a configurable encoding; on Windows, Python uses the name “mbcs”
# to refer to whatever the currently configured encoding is.
encoding = u'mbcs'
encoding = 'mbcs'
else:
# On linux default to ascii as a last resort
encoding = u'ascii'
encoding = 'ascii'
# We can't use six.moves.builtins because these builtins get deleted sooner
# than expected. See:
@ -85,7 +83,7 @@ def __define_global_system_encoding_variable__():
import builtins # pylint: disable=import-error
# Define the detected encoding as a built-in variable for ease of use
setattr(builtins, u'__salt_system_encoding__', encoding)
setattr(builtins, '__salt_system_encoding__', encoding)
# This is now garbage collectable
del sys

View file

@ -46,7 +46,7 @@ else:
if HAS_XML:
if not hasattr(ElementTree, u'ParseError'):
if not hasattr(ElementTree, 'ParseError'):
class ParseError(Exception):
'''
older versions of ElementTree do not have ParseError
@ -56,7 +56,7 @@ if HAS_XML:
ElementTree.ParseError = ParseError
def text_(s, encoding=u'latin-1', errors=u'strict'):
def text_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
@ -66,7 +66,7 @@ def text_(s, encoding=u'latin-1', errors=u'strict'):
return s
def bytes_(s, encoding=u'latin-1', errors=u'strict'):
def bytes_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
@ -79,25 +79,25 @@ def bytes_(s, encoding=u'latin-1', errors=u'strict'):
if PY3:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode(u'ascii')
return str(s, u'ascii', u'strict')
s = s.encode('ascii')
return str(s, 'ascii', 'strict')
else:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode(u'ascii')
s = s.encode('ascii')
return str(s)
ascii_native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode(u'ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(u'ascii')``, otherwise return ``str(s)``
''' # future lint: disable=non-unicode-string
``s.encode('ascii')``, otherwise return ``str(s)``
'''
if PY3:
def native_(s, encoding=u'latin-1', errors=u'strict'):
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s``, otherwise return ``str(s, encoding, errors)``
@ -106,7 +106,7 @@ if PY3:
return s
return str(s, encoding, errors)
else:
def native_(s, encoding=u'latin-1', errors=u'strict'):
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
@ -121,7 +121,7 @@ return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
''' # future lint: disable=non-unicode-string
'''
def string_io(data=None): # cStringIO can't handle unicode

File diff suppressed because it is too large

View file

@ -38,7 +38,7 @@ def tokenify(cmd, token=None):
Otherwise return cmd
'''
if token is not None:
cmd[u'token'] = token
cmd['token'] = token
return cmd
@ -51,19 +51,19 @@ class APIClient(object):
if not opts:
opts = salt.config.client_config(
os.environ.get(
u'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, u'master')
'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, 'master')
)
)
self.opts = opts
self.localClient = salt.client.get_local_client(self.opts[u'conf_file'])
self.localClient = salt.client.get_local_client(self.opts['conf_file'])
self.runnerClient = salt.runner.RunnerClient(self.opts)
self.wheelClient = salt.wheel.Wheel(self.opts)
self.resolver = salt.auth.Resolver(self.opts)
self.event = salt.utils.event.get_event(
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=listen)
@ -119,20 +119,20 @@ class APIClient(object):
'''
cmd = dict(cmd) # make copy
client = u'minion' # default to local minion client
mode = cmd.get(u'mode', u'async') # default to 'async'
client = 'minion' # default to local minion client
mode = cmd.get('mode', 'async') # default to 'async'
# check for wheel or runner prefix to fun name to use wheel or runner client
funparts = cmd.get(u'fun', u'').split(u'.')
if len(funparts) > 2 and funparts[0] in [u'wheel', u'runner']: # master
funparts = cmd.get('fun', '').split('.')
if len(funparts) > 2 and funparts[0] in ['wheel', 'runner']: # master
client = funparts[0]
cmd[u'fun'] = u'.'.join(funparts[1:]) # strip prefix
cmd['fun'] = '.'.join(funparts[1:]) # strip prefix
if not (u'token' in cmd or
(u'eauth' in cmd and u'password' in cmd and u'username' in cmd)):
raise EauthAuthenticationError(u'No authentication credentials given')
if not ('token' in cmd or
('eauth' in cmd and 'password' in cmd and 'username' in cmd)):
raise EauthAuthenticationError('No authentication credentials given')
executor = getattr(self, u'{0}_{1}'.format(client, mode))
executor = getattr(self, '{0}_{1}'.format(client, mode))
result = executor(**cmd)
return result
@ -205,9 +205,9 @@ class APIClient(object):
Adds client per the command.
'''
cmd[u'client'] = u'minion'
if len(cmd[u'module'].split(u'.')) > 2 and cmd[u'module'].split(u'.')[0] in [u'runner', u'wheel']:
cmd[u'client'] = u'master'
cmd['client'] = 'minion'
if len(cmd['module'].split('.')) > 2 and cmd['module'].split('.')[0] in ['runner', 'wheel']:
cmd['client'] = 'master'
return self._signature(cmd)
def _signature(self, cmd):
@ -217,20 +217,20 @@ class APIClient(object):
'''
result = {}
client = cmd.get(u'client', u'minion')
if client == u'minion':
cmd[u'fun'] = u'sys.argspec'
cmd[u'kwarg'] = dict(module=cmd[u'module'])
client = cmd.get('client', 'minion')
if client == 'minion':
cmd['fun'] = 'sys.argspec'
cmd['kwarg'] = dict(module=cmd['module'])
result = self.run(cmd)
elif client == u'master':
parts = cmd[u'module'].split(u'.')
elif client == 'master':
parts = cmd['module'].split('.')
client = parts[0]
module = u'.'.join(parts[1:]) # strip prefix
if client == u'wheel':
module = '.'.join(parts[1:]) # strip prefix
if client == 'wheel':
functions = self.wheelClient.functions
elif client == u'runner':
elif client == 'runner':
functions = self.runnerClient.functions
result = {u'master': salt.utils.args.argspec_report(functions, module)}
result = {'master': salt.utils.args.argspec_report(functions, module)}
return result
def create_token(self, creds):
@ -275,20 +275,20 @@ class APIClient(object):
tokenage = self.resolver.mk_token(creds)
except Exception as ex:
raise EauthAuthenticationError(
u"Authentication failed with {0}.".format(repr(ex)))
"Authentication failed with {0}.".format(repr(ex)))
if u'token' not in tokenage:
raise EauthAuthenticationError(u"Authentication failed with provided credentials.")
if 'token' not in tokenage:
raise EauthAuthenticationError("Authentication failed with provided credentials.")
# Grab eauth config for the current backend for the current user
tokenage_eauth = self.opts[u'external_auth'][tokenage[u'eauth']]
if tokenage[u'name'] in tokenage_eauth:
tokenage[u'perms'] = tokenage_eauth[tokenage[u'name']]
tokenage_eauth = self.opts['external_auth'][tokenage['eauth']]
if tokenage['name'] in tokenage_eauth:
tokenage['perms'] = tokenage_eauth[tokenage['name']]
else:
tokenage[u'perms'] = tokenage_eauth[u'*']
tokenage['perms'] = tokenage_eauth['*']
tokenage[u'user'] = tokenage[u'name']
tokenage[u'username'] = tokenage[u'name']
tokenage['user'] = tokenage['name']
tokenage['username'] = tokenage['name']
return tokenage
@ -301,11 +301,11 @@ class APIClient(object):
result = self.resolver.get_token(token)
except Exception as ex:
raise EauthAuthenticationError(
u"Token validation failed with {0}.".format(repr(ex)))
"Token validation failed with {0}.".format(repr(ex)))
return result
def get_event(self, wait=0.25, tag=u'', full=False):
def get_event(self, wait=0.25, tag='', full=False):
'''
Get a single salt event.
If no events are available, then block for up to ``wait`` seconds.
@ -323,4 +323,4 @@ class APIClient(object):
Need to convert this to a master call with appropriate authentication
'''
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
return self.event.fire_event(data, salt.utils.event.tagify(tag, 'wui'))

View file

@ -38,18 +38,18 @@ import tornado.stack_context
log = logging.getLogger(__name__)
CLIENT_INTERNAL_KEYWORDS = frozenset([
u'client',
u'cmd',
u'eauth',
u'fun',
u'kwarg',
u'match',
u'token',
u'__jid__',
u'__tag__',
u'__user__',
u'username',
u'password'
'client',
'cmd',
'eauth',
'fun',
'kwarg',
'match',
'token',
'__jid__',
'__tag__',
'__user__',
'username',
'password'
])
@ -81,9 +81,9 @@ class ClientFuncsDict(collections.MutableMapping):
raise KeyError
def wrapper(*args, **kwargs):
low = {u'fun': key,
u'args': args,
u'kwargs': kwargs,
low = {'fun': key,
'args': args,
'kwargs': kwargs,
}
pub_data = {}
# Copy kwargs keys so we can iterate over and pop the pub data
@ -91,18 +91,18 @@ class ClientFuncsDict(collections.MutableMapping):
# pull out pub_data if you have it
for kwargs_key in kwargs_keys:
if kwargs_key.startswith(u'__pub_'):
if kwargs_key.startswith('__pub_'):
pub_data[kwargs_key] = kwargs.pop(kwargs_key)
async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid'))
async_pub = self.client._gen_async_pub(pub_data.get('__pub_jid'))
user = salt.utils.user.get_specific_user()
return self.client._proc_function(
key,
low,
user,
async_pub[u'tag'], # TODO: fix
async_pub[u'jid'], # TODO: fix
async_pub['tag'], # TODO: fix
async_pub['jid'], # TODO: fix
False, # Don't daemonize
)
return wrapper
@ -133,14 +133,14 @@ class SyncClientMixin(object):
Execute a function through the master network interface.
'''
load = kwargs
load[u'cmd'] = self.client
load['cmd'] = self.client
channel = salt.transport.Channel.factory(self.opts,
crypt=u'clear',
usage=u'master_call')
crypt='clear',
usage='master_call')
ret = channel.send(load)
if isinstance(ret, collections.Mapping):
if u'error' in ret:
salt.utils.error.raise_error(**ret[u'error'])
if 'error' in ret:
salt.utils.error.raise_error(**ret['error'])
return ret
def cmd_sync(self, low, timeout=None, full_return=False):
@ -159,19 +159,19 @@ class SyncClientMixin(object):
'eauth': 'pam',
})
'''
event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=True)
event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True)
job = self.master_call(**low)
ret_tag = salt.utils.event.tagify(u'ret', base=job[u'tag'])
ret_tag = salt.utils.event.tagify('ret', base=job['tag'])
if timeout is None:
timeout = self.opts.get(u'rest_timeout', 300)
timeout = self.opts.get('rest_timeout', 300)
ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True)
if ret is None:
raise salt.exceptions.SaltClientTimeout(
u"RunnerClient job '{0}' timed out".format(job[u'jid']),
jid=job[u'jid'])
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
return ret if full_return else ret[u'data'][u'return']
return ret if full_return else ret['data']['return']
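A hedged usage sketch (not taken from this commit) of driving cmd_sync through RunnerClient; the config path, credentials, and runner function are placeholders:

    import salt.config
    import salt.runner

    opts = salt.config.master_config('/etc/salt/master')  # assumed path
    runner = salt.runner.RunnerClient(opts)
    # Blocks on the master event bus until the runner job returns or the
    # timeout (rest_timeout by default) expires.
    ret = runner.cmd_sync({
        'fun': 'jobs.list_jobs',
        'username': 'saltdev',   # placeholder eauth credentials
        'password': 'saltdev',
        'eauth': 'pam',
    })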
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
@ -206,40 +206,40 @@ class SyncClientMixin(object):
arg = tuple()
if not isinstance(arg, list) and not isinstance(arg, tuple):
raise salt.exceptions.SaltInvocationError(
u'arg must be formatted as a list/tuple'
'arg must be formatted as a list/tuple'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
u'pub_data must be formatted as a dictionary'
'pub_data must be formatted as a dictionary'
)
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
u'kwarg must be formatted as a dictionary'
'kwarg must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(
arg,
no_parse=self.opts.get(u'no_parse', []))
no_parse=self.opts.get('no_parse', []))
# if you were passed kwarg, add it to arglist
if kwarg:
kwarg[u'__kwarg__'] = True
kwarg['__kwarg__'] = True
arglist.append(kwarg)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
low = {u'fun': fun,
u'arg': args,
u'kwarg': kwargs}
low = {'fun': fun,
'arg': args,
'kwarg': kwargs}
return self.low(fun, low, print_event=print_event, full_return=full_return)
@property
def mminion(self):
if not hasattr(self, u'_mminion'):
if not hasattr(self, '_mminion'):
self._mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False)
return self._mminion
@ -248,15 +248,15 @@ class SyncClientMixin(object):
Check for deprecated usage and allow until Salt Oxygen.
'''
msg = []
if u'args' in low:
msg.append(u'call with arg instead')
low[u'arg'] = low.pop(u'args')
if u'kwargs' in low:
msg.append(u'call with kwarg instead')
low[u'kwarg'] = low.pop(u'kwargs')
if 'args' in low:
msg.append('call with arg instead')
low['arg'] = low.pop('args')
if 'kwargs' in low:
msg.append('call with kwarg instead')
low['kwarg'] = low.pop('kwargs')
if msg:
salt.utils.versions.warn_until(u'Oxygen', u' '.join(msg))
salt.utils.versions.warn_until('Oxygen', ' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)
@ -270,13 +270,13 @@ class SyncClientMixin(object):
class_name = self.__class__.__name__.lower()
except AttributeError:
log.warning(
u'Unable to determine class name',
'Unable to determine class name',
exc_info_on_loglevel=logging.DEBUG
)
return True
try:
return self.opts[u'{0}_returns'.format(class_name)]
return self.opts['{0}_returns'.format(class_name)]
except KeyError:
# No such option, assume this isn't one we care about gating and
# just return True.
@ -299,24 +299,24 @@ class SyncClientMixin(object):
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
jid = low.get('__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
u'jid': jid,
u'user': low.get(u'__user__', u'UNKNOWN'),
data = {'fun': '{0}.{1}'.format(self.client, fun),
'jid': jid,
'user': low.get('__user__', 'UNKNOWN'),
}
event = salt.utils.event.get_event(
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, u'print_async_event') \
if hasattr(self, 'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
@ -331,12 +331,12 @@ class SyncClientMixin(object):
# TODO: document these, and test that they exist
# TODO: Other things to inject??
func_globals = {u'__jid__': jid,
u'__user__': data[u'user'],
u'__tag__': tag,
func_globals = {'__jid__': jid,
'__user__': data['user'],
'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
u'__jid_event__': weakref.proxy(namespaced_event),
'__jid_event__': weakref.proxy(namespaced_event),
}
try:
@ -348,9 +348,9 @@ class SyncClientMixin(object):
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if u'.' not in mod_name:
if '.' not in mod_name:
continue
mod, _ = mod_name.split(u'.', 1)
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
@ -366,81 +366,81 @@ class SyncClientMixin(object):
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in.
if u'arg' in low and u'kwarg' in low:
args = low[u'arg']
kwargs = low[u'kwarg']
if 'arg' in low and 'kwarg' in low:
args = low['arg']
kwargs = low['kwarg']
else:
f_call = salt.utils.args.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get(u'args', ())
kwargs = f_call.get(u'kwargs', {})
args = f_call.get('args', ())
kwargs = f_call.get('kwargs', {})
# Update the event data with loaded args and kwargs
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals[u'__jid_event__'].fire_event(data, u'new')
data['fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals['__jid_event__'].fire_event(data, 'new')
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
data[u'return'] = self.functions[fun](*args, **kwargs)
data['return'] = self.functions[fun](*args, **kwargs)
try:
data[u'success'] = self.context.get(u'retcode', 0) == 0
data['success'] = self.context.get('retcode', 0) == 0
except AttributeError:
# Assume a True result if no context attribute
data[u'success'] = True
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
data['success'] = True
if isinstance(data['return'], dict) and 'data' in data['return']:
# some functions can return boolean values
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
data['success'] = salt.utils.state.check_result(data['return']['data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data[u'return'] = str(ex)
data['return'] = str(ex)
else:
data[u'return'] = u'Exception occurred in {0} {1}: {2}'.format(
data['return'] = 'Exception occurred in {0} {1}: {2}'.format(
self.client,
fun,
traceback.format_exc(),
)
data[u'success'] = False
data['success'] = False
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
u'id': self.opts[u'id'],
u'tgt': self.opts[u'id'],
u'jid': data[u'jid'],
u'return': data,
'id': self.opts['id'],
'tgt': self.opts['id'],
'jid': data['jid'],
'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error(u'Could not store job cache info. '
u'Job details for this run may be unavailable.')
log.error('Could not store job cache info. '
'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, u'ret')
namespaced_event.fire_event(data, 'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info(u'Runner completed: %s', data[u'jid'])
log.info('Runner completed: %s', data['jid'])
del event
del namespaced_event
return data if full_return else data[u'return']
return data if full_return else data['return']
def get_docs(self, arg=None):
'''
Return a dictionary of functions and the inline documentation for each
'''
if arg:
if u'*' in arg:
if '*' in arg:
target_mod = arg
_use_fnmatch = True
else:
target_mod = arg + u'.' if not arg.endswith(u'.') else arg
target_mod = arg + '.' if not arg.endswith('.') else arg
_use_fnmatch = False
if _use_fnmatch:
docs = [(fun, self.functions[fun].__doc__)
@ -478,9 +478,9 @@ class AsyncClientMixin(object):
salt.log.setup.setup_multiprocessing_logging()
# pack a few things into low
low[u'__jid__'] = jid
low[u'__user__'] = user
low[u'__tag__'] = tag
low['__jid__'] = jid
low['__user__'] = user
low['__tag__'] = tag
return self.low(fun, low, full_return=False)
@ -508,9 +508,9 @@ class AsyncClientMixin(object):
if jid is None:
jid = salt.utils.jid.gen_jid(self.opts)
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
return {u'tag': tag, u'jid': jid}
return {'tag': tag, 'jid': jid}
def async(self, fun, low, user=u'UNKNOWN', pub=None):
def async(self, fun, low, user='UNKNOWN', pub=None):
'''
Execute the function in a multiprocess and return the event tag to use
to watch for the return
@ -519,7 +519,7 @@ class AsyncClientMixin(object):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._proc_function,
args=(fun, low, user, async_pub[u'tag'], async_pub[u'jid']))
args=(fun, low, user, async_pub['tag'], async_pub['jid']))
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
@ -535,29 +535,29 @@ class AsyncClientMixin(object):
return
# if we are "quiet", don't print
if self.opts.get(u'quiet', False):
if self.opts.get('quiet', False):
return
# some suffixes we don't want to print
if suffix in (u'new',):
if suffix in ('new',):
return
try:
outputter = self.opts.get(u'output', event.get(u'outputter', None) or event.get(u'return').get(u'outputter'))
outputter = self.opts.get('output', event.get('outputter', None) or event.get('return').get('outputter'))
except AttributeError:
outputter = None
# if this is a ret, we have our own set of rules
if suffix == u'ret':
if suffix == 'ret':
# Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict with two keys: 'data' and 'outputter'
if isinstance(event.get(u'return'), dict) \
and set(event[u'return']) == set((u'data', u'outputter')):
event_data = event[u'return'][u'data']
outputter = event[u'return'][u'outputter']
if isinstance(event.get('return'), dict) \
and set(event['return']) == set(('data', 'outputter')):
event_data = event['return']['data']
outputter = event['return']['outputter']
else:
event_data = event[u'return']
event_data = event['return']
else:
event_data = {u'suffix': suffix, u'event': event}
event_data = {'suffix': suffix, 'event': event}
salt.output.display_output(event_data, outputter, self.opts)


@ -20,7 +20,7 @@ class NetapiClient(object):
'''
def __init__(self, opts):
self.opts = opts
self.process_manager = salt.utils.process.ProcessManager(name=u'NetAPIProcessManager')
self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager')
self.netapi = salt.loader.netapi(self.opts)
def run(self):
@ -28,11 +28,11 @@ class NetapiClient(object):
Load and start all available api modules
'''
if not len(self.netapi):
log.error(u"Did not find any netapi configurations, nothing to start")
log.error("Did not find any netapi configurations, nothing to start")
for fun in self.netapi:
if fun.endswith(u'.start'):
log.info(u'Starting %s netapi module', fun)
if fun.endswith('.start'):
log.info('Starting %s netapi module', fun)
self.process_manager.add_process(self.netapi[fun])
# Install the SIGINT/SIGTERM handlers if not done so far


@ -32,7 +32,7 @@ class LocalClient(salt.client.LocalClient):
The RAET LocalClient
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None):
salt.client.LocalClient.__init__(self, c_path, mopts)
@ -41,22 +41,22 @@ class LocalClient(salt.client.LocalClient):
tgt,
fun,
arg=(),
tgt_type=u'glob',
ret=u'',
jid=u'',
tgt_type='glob',
ret='',
jid='',
timeout=5,
**kwargs):
'''
Publish the command!
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
payload_kwargs = self._prep_pub(
tgt,
@ -68,21 +68,21 @@ class LocalClient(salt.client.LocalClient):
timeout=timeout,
**kwargs)
kind = self.opts[u'__role']
kind = self.opts['__role']
if kind not in kinds.APPL_KINDS:
emsg = (u"Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u"\n")
emsg = ("Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
lanename = u'master'
lanename = 'master'
else:
emsg = (u"Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u'\n')
emsg = ("Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
sockdirpath = self.opts[u'sock_dir']
name = u'client' + nacling.uuid(size=18)
sockdirpath = self.opts['sock_dir']
name = 'client' + nacling.uuid(size=18)
stack = LaneStack(
name=name,
lanename=lanename,
@ -91,12 +91,12 @@ class LocalClient(salt.client.LocalClient):
manor_yard = RemoteYard(
stack=stack,
lanename=lanename,
name=u'manor',
name='manor',
dirpath=sockdirpath)
stack.addRemote(manor_yard)
route = {u'dst': (None, manor_yard.name, u'local_cmd'),
u'src': (None, stack.local.name, None)}
msg = {u'route': route, u'load': payload_kwargs}
route = {'dst': (None, manor_yard.name, 'local_cmd'),
'src': (None, stack.local.name, None)}
msg = {'route': route, 'load': payload_kwargs}
stack.transmit(msg)
stack.serviceAll()
while True:
@ -104,9 +104,9 @@ class LocalClient(salt.client.LocalClient):
stack.serviceAll()
while stack.rxMsgs:
msg, sender = stack.rxMsgs.popleft()
ret = msg.get(u'return', {})
if u'ret' in ret:
ret = msg.get('return', {})
if 'ret' in ret:
stack.server.close()
return ret[u'ret']
return ret['ret']
stack.server.close()
return ret

File diff suppressed because it is too large.


@ -23,7 +23,7 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None,
disable_custom_roster=False):
if mopts:
@ -31,14 +31,14 @@ class SSHClient(object):
else:
if os.path.isdir(c_path):
log.warning(
u'%s expects a file path not a directory path(%s) to '
u'its \'c_path\' keyword argument',
'%s expects a file path not a directory path(%s) to '
'its \'c_path\' keyword argument',
self.__class__.__name__, c_path
)
self.opts = salt.config.client_config(c_path)
# Salt API should never offer a custom roster!
self.opts[u'__disable_custom_roster'] = disable_custom_roster
self.opts['__disable_custom_roster'] = disable_custom_roster
def _prep_ssh(
self,
@ -46,30 +46,30 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
tgt_type='glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts[u'timeout'] = timeout
opts['timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
return salt.client.ssh.SSH(opts)
def cmd_iter(
@ -78,8 +78,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
ret=u'',
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
@ -88,14 +88,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
@ -105,7 +105,7 @@ class SSHClient(object):
tgt_type,
kwarg,
**kwargs)
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
yield ret
def cmd(self,
@ -113,7 +113,7 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
tgt_type='glob',
kwarg=None,
**kwargs):
'''
@ -122,14 +122,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
@ -140,7 +140,7 @@ class SSHClient(object):
kwarg,
**kwargs)
final = {}
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
final.update(ret)
return final
@ -166,16 +166,16 @@ class SSHClient(object):
kwargs = copy.deepcopy(low)
for ignore in [u'tgt', u'fun', u'arg', u'timeout', u'tgt_type', u'kwarg']:
for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
if ignore in kwargs:
del kwargs[ignore]
return self.cmd(low[u'tgt'],
low[u'fun'],
low.get(u'arg', []),
low.get(u'timeout'),
low.get(u'tgt_type'),
low.get(u'kwarg'),
return self.cmd(low['tgt'],
low['fun'],
low.get('arg', []),
low.get('timeout'),
low.get('tgt_type'),
low.get('kwarg'),
**kwargs)
def cmd_async(self, low, timeout=None):
@ -204,8 +204,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
ret=u'',
tgt_type='glob',
ret='',
kwarg=None,
sub=3,
**kwargs):
@ -226,24 +226,24 @@ class SSHClient(object):
.. versionadded:: 2017.7.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
minion_ret = self.cmd(tgt,
u'sys.list_functions',
'sys.list_functions',
tgt_type=tgt_type,
**kwargs)
minions = list(minion_ret)
random.shuffle(minions)
f_tgt = []
for minion in minions:
if fun in minion_ret[minion][u'return']:
if fun in minion_ret[minion]['return']:
f_tgt.append(minion)
if len(f_tgt) >= sub:
break
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type=u'list', ret=ret, kwarg=kwarg, **kwargs)
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type='list', ret=ret, kwarg=kwarg, **kwargs)
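The expr_form-to-tgt_type deprecation shim above is repeated in pub, _prep_ssh, cmd_iter, cmd and cmd_subset. A hypothetical caller-side view of the migration it warns about (the target, function, and client construction are illustrative; building SSHClient assumes a readable master config at the default path):

    import salt.client.ssh.client

    client = salt.client.ssh.client.SSHClient()

    # Deprecated keyword -- still accepted, but warns until Salt Fluorine:
    client.cmd('web*', 'test.ping', expr_form='glob')

    # Preferred keyword going forward:
    client.cmd('web*', 'test.ping', tgt_type='glob')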


@ -20,12 +20,12 @@ import salt.utils.vt
log = logging.getLogger(__name__)
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M) # future lint: disable=non-unicode-string
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*') # future lint: disable=non-unicode-string
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M)
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*')
# Keep these in sync with ./__init__.py
RSTR = u'_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') # future lint: disable=non-unicode-string
RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)')
class NoPasswdError(Exception):
@ -40,7 +40,7 @@ def gen_key(path):
'''
Generate a key for use with salt-ssh
'''
cmd = u'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
subprocess.call(cmd, shell=True)
@ -50,12 +50,12 @@ def gen_shell(opts, **kwargs):
'''
Return the correct shell interface for the target system
'''
if kwargs[u'winrm']:
if kwargs['winrm']:
try:
import saltwinshell
shell = saltwinshell.Shell(opts, **kwargs)
except ImportError:
log.error(u'The saltwinshell library is not available')
log.error('The saltwinshell library is not available')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
shell = Shell(opts, **kwargs)
@ -85,7 +85,7 @@ class Shell(object):
ssh_options=None):
self.opts = opts
# ssh <ipv6>, but scp [<ipv6>]:/path
self.host = host.strip(u'[]')
self.host = host.strip('[]')
self.user = user
self.port = port
self.passwd = str(passwd) if passwd else passwd
@ -96,18 +96,18 @@ class Shell(object):
self.mods = mods
self.identities_only = identities_only
self.remote_port_forwards = remote_port_forwards
self.ssh_options = u'' if ssh_options is None else ssh_options
self.ssh_options = '' if ssh_options is None else ssh_options
def get_error(self, errstr):
'''
Parse out an error and return a targeted error string
'''
for line in errstr.split(u'\n'):
if line.startswith(u'ssh:'):
for line in errstr.split('\n'):
if line.startswith('ssh:'):
return line
if line.startswith(u'Pseudo-terminal'):
if line.startswith('Pseudo-terminal'):
continue
if u'to the list of known hosts.' in line:
if 'to the list of known hosts.' in line:
continue
return line
return errstr
@ -117,36 +117,36 @@ class Shell(object):
Return options for the ssh command base for Salt to call
'''
options = [
u'KbdInteractiveAuthentication=no',
'KbdInteractiveAuthentication=no',
]
if self.passwd:
options.append(u'PasswordAuthentication=yes')
options.append('PasswordAuthentication=yes')
else:
options.append(u'PasswordAuthentication=no')
if self.opts.get(u'_ssh_version', (0,)) > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get(u'known_hosts_file')
options.append('PasswordAuthentication=no')
if self.opts.get('_ssh_version', (0,)) > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get('known_hosts_file')
if known_hosts and os.path.isfile(known_hosts):
options.append(u'UserKnownHostsFile={0}'.format(known_hosts))
options.append('UserKnownHostsFile={0}'.format(known_hosts))
if self.port:
options.append(u'Port={0}'.format(self.port))
options.append('Port={0}'.format(self.port))
if self.priv:
options.append(u'IdentityFile={0}'.format(self.priv))
options.append('IdentityFile={0}'.format(self.priv))
if self.user:
options.append(u'User={0}'.format(self.user))
options.append('User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
options.append('IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
ret.append('-o {0} '.format(option))
return ''.join(ret)
def _passwd_opts(self):
'''
@ -155,41 +155,41 @@ class Shell(object):
# TODO ControlMaster does not work without ControlPath
# user could take advantage of it if they set ControlPath in their
# ssh config. Also, ControlPersist not widely available.
options = [u'ControlMaster=auto',
u'StrictHostKeyChecking=no',
options = ['ControlMaster=auto',
'StrictHostKeyChecking=no',
]
if self.opts[u'_ssh_version'] > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
if self.opts['_ssh_version'] > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
if self.passwd:
options.extend([u'PasswordAuthentication=yes',
u'PubkeyAuthentication=yes'])
options.extend(['PasswordAuthentication=yes',
'PubkeyAuthentication=yes'])
else:
options.extend([u'PasswordAuthentication=no',
u'PubkeyAuthentication=yes',
u'KbdInteractiveAuthentication=no',
u'ChallengeResponseAuthentication=no',
u'BatchMode=yes'])
options.extend(['PasswordAuthentication=no',
'PubkeyAuthentication=yes',
'KbdInteractiveAuthentication=no',
'ChallengeResponseAuthentication=no',
'BatchMode=yes'])
if self.port:
options.append(u'Port={0}'.format(self.port))
options.append('Port={0}'.format(self.port))
if self.user:
options.append(u'User={0}'.format(self.user))
options.append('User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
options.append('IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
ret.append('-o {0} '.format(option))
return ''.join(ret)
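A standalone sketch (not from the commit) of the option string that _key_opts and _passwd_opts assemble above; the option values are examples only:

    options = [
        'KbdInteractiveAuthentication=no',
        'PasswordAuthentication=no',
        'ConnectTimeout=65',
        'Port=2222',
        'User=deploy',
    ]
    # Each entry becomes "-o <option> ", matching the loops above.
    opt_str = ''.join('-o {0} '.format(option) for option in options)
    print(opt_str)
    # -o KbdInteractiveAuthentication=no -o PasswordAuthentication=no ...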
def _ssh_opts(self):
return u' '.join([u'-o {0}'.format(opt)
return ' '.join(['-o {0}'.format(opt)
for opt in self.ssh_options])
def _copy_id_str_old(self):
@ -199,9 +199,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return u"{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -217,9 +217,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return u"{0} {1} {2} -p {3} {4} {5}@{6}".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
return "{0} {1} {2} -p {3} {4} {5}@{6}".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -232,11 +232,11 @@ class Shell(object):
Execute ssh-copy-id to plant the id file on the target
'''
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old())
if salt.defaults.exitcodes.EX_OK != retcode and u'Usage' in stderr:
if salt.defaults.exitcodes.EX_OK != retcode and 'Usage' in stderr:
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new())
return stdout, stderr, retcode
def _cmd_str(self, cmd, ssh=u'ssh'):
def _cmd_str(self, cmd, ssh='ssh'):
'''
Return the cmd string to execute
'''
@ -245,21 +245,21 @@ class Shell(object):
# need to deliver the SHIM to the remote host and execute it there
command = [ssh]
if ssh != u'scp':
if ssh != 'scp':
command.append(self.host)
if self.tty and ssh == u'ssh':
command.append(u'-t -t')
if self.tty and ssh == 'ssh':
command.append('-t -t')
if self.passwd or self.priv:
command.append(self.priv and self._key_opts() or self._passwd_opts())
if ssh != u'scp' and self.remote_port_forwards:
command.append(u' '.join([u'-R {0}'.format(item)
for item in self.remote_port_forwards.split(u',')]))
if ssh != 'scp' and self.remote_port_forwards:
command.append(' '.join(['-R {0}'.format(item)
for item in self.remote_port_forwards.split(',')]))
if self.ssh_options:
command.append(self._ssh_opts())
command.append(cmd)
return u' '.join(command)
return ' '.join(command)
def _old_run_cmd(self, cmd):
'''
@ -276,7 +276,7 @@ class Shell(object):
data = proc.communicate()
return data[0], data[1], proc.returncode
except Exception:
return (u'local', u'Unknown Error', None)
return ('local', 'Unknown Error', None)
def _run_nb_cmd(self, cmd):
'''
@ -300,7 +300,7 @@ class Shell(object):
err = self.get_error(err)
yield out, err, rcode
except Exception:
yield (u'', u'Unknown Error', None)
yield ('', 'Unknown Error', None)
def exec_nb_cmd(self, cmd):
'''
@ -311,9 +311,9 @@ class Shell(object):
rcode = None
cmd = self._cmd_str(cmd)
logmsg = u'Executing non-blocking command: {0}'.format(cmd)
logmsg = 'Executing non-blocking command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
logmsg = logmsg.replace(self.passwd, ('*' * 6))
log.debug(logmsg)
for out, err, rcode in self._run_nb_cmd(cmd):
@ -322,7 +322,7 @@ class Shell(object):
if err is not None:
r_err.append(err)
yield None, None, None
yield u''.join(r_out), u''.join(r_err), rcode
yield ''.join(r_out), ''.join(r_err), rcode
def exec_cmd(self, cmd):
'''
@ -330,11 +330,11 @@ class Shell(object):
'''
cmd = self._cmd_str(cmd)
logmsg = u'Executing command: {0}'.format(cmd)
logmsg = 'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
if u'decode("base64")' in logmsg or u'base64.b64decode(' in logmsg:
log.debug(u'Executed SHIM command. Command logged to TRACE')
logmsg = logmsg.replace(self.passwd, ('*' * 6))
if 'decode("base64")' in logmsg or 'base64.b64decode(' in logmsg:
log.debug('Executed SHIM command. Command logged to TRACE')
log.trace(logmsg)
else:
log.debug(logmsg)
@ -347,19 +347,19 @@ class Shell(object):
scp a file or files to a remote system
'''
if makedirs:
self.exec_cmd(u'mkdir -p {0}'.format(os.path.dirname(remote)))
self.exec_cmd('mkdir -p {0}'.format(os.path.dirname(remote)))
# scp needs [<ipv6>]
host = self.host
if u':' in host:
host = u'[{0}]'.format(host)
if ':' in host:
host = '[{0}]'.format(host)
cmd = u'{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh=u'scp')
cmd = '{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh='scp')
logmsg = u'Executing command: {0}'.format(cmd)
logmsg = 'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
logmsg = logmsg.replace(self.passwd, ('*' * 6))
log.debug(logmsg)
return self._run_cmd(cmd)
@ -373,16 +373,16 @@ class Shell(object):
cmd,
shell=True,
log_stdout=True,
log_stdout_level=u'trace',
log_stdout_level='trace',
log_stderr=True,
log_stderr_level=u'trace',
log_stderr_level='trace',
stream_stdout=False,
stream_stderr=False)
sent_passwd = 0
send_password = True
ret_stdout = u''
ret_stderr = u''
old_stdout = u''
ret_stdout = ''
ret_stderr = ''
old_stdout = ''
try:
while term.has_unread_data:
@ -399,26 +399,26 @@ class Shell(object):
send_password = False
if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password:
if not self.passwd:
return u'', u'Permission denied, no authentication information', 254
return '', 'Permission denied, no authentication information', 254
if sent_passwd < passwd_retries:
term.sendline(self.passwd)
sent_passwd += 1
continue
else:
# asking for a password, and we can't seem to send it
return u'', u'Password authentication failed', 254
return '', 'Password authentication failed', 254
elif buff and KEY_VALID_RE.search(buff):
if key_accept:
term.sendline(u'yes')
term.sendline('yes')
continue
else:
term.sendline(u'no')
ret_stdout = (u'The host key needs to be accepted, to '
u'auto accept run salt-ssh with the -i '
u'flag:\n{0}').format(stdout)
return ret_stdout, u'', 254
elif buff and buff.endswith(u'_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(u',', u':')) + u'|_E|0|'
term.sendline('no')
ret_stdout = ('The host key needs to be accepted, to '
'auto accept run salt-ssh with the -i '
'flag:\n{0}').format(stdout)
return ret_stdout, '', 254
elif buff and buff.endswith('_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(',', ':')) + '|_E|0|'
term.sendline(mods_raw)
if stdout:
old_stdout = stdout


@ -18,8 +18,8 @@ import os
import stat
import subprocess
THIN_ARCHIVE = u'salt-thin.tgz'
EXT_ARCHIVE = u'salt-ext_mods.tgz'
THIN_ARCHIVE = 'salt-thin.tgz'
EXT_ARCHIVE = 'salt-ext_mods.tgz'
# Keep these in sync with salt/defaults/exitcodes.py
EX_THIN_DEPLOY = 11
@ -54,7 +54,7 @@ def get_system_encoding():
# and reset to None
encoding = None
if not sys.platform.startswith(u'win') and sys.stdin is not None:
if not sys.platform.startswith('win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -80,16 +80,16 @@ def get_system_encoding():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith(u'darwin'):
if sys.platform.startswith('darwin'):
# Mac OS X uses UTF-8
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
encoding = 'utf-8'
elif sys.platform.startswith('win'):
# Windows uses a configurable encoding; on Windows, Python uses the name "mbcs"
# to refer to whatever the currently configured encoding is.
encoding = u'mbcs'
encoding = 'mbcs'
else:
# On linux default to ascii as a last resort
encoding = u'ascii'
encoding = 'ascii'
return encoding
@ -97,7 +97,7 @@ def is_windows():
'''
Simple function to return if a host is Windows or not
'''
return sys.platform.startswith(u'win')
return sys.platform.startswith('win')
def need_deployment():
@ -121,35 +121,35 @@ def need_deployment():
# Attack detected
need_deployment()
# If SUDOing then also give the super user group write permissions
sudo_gid = os.environ.get(u'SUDO_GID')
sudo_gid = os.environ.get('SUDO_GID')
if sudo_gid:
try:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
stt = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
except OSError:
sys.stdout.write(u'\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
u'and is not root, be certain the user is in the same group\nas the login user')
sys.stdout.write('\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
'and is not root, be certain the user is in the same group\nas the login user')
sys.exit(1)
# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write(u"{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.exit(EX_THIN_DEPLOY)
# Adapted from salt.utils.hashutils.get_hash()
def get_hash(path, form=u'sha1', chunk_size=4096):
def get_hash(path, form='sha1', chunk_size=4096):
'''
Generate a hash digest string for a file.
'''
try:
hash_type = getattr(hashlib, form)
except AttributeError:
raise ValueError(u'Invalid hash type: {0}'.format(form))
with open(path, u'rb') as ifile:
raise ValueError('Invalid hash type: {0}'.format(form))
with open(path, 'rb') as ifile:
hash_obj = hash_type()
# read the file in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b''): # future lint: disable=non-unicode-string
for chunk in iter(lambda: ifile.read(chunk_size), b''):
hash_obj.update(chunk)
return hash_obj.hexdigest()
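A hypothetical call to the shim helper defined above; the archive path is illustrative:

    # Hashes the file in fixed-size chunks instead of reading it whole.
    digest = get_hash('/tmp/salt-thin.tgz', form='sha256', chunk_size=8192)
    print(digest)  # hex digest string, 64 characters for sha256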
@ -170,7 +170,7 @@ def need_ext():
'''
Signal that external modules need to be deployed.
'''
sys.stdout.write(u"{0}\next_mods\n".format(OPTIONS.delimiter))
sys.stdout.write("{0}\next_mods\n".format(OPTIONS.delimiter))
sys.exit(EX_MOD_DEPLOY)
@ -180,20 +180,20 @@ def unpack_ext(ext_path):
'''
modcache = os.path.join(
OPTIONS.saltdir,
u'running_data',
u'var',
u'cache',
u'salt',
u'minion',
u'extmods')
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.unlink(ext_path)
ver_path = os.path.join(modcache, u'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, u'ext_version')
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
shutil.move(ver_path, ver_dst)
@ -208,8 +208,8 @@ def main(argv): # pylint: disable=W0613
unpack_thin(thin_path)
# Salt thin now is available to use
else:
if not sys.platform.startswith(u'win'):
scpstat = subprocess.Popen([u'/bin/sh', u'-c', u'command -v scp']).wait()
if not sys.platform.startswith('win'):
scpstat = subprocess.Popen(['/bin/sh', '-c', 'command -v scp']).wait()
if scpstat != 0:
sys.exit(EX_SCP_NOT_FOUND)
@ -218,46 +218,46 @@ def main(argv): # pylint: disable=W0613
if not os.path.isdir(OPTIONS.saltdir):
sys.stderr.write(
u'ERROR: salt path "{0}" exists but is'
u' not a directory\n'.format(OPTIONS.saltdir)
'ERROR: salt path "{0}" exists but is'
' not a directory\n'.format(OPTIONS.saltdir)
)
sys.exit(EX_CANTCREAT)
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, u'version'))
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, 'version'))
if not os.path.exists(version_path) or not os.path.isfile(version_path):
sys.stderr.write(
u'WARNING: Unable to locate current thin '
u' version: {0}.\n'.format(version_path)
'WARNING: Unable to locate current thin '
' version: {0}.\n'.format(version_path)
)
need_deployment()
with open(version_path, u'r') as vpo:
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.version:
sys.stderr.write(
u'WARNING: current thin version {0}'
u' is not up-to-date with {1}.\n'.format(
'WARNING: current thin version {0}'
' is not up-to-date with {1}.\n'.format(
cur_version, OPTIONS.version
)
)
need_deployment()
# Salt thin exists and is up-to-date - fall through and use it
salt_call_path = os.path.join(OPTIONS.saltdir, u'salt-call')
salt_call_path = os.path.join(OPTIONS.saltdir, 'salt-call')
if not os.path.isfile(salt_call_path):
sys.stderr.write(u'ERROR: thin is missing "{0}"\n'.format(salt_call_path))
sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
need_deployment()
with open(os.path.join(OPTIONS.saltdir, u'minion'), u'w') as config:
config.write(OPTIONS.config + u'\n')
with open(os.path.join(OPTIONS.saltdir, 'minion'), 'w') as config:
config.write(OPTIONS.config + '\n')
if OPTIONS.ext_mods:
ext_path = os.path.join(OPTIONS.saltdir, EXT_ARCHIVE)
if os.path.exists(ext_path):
unpack_ext(ext_path)
else:
version_path = os.path.join(OPTIONS.saltdir, u'ext_version')
version_path = os.path.join(OPTIONS.saltdir, 'ext_version')
if not os.path.exists(version_path) or not os.path.isfile(version_path):
need_ext()
with open(version_path, u'r') as vpo:
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.ext_mods:
need_ext()
@ -270,38 +270,38 @@ def main(argv): # pylint: disable=W0613
salt_argv = [
sys.executable,
salt_call_path,
u'--retcode-passthrough',
u'--local',
u'--metadata',
u'--out', u'json',
u'-l', u'quiet',
u'-c', OPTIONS.saltdir
'--retcode-passthrough',
'--local',
'--metadata',
'--out', 'json',
'-l', 'quiet',
'-c', OPTIONS.saltdir
]
try:
if argv_prepared[-1].startswith(u'--no-parse='):
if argv_prepared[-1].startswith('--no-parse='):
salt_argv.append(argv_prepared.pop(-1))
except (IndexError, TypeError):
pass
salt_argv.append(u'--')
salt_argv.append('--')
salt_argv.extend(argv_prepared)
sys.stderr.write(u'SALT_ARGV: {0}\n'.format(salt_argv))
sys.stderr.write('SALT_ARGV: {0}\n'.format(salt_argv))
# Only emit the delimiter on *both* stdout and stderr when completely successful.
# Yes, the flush() is necessary.
sys.stdout.write(OPTIONS.delimiter + u'\n')
sys.stdout.write(OPTIONS.delimiter + '\n')
sys.stdout.flush()
if not OPTIONS.tty:
sys.stderr.write(OPTIONS.delimiter + u'\n')
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors=u"replace"))
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors="replace"))
sys.stdout.flush()
if OPTIONS.wipe:
shutil.rmtree(OPTIONS.saltdir)
@ -313,5 +313,5 @@ def main(argv): # pylint: disable=W0613
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
if __name__ == u'__main__':
if __name__ == '__main__':
sys.exit(main(sys.argv))


@ -85,33 +85,33 @@ class SSHHighState(salt.state.BaseHighState):
'''
Evaluate master_tops locally
'''
if u'id' not in self.opts:
log.error(u'Received call for external nodes without an id')
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts[u'id']):
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if u'grains' in self.opts:
grains = self.opts[u'grains']
if 'grains' in self.opts:
grains = self.opts['grains']
for fun in self.tops:
if fun not in self.opts.get(u'master_tops', {}):
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
u'Top function %s failed with error %s for minion %s',
fun, exc, self.opts[u'id']
'Top function %s failed with error %s for minion %s',
fun, exc, self.opts['id']
)
return ret
def lowstate_file_refs(chunks, extras=u''):
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
@ -119,12 +119,12 @@ def lowstate_file_refs(chunks, extras=u''):
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = u'base'
saltenv = 'base'
crefs = []
for state in chunk:
if state == u'__env__':
if state == '__env__':
saltenv = chunk[state]
elif state.startswith(u'__'):
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
@ -132,7 +132,7 @@ def lowstate_file_refs(chunks, extras=u''):
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(u',')
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
@ -144,7 +144,7 @@ def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = u'salt://'
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, six.string_types):
@ -166,38 +166,38 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, u'lowstate.json')
pillarfn = os.path.join(gendir, u'pillar.json')
roster_grainsfn = os.path.join(gendir, u'roster_grains.json')
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
sync_refs = [
[salt.utils.url.create(u'_modules')],
[salt.utils.url.create(u'_states')],
[salt.utils.url.create(u'_grains')],
[salt.utils.url.create(u'_renderers')],
[salt.utils.url.create(u'_returners')],
[salt.utils.url.create(u'_output')],
[salt.utils.url.create(u'_utils')],
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.files.fopen(lowfn, u'w+') as fp_:
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.files.fopen(pillarfn, u'w+') as fp_:
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
fp_.write(json.dumps(pillar))
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, u'w+') as fp_:
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
fp_.write(json.dumps(roster_grains))
if id_ is None:
id_ = u''
id_ = ''
try:
cachedir = os.path.join(u'salt-ssh', id_).rstrip(os.sep)
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join(u'salt-ssh', str(id_)).rstrip(os.sep)
cachedir = os.path.join('salt-ssh', str(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, u'files', saltenv)
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
@ -209,7 +209,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = u''
path = ''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
@ -220,10 +220,10 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = u''
files = ''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip(u'/')
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
tgt = os.path.join(
env_root,
short,
@ -240,7 +240,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, u'w:gz')) as tfp:
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)


@ -42,8 +42,8 @@ class FunctionWrapper(object):
self.wfuncs = wfuncs if isinstance(wfuncs, dict) else {}
self.opts = opts
self.mods = mods if isinstance(mods, dict) else {}
self.kwargs = {u'id_': id_,
u'host': host}
self.kwargs = {'id_': id_,
'host': host}
self.fsclient = fsclient
self.kwargs.update(kwargs)
self.aliases = aliases
@ -67,14 +67,14 @@ class FunctionWrapper(object):
'''
Return the function call to simulate the salt local lookup system
'''
if u'.' not in cmd and not self.cmd_prefix:
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. Create a new
# FunctionWrapper which contains the prefix 'cmd' (again, for the
# salt.cmd.run example)
kwargs = copy.deepcopy(self.kwargs)
id_ = kwargs.pop(u'id_')
host = kwargs.pop(u'host')
id_ = kwargs.pop('id_')
host = kwargs.pop('host')
return FunctionWrapper(self.opts,
id_,
host,
@ -90,7 +90,7 @@ class FunctionWrapper(object):
# We're in an inner FunctionWrapper as created by the code block
# above. Reconstruct the original cmd in the form 'cmd.run' and
# then evaluate as normal
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
return self.wfuncs[cmd]
@ -104,7 +104,7 @@ class FunctionWrapper(object):
'''
argv = [cmd]
argv.extend([json.dumps(arg) for arg in args])
argv.extend([u'{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
argv.extend(['{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
single = salt.client.ssh.Single(
self.opts,
argv,
@ -115,21 +115,21 @@ class FunctionWrapper(object):
**self.kwargs
)
stdout, stderr, retcode = single.cmd_block()
if stderr.count(u'Permission Denied'):
return {u'_error': u'Permission Denied',
u'stdout': stdout,
u'stderr': stderr,
u'retcode': retcode}
if stderr.count('Permission Denied'):
return {'_error': 'Permission Denied',
'stdout': stdout,
'stderr': stderr,
'retcode': retcode}
try:
ret = json.loads(stdout, object_hook=salt.utils.data.decode_dict)
if len(ret) < 2 and u'local' in ret:
ret = ret[u'local']
ret = ret.get(u'return', {})
if len(ret) < 2 and 'local' in ret:
ret = ret['local']
ret = ret.get('return', {})
except ValueError:
ret = {u'_error': u'Failed to return clean data',
u'stderr': stderr,
u'stdout': stdout,
u'retcode': retcode}
ret = {'_error': 'Failed to return clean data',
'stderr': stderr,
'stdout': stdout,
'retcode': retcode}
return ret
return caller
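Illustrative only (not from the commit): the argv assembled by caller() above for a hypothetical wrapped call such as __salt__['grains.get']('os', default='unknown'):

    import json

    cmd = 'grains.get'
    args = ('os',)
    kwargs = {'default': 'unknown'}

    argv = [cmd]
    argv.extend([json.dumps(arg) for arg in args])
    argv.extend(['{0}={1}'.format(key, json.dumps(val))
                 for key, val in kwargs.items()])
    # argv == ['grains.get', '"os"', 'default="unknown"']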
@ -137,18 +137,18 @@ class FunctionWrapper(object):
'''
Set aliases for functions
'''
if u'.' not in cmd and not self.cmd_prefix:
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. We don't
# support assigning directly to prefixes in this way
raise KeyError(u'Cannot assign to module key {0} in the '
u'FunctionWrapper'.format(cmd))
raise KeyError('Cannot assign to module key {0} in the '
'FunctionWrapper'.format(cmd))
if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the first code
# block in __getitem__. Reconstruct the original cmd in the form
# 'cmd.run' and then evaluate as normal
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
self.wfuncs[cmd] = value


@ -17,44 +17,44 @@ import salt.syspaths as syspaths
from salt.ext import six
# Set up the default values for all systems
DEFAULTS = {u'mongo.db': u'salt',
u'mongo.host': u'salt',
u'mongo.password': u'',
u'mongo.port': 27017,
u'mongo.user': u'',
u'redis.db': u'0',
u'redis.host': u'salt',
u'redis.port': 6379,
u'test.foo': u'unconfigured',
u'ca.cert_base_path': u'/etc/pki',
u'solr.cores': [],
u'solr.host': u'localhost',
u'solr.port': u'8983',
u'solr.baseurl': u'/solr',
u'solr.type': u'master',
u'solr.request_timeout': None,
u'solr.init_script': u'/etc/rc.d/solr',
u'solr.dih.import_options': {u'clean': False, u'optimize': True,
u'commit': True, u'verbose': False},
u'solr.backup_path': None,
u'solr.num_backups': 1,
u'poudriere.config': u'/usr/local/etc/poudriere.conf',
u'poudriere.config_dir': u'/usr/local/etc/poudriere.d',
u'ldap.server': u'localhost',
u'ldap.port': u'389',
u'ldap.tls': False,
u'ldap.scope': 2,
u'ldap.attrs': None,
u'ldap.binddn': u'',
u'ldap.bindpw': u'',
u'hosts.file': u'/etc/hosts',
u'aliases.file': u'/etc/aliases',
u'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, u'salt-images'),
u'virt.tunnel': False,
DEFAULTS = {'mongo.db': 'salt',
'mongo.host': 'salt',
'mongo.password': '',
'mongo.port': 27017,
'mongo.user': '',
'redis.db': '0',
'redis.host': 'salt',
'redis.port': 6379,
'test.foo': 'unconfigured',
'ca.cert_base_path': '/etc/pki',
'solr.cores': [],
'solr.host': 'localhost',
'solr.port': '8983',
'solr.baseurl': '/solr',
'solr.type': 'master',
'solr.request_timeout': None,
'solr.init_script': '/etc/rc.d/solr',
'solr.dih.import_options': {'clean': False, 'optimize': True,
'commit': True, 'verbose': False},
'solr.backup_path': None,
'solr.num_backups': 1,
'poudriere.config': '/usr/local/etc/poudriere.conf',
'poudriere.config_dir': '/usr/local/etc/poudriere.d',
'ldap.server': 'localhost',
'ldap.port': '389',
'ldap.tls': False,
'ldap.scope': 2,
'ldap.attrs': None,
'ldap.binddn': '',
'ldap.bindpw': '',
'hosts.file': '/etc/hosts',
'aliases.file': '/etc/aliases',
'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, 'salt-images'),
'virt.tunnel': False,
}
def backup_mode(backup=u''):
def backup_mode(backup=''):
'''
Return the backup mode
@ -66,7 +66,7 @@ def backup_mode(backup=u''):
'''
if backup:
return backup
return option(u'backup_mode')
return option('backup_mode')
def manage_mode(mode):
@ -97,14 +97,14 @@ def valid_fileproto(uri):
salt '*' config.valid_fileproto salt://path/to/file
'''
try:
return bool(re.match(u'^(?:salt|https?|ftp)://', uri))
return bool(re.match('^(?:salt|https?|ftp)://', uri))
except Exception:
return False
def option(
value,
default=u'',
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -121,8 +121,8 @@ def option(
if value in __opts__:
return __opts__[value]
if not omit_master:
if value in __pillar__.get(u'master', {}):
return __pillar__[u'master'][value]
if value in __pillar__.get('master', {}):
return __pillar__['master'][value]
if not omit_pillar:
if value in __pillar__:
return __pillar__[value]
@ -132,7 +132,7 @@ def option(
def merge(value,
default=u'',
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -155,8 +155,8 @@ def merge(value,
if isinstance(ret, six.string_types):
return ret
if not omit_master:
if value in __pillar__.get(u'master', {}):
tmp = __pillar__[u'master'][value]
if value in __pillar__.get('master', {}):
tmp = __pillar__['master'][value]
if ret is None:
ret = tmp
if isinstance(ret, six.string_types):
@ -185,7 +185,7 @@ def merge(value,
return ret or default
def get(key, default=u''):
def get(key, default=''):
'''
.. versionadded: 0.14.0
@ -216,17 +216,17 @@ def get(key, default=u''):
salt '*' config.get pkg:apache
'''
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__pillar__.get(u'master', {}), key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-')
if ret != '_|-':
return ret
return default
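A condensed sketch (not part of the commit) of the precedence config.get applies above: minion opts, then grains, then pillar, then master pillar, then the caller's default. It assumes the __opts__, __grains__ and __pillar__ dunders injected into an execution module:

    import salt.utils.data

    SENTINEL = '_|-'

    def _lookup(key, default=''):
        # Mirrors the order of the traverse calls in config.get above.
        for source in (__opts__, __grains__, __pillar__,
                       __pillar__.get('master', {})):
            ret = salt.utils.data.traverse_dict_and_list(source, key, SENTINEL)
            if ret != SENTINEL:
                return ret
        return default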
@ -243,10 +243,10 @@ def dot_vals(value):
salt '*' config.dot_vals host
'''
ret = {}
for key, val in six.iteritems(__pillar__.get(u'master', {})):
if key.startswith(u'{0}.'.format(value)):
for key, val in six.iteritems(__pillar__.get('master', {})):
if key.startswith('{0}.'.format(value)):
ret[key] = val
for key, val in six.iteritems(__opts__):
if key.startswith(u'{0}.'.format(value)):
if key.startswith('{0}.'.format(value)):
ret[key] = val
return ret


@ -18,7 +18,7 @@ log = logging.getLogger(__name__)
def get_file(path,
dest,
saltenv=u'base',
saltenv='base',
makedirs=False,
template=None,
gzip=None):
@ -31,83 +31,83 @@ def get_file(path,
cp.get_file. The argument is only accepted for interface compatibility.
'''
if gzip is not None:
log.warning(u'The gzip argument to cp.get_file in salt-ssh is '
u'unsupported')
log.warning('The gzip argument to cp.get_file in salt-ssh is '
'unsupported')
if template is not None:
(path, dest) = _render_filenames(path, dest, saltenv, template)
src = __context__[u'fileclient'].cache_file(
src = __context__['fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest, makedirs)
return not ret[2]
def get_dir(path, dest, saltenv=u'base'):
def get_dir(path, dest, saltenv='base'):
'''
Transfer a directory down
'''
src = __context__[u'fileclient'].cache_dir(
src = __context__['fileclient'].cache_dir(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
src = u' '.join(src)
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
src = ' '.join(src)
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def get_url(path, dest, saltenv=u'base'):
def get_url(path, dest, saltenv='base'):
'''
retrieve a URL
'''
src = __context__[u'fileclient'].cache_file(
src = __context__['fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def list_states(saltenv=u'base'):
def list_states(saltenv='base'):
'''
List all the available state modules in an environment
'''
return __context__[u'fileclient'].list_states(saltenv)
return __context__['fileclient'].list_states(saltenv)
def list_master(saltenv=u'base', prefix=u''):
def list_master(saltenv='base', prefix=''):
'''
List all of the files stored on the master
'''
return __context__[u'fileclient'].file_list(saltenv, prefix)
return __context__['fileclient'].file_list(saltenv, prefix)
def list_master_dirs(saltenv=u'base', prefix=u''):
def list_master_dirs(saltenv='base', prefix=''):
'''
List all of the directories stored on the master
'''
return __context__[u'fileclient'].dir_list(saltenv, prefix)
return __context__['fileclient'].dir_list(saltenv, prefix)
def list_master_symlinks(saltenv=u'base', prefix=u''):
def list_master_symlinks(saltenv='base', prefix=''):
'''
List all of the symlinks stored on the master
'''
return __context__[u'fileclient'].symlink_list(saltenv, prefix)
return __context__['fileclient'].symlink_list(saltenv, prefix)
def _render_filenames(path, dest, saltenv, template):
@ -122,16 +122,16 @@ def _render_filenames(path, dest, saltenv, template):
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
u'Attempted to render file paths with unavailable engine '
u'{0}'.format(template)
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
)
kwargs = {}
kwargs[u'salt'] = __salt__
kwargs[u'pillar'] = __pillar__
kwargs[u'grains'] = __grains__
kwargs[u'opts'] = __opts__
kwargs[u'saltenv'] = saltenv
kwargs['salt'] = __salt__
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
def _render(contents):
'''
@ -140,7 +140,7 @@ def _render_filenames(path, dest, saltenv, template):
'''
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, u'w+') as fp_:
with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(contents)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
@ -148,15 +148,15 @@ def _render_filenames(path, dest, saltenv, template):
**kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
if not data[u'result']:
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
u'Failed to render file path with error: {0}'.format(
data[u'data']
'Failed to render file path with error: {0}'.format(
data['data']
)
)
else:
return data[u'data']
return data['data']
path = _render(path)
dest = _render(dest)


@ -29,28 +29,28 @@ def _serial_sanitizer(instr):
'''
length = len(instr)
index = int(math.floor(length * .75))
return u'{0}{1}'.format(instr[:index], u'X' * (length - index))
return '{0}{1}'.format(instr[:index], 'X' * (length - index))
_FQDN_SANITIZER = lambda x: u'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: u'MINION'
_DOMAINNAME_SANITIZER = lambda x: u'DOMAINNAME'
_FQDN_SANITIZER = lambda x: 'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: 'MINION'
_DOMAINNAME_SANITIZER = lambda x: 'DOMAINNAME'
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
u'serialnumber': _serial_sanitizer,
u'domain': _DOMAINNAME_SANITIZER,
u'fqdn': _FQDN_SANITIZER,
u'id': _FQDN_SANITIZER,
u'host': _HOSTNAME_SANITIZER,
u'localhost': _HOSTNAME_SANITIZER,
u'nodename': _HOSTNAME_SANITIZER,
'serialnumber': _serial_sanitizer,
'domain': _DOMAINNAME_SANITIZER,
'fqdn': _FQDN_SANITIZER,
'id': _FQDN_SANITIZER,
'host': _HOSTNAME_SANITIZER,
'localhost': _HOSTNAME_SANITIZER,
'nodename': _HOSTNAME_SANITIZER,
}
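These sanitizers are applied by grains.item when the sanitize flag is set (see below); the serial-number rule keeps roughly the first 75% of the characters and masks the rest with 'X'. A self-contained sketch mirroring _serial_sanitizer above:

    import math

    def serial_sanitizer(instr):
        # Keep the first 75% of the string, replace the remainder with 'X'.
        index = int(math.floor(len(instr) * .75))
        return '{0}{1}'.format(instr[:index], 'X' * (len(instr) - index))

    assert serial_sanitizer('ABC12345') == 'ABC123XX'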
def get(key, default=u'', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
'''
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
@ -154,7 +154,7 @@ def item(*args, **kwargs):
ret[arg] = __grains__[arg]
except KeyError:
pass
if salt.utils.data.is_true(kwargs.get(u'sanitize')):
if salt.utils.data.is_true(kwargs.get('sanitize')):
for arg, func in six.iteritems(_SANITIZERS):
if arg in ret:
ret[arg] = func(ret[arg])
@ -175,9 +175,9 @@ def ls(): # pylint: disable=C0103
def filter_by(lookup_dict,
grain=u'os_family',
grain='os_family',
merge=None,
default=u'default',
default='default',
base=None):
'''
.. versionadded:: 0.17.0
@ -271,12 +271,12 @@ def filter_by(lookup_dict,
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException(u'filter_by default and look-up values must both be dictionaries.')
raise SaltException('filter_by default and look-up values must both be dictionaries.')
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException(u'filter_by merge argument must be a dictionary.')
raise SaltException('filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge


@ -14,7 +14,7 @@ import copy
import salt.client.ssh
def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
def get(tgt, fun, tgt_type='glob', roster='flat'):
'''
Get data from the mine based on the target, function and tgt_type
@ -36,15 +36,15 @@ def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
salt-ssh '*' mine.get '192.168.5.0' network.ipaddrs roster=scan
'''
# Set up opts for the SSH object
opts = copy.deepcopy(__context__[u'master_opts'])
opts = copy.deepcopy(__context__['master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts[u'roster'] = roster
opts[u'argv'] = [fun]
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = []
opts['roster'] = roster
opts['argv'] = [fun]
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = []
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -56,8 +56,8 @@ def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
cret = {}
for host in rets:
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
if 'return' in rets[host]:
cret[host] = rets[host]['return']
else:
cret[host] = rets[host]
return cret


@ -14,7 +14,7 @@ import salt.utils.dictupdate
from salt.defaults import DEFAULT_TARGET_DELIM
def get(key, default=u'', merge=False, delimiter=DEFAULT_TARGET_DELIM):
def get(key, default='', merge=False, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 0.14
@ -132,10 +132,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
__pillar__, key, KeyError, delimiter)
if ret is KeyError:
raise KeyError(u"Pillar key not found: {0}".format(key))
raise KeyError("Pillar key not found: {0}".format(key))
if not isinstance(ret, dict):
raise ValueError(u"Pillar value in key {0} is not a dict".format(key))
raise ValueError("Pillar value in key {0} is not a dict".format(key))
return ret.keys()


@ -26,10 +26,10 @@ log = logging.getLogger(__name__)
def _publish(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=None,
form=u'clean',
form='clean',
roster=None):
'''
Publish a command "from the minion out to other minions". In reality, the
@ -55,13 +55,13 @@ def _publish(tgt,
salt-ssh system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
'''
if fun.startswith(u'publish.'):
log.info(u'Cannot publish publish calls. Returning {}')
if fun.startswith('publish.'):
log.info('Cannot publish publish calls. Returning {}')
return {}
# TODO: implement returners? Do they make sense for salt-ssh calls?
if returner:
log.warning(u'Returners currently not supported in salt-ssh publish')
log.warning('Returners currently not supported in salt-ssh publish')
# Make sure args have been processed
if arg is None:
@ -74,17 +74,17 @@ def _publish(tgt,
arg = []
# Set up opts for the SSH object
opts = copy.deepcopy(__context__[u'master_opts'])
opts = copy.deepcopy(__context__['master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts[u'roster'] = roster
opts['roster'] = roster
if timeout:
opts[u'timeout'] = timeout
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
opts['timeout'] = timeout
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -94,11 +94,11 @@ def _publish(tgt,
for ret in ssh.run_iter():
rets.update(ret)
if form == u'clean':
if form == 'clean':
cret = {}
for host in rets:
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
if 'return' in rets[host]:
cret[host] = rets[host]['return']
else:
cret[host] = rets[host]
return cret
@ -109,8 +109,8 @@ def _publish(tgt,
def publish(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
@ -176,10 +176,10 @@ def publish(tgt,
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -189,15 +189,15 @@ def publish(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form=u'clean',
form='clean',
roster=roster)
def full_data(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
@ -226,10 +226,10 @@ def full_data(tgt,
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -239,7 +239,7 @@ def full_data(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form=u'full',
form='full',
roster=roster)
@ -262,5 +262,5 @@ def runner(fun, arg=None, timeout=5):
arg = []
# Create and run the runner
runner = salt.runner.RunnerClient(__opts__[u'__master_opts__'])
runner = salt.runner.RunnerClient(__opts__['__master_opts__'])
return runner.cmd(fun, arg)


@ -29,7 +29,7 @@ import salt.log
from salt.ext import six
__func_alias__ = {
u'apply_': u'apply'
'apply_': 'apply'
}
log = logging.getLogger(__name__)
@ -42,35 +42,35 @@ def _merge_extra_filerefs(*args):
for arg in args:
if isinstance(arg, six.string_types):
if arg:
ret.extend(arg.split(u','))
ret.extend(arg.split(','))
elif isinstance(arg, list):
if arg:
ret.extend(arg)
return u','.join(ret)
return ','.join(ret)
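_merge_extra_filerefs flattens any mix of comma-separated strings and lists into a single comma-joined string before it is handed to lowstate_file_refs. A standalone sketch of the same behaviour (six dependency dropped, Python 3 assumed):

    def merge_extra_filerefs(*args):
        # Comma-separated strings and lists are flattened into one
        # comma-joined string; empty arguments are ignored.
        ret = []
        for arg in args:
            if isinstance(arg, str):
                if arg:
                    ret.extend(arg.split(','))
            elif isinstance(arg, list):
                if arg:
                    ret.extend(arg)
        return ','.join(ret)

    assert merge_extra_filerefs('salt://a,salt://b', ['salt://c'], '') == 'salt://a,salt://b,salt://c'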
def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(u',')
if u'__exclude__' in high_data:
high_data[u'__exclude__'].extend(exclude)
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
else:
high_data[u'__exclude__'] = exclude
high_data['__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
@ -87,38 +87,38 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -131,7 +131,7 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -150,51 +150,51 @@ def low(data, **kwargs):
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
for chunk in chunks:
chunk[u'__id__'] = chunk[u'name'] if not chunk.get(u'__id__') else chunk[u'__id__']
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -207,7 +207,7 @@ def low(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -225,49 +225,49 @@ def high(data, **kwargs):
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
chunks = st_.state.compile_high_data(data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -280,7 +280,7 @@ def high(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -459,56 +459,56 @@ def highstate(test=None, **kwargs):
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
# Check for errors
for chunk in chunks:
if not isinstance(chunk, dict):
__context__[u'retcode'] = 1
__context__['retcode'] = 1
return chunks
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -521,7 +521,7 @@ def highstate(test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -540,55 +540,55 @@ def top(topfn, test=None, **kwargs):
salt '*' state.top reverse_top.sls exclude=sls_to_exclude
salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
if salt.utils.args.test_mode(test=test, **kwargs):
__opts__[u'test'] = True
__opts__['test'] = True
else:
__opts__[u'test'] = __opts__.get(u'test', None)
__opts__['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
st_.opts[u'state_top'] = os.path.join(u'salt://', topfn)
__context__['fileclient'])
st_.opts['state_top'] = os.path.join('salt://', topfn)
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -601,7 +601,7 @@ def top(topfn, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -618,12 +618,12 @@ def show_highstate():
salt '*' state.show_highstate
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
return st_.compile_highstate()
@ -637,16 +637,16 @@ def show_lowstate():
salt '*' state.show_lowstate
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
return st_.compile_low_chunks()
def show_sls(mods, saltenv=u'base', test=None, **kwargs):
def show_sls(mods, saltenv='base', test=None, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master
@ -657,20 +657,20 @@ def show_sls(mods, saltenv=u'base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -686,7 +686,7 @@ def show_sls(mods, saltenv=u'base', test=None, **kwargs):
return high_data
def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
def show_low_sls(mods, saltenv='base', test=None, **kwargs):
'''
Display the low state data from a specific sls or list of sls files on the
master.
@ -699,21 +699,21 @@ def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -740,12 +740,12 @@ def show_top():
salt '*' state.show_top
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
top_data = st_.get_top()
errors = []
errors += st_.verify_tops(top_data)
@ -775,30 +775,30 @@ def single(fun, name, test=None, **kwargs):
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
# state.fun -> [state, fun]
comps = fun.split(u'.')
comps = fun.split('.')
if len(comps) < 2:
__context__[u'retcode'] = 1
return u'Invalid function passed'
__context__['retcode'] = 1
return 'Invalid function passed'
# Create the low chunk, using kwargs as a base
kwargs.update({u'state': comps[0],
u'fun': comps[1],
u'__id__': name,
u'name': name})
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
opts = copy.deepcopy(__opts__)
# Set test mode
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
# Get the override pillar data
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
# Create the State environment
st_ = salt.client.ssh.state.SSHState(__opts__, __pillar__)
@ -806,7 +806,7 @@ def single(fun, name, test=None, **kwargs):
# Verify the low chunk
err = st_.verify_data(kwargs)
if err:
__context__[u'retcode'] = 1
__context__['retcode'] = 1
return err
# Must be a list of low-chunks
@ -817,46 +817,46 @@ def single(fun, name, test=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
# Create a hash so we can verify the tar on the target system
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
# We use state.pkg to execute the "state package"
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
# Create a salt-ssh Single object to actually do the ssh work
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
# Copy the tar down
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
# Run the state.pkg command on the target
stdout, stderr, _ = single.cmd_block()
@ -871,7 +871,7 @@ def single(fun, name, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout


@ -821,7 +821,7 @@ def avail_images(call=None):
.. code-block:: yaml
image_url: images.joyent.com/image
image_url: images.joyent.com/images
'''
if call == 'action':
raise SaltCloudSystemExit(


@ -3652,6 +3652,65 @@ def revert_to_snapshot(name, kwargs=None, call=None):
return msg
def remove_snapshot(name, kwargs=None, call=None):
'''
Remove a snapshot of the specified virtual machine in this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -a remove_snapshot vmname snapshot_name="mySnapshot"
salt-cloud -a remove_snapshot vmname snapshot_name="mySnapshot" [remove_children="True"]
'''
if call != 'action':
raise SaltCloudSystemExit(
'The remove_snapshot action must be called with '
'-a or --action.'
)
if kwargs is None:
kwargs = {}
snapshot_name = kwargs.get('snapshot_name') if kwargs and 'snapshot_name' in kwargs else None
remove_children = _str_to_bool(kwargs.get('remove_children', False))
if not snapshot_name:
raise SaltCloudSystemExit(
'You must specify snapshot name for the snapshot to be deleted.'
)
vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name)
if not _get_snapshot_ref_by_name(vm_ref, snapshot_name):
raise SaltCloudSystemExit(
'Could not find the snapshot with the specified name.'
)
try:
snap_obj = _get_snapshot_ref_by_name(vm_ref, snapshot_name).snapshot
task = snap_obj.RemoveSnapshot_Task(remove_children)
salt.utils.vmware.wait_for_task(task, name, 'remove snapshot', 5, 'info')
except Exception as exc:
log.error(
'Error while removing snapshot of {0}: {1}'.format(
name,
exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return 'failed to remove snapshot'
if vm_ref.snapshot:
return {'Snapshot removed successfully': _get_snapshots(vm_ref.snapshot.rootSnapshotList,
vm_ref.snapshot.currentSnapshot)}
else:
return 'Snapshots removed successfully'
def remove_all_snapshots(name, kwargs=None, call=None):
'''
Remove all the snapshots present for the specified virtual machine.


@ -124,6 +124,15 @@ VALID_OPTS = {
# master address will not be split into IP and PORT.
'master_uri_format': str,
# The following options refer to the Minion only, and they specify
# the details of the source address / port to be used when connecting to
# the Master. This is useful when dealing with machines where, due to
# firewall rules, you are restricted to a certain IP/port combination only.
'source_interface_name': str,
'source_address': str,
'source_ret_port': (six.string_types, int),
'source_publish_port': (six.string_types, int),
# The fingerprint of the master key may be specified to increase security. Generate
# a master fingerprint with `salt-key -F master`
'master_finger': str,
@ -1144,6 +1153,9 @@ VALID_OPTS = {
# part of the extra_minion_data param
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
'pass_to_ext_pillars': (six.string_types, list),
# Used by salt.modules.dockermod.compare_container_networks to specify which keys are compared
'docker.compare_container_networks': dict,
}
# default configurations
@ -1152,6 +1164,10 @@ DEFAULT_MINION_OPTS = {
'master': 'salt',
'master_type': 'str',
'master_uri_format': 'default',
'source_interface_name': '',
'source_address': '',
'source_ret_port': 0,
'source_publish_port': 0,
'master_port': 4506,
'master_finger': '',
'master_shuffle': False,
@ -1419,6 +1435,11 @@ DEFAULT_MINION_OPTS = {
'extmod_whitelist': {},
'extmod_blacklist': {},
'minion_sign_messages': False,
'docker.compare_container_networks': {
'static': ['Aliases', 'Links', 'IPAMConfig'],
'automatic': ['IPAddress', 'Gateway',
'GlobalIPv6Address', 'IPv6Gateway'],
},
}
DEFAULT_MASTER_OPTS = {
@ -2423,7 +2444,7 @@ def syndic_config(master_config_path,
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir'
'autosign_file', 'autoreject_file', 'token_dir', 'autosign_grains_dir'
]
for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
# If this is not a URI and instead a local path
@ -3603,23 +3624,23 @@ def apply_minion_config(overrides=None,
if overrides:
opts.update(overrides)
if u'environment' in opts:
if u'saltenv' in opts:
if 'environment' in opts:
if 'saltenv' in opts:
log.warning(
u'The \'saltenv\' and \'environment\' minion config options '
u'cannot both be used. Ignoring \'environment\' in favor of '
u'\'saltenv\'.',
'The \'saltenv\' and \'environment\' minion config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom module is
# referencing __opts__['environment']
opts[u'environment'] = opts[u'saltenv']
opts['environment'] = opts['saltenv']
else:
log.warning(
u'The \'environment\' minion config option has been renamed '
u'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts[u'environment']
'The \'environment\' minion config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts[u'saltenv'] = opts[u'environment']
opts['saltenv'] = opts['environment']
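The reconciliation above reduces to a simple rule: when both keys are present, saltenv wins and overwrites environment; when only the legacy environment key is present, its value is promoted to saltenv. A minimal sketch of that rule (deprecation warnings omitted, function name illustrative):

    def reconcile_saltenv(opts):
        # 'saltenv' wins over the legacy 'environment' key; otherwise the
        # legacy value is promoted to 'saltenv'.
        if 'environment' in opts:
            if 'saltenv' in opts:
                opts['environment'] = opts['saltenv']
            else:
                opts['saltenv'] = opts['environment']
        return opts

    assert reconcile_saltenv({'environment': 'dev', 'saltenv': 'prod'})['environment'] == 'prod'
    assert reconcile_saltenv({'environment': 'dev'})['saltenv'] == 'dev'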
opts['__cli'] = os.path.basename(sys.argv[0])
@ -3773,23 +3794,23 @@ def apply_master_config(overrides=None, defaults=None):
if overrides:
opts.update(overrides)
if u'environment' in opts:
if u'saltenv' in opts:
if 'environment' in opts:
if 'saltenv' in opts:
log.warning(
u'The \'saltenv\' and \'environment\' master config options '
u'cannot both be used. Ignoring \'environment\' in favor of '
u'\'saltenv\'.',
'The \'saltenv\' and \'environment\' master config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom runner is
# referencing __opts__['environment']
opts[u'environment'] = opts[u'saltenv']
opts['environment'] = opts['saltenv']
else:
log.warning(
u'The \'environment\' master config option has been renamed '
u'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts[u'environment']
'The \'environment\' master config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts[u'saltenv'] = opts[u'environment']
opts['saltenv'] = opts['environment']
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
@ -3833,7 +3854,7 @@ def apply_master_config(overrides=None, defaults=None):
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir',
'sqlite_queue_dir'
'sqlite_queue_dir', 'autosign_grains_dir'
]
# These can be set to syslog, so, not actual paths on the system

File diff suppressed because it is too large.


@ -670,7 +670,7 @@ class SaltLoadModules(ioflo.base.deeding.Deed):
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts.value['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.value.get('modules_max_memory', -1) > 0:


@ -348,6 +348,33 @@ class AutoKey(object):
os.remove(stub_file)
return True
def check_autosign_grains(self, autosign_grains):
'''
Check for matching grains in the autosign_grains_dir.
'''
if not autosign_grains or u'autosign_grains_dir' not in self.opts:
return False
autosign_grains_dir = self.opts[u'autosign_grains_dir']
for root, dirs, filenames in os.walk(autosign_grains_dir):
for grain in filenames:
if grain in autosign_grains:
grain_file = os.path.join(autosign_grains_dir, grain)
if not self.check_permissions(grain_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warning(message.format(grain_file))
continue
with salt.utils.files.fopen(grain_file, u'r') as f:
for line in f:
line = line.strip()
if line.startswith(u'#'):
continue
if autosign_grains[grain] == line:
return True
return False
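check_autosign_grains, used by check_autosign below, walks autosign_grains_dir, where each file is named after a grain and lists one accepted value per non-comment line; the key is accepted if any grain value the minion sent matches one of those lines. A standalone sketch of the matching rule under those assumptions (paths and values are hypothetical):

    import os

    def grains_would_autosign(autosign_grains_dir, minion_grains):
        # One file per grain name; each non-comment line is an accepted value.
        for grain in os.listdir(autosign_grains_dir):
            if grain not in minion_grains:
                continue
            with open(os.path.join(autosign_grains_dir, grain)) as fh_:
                for line in fh_:
                    line = line.strip()
                    if line.startswith('#'):
                        continue
                    if minion_grains[grain] == line:
                        return True
        return False

    # e.g. grains_would_autosign('/etc/salt/autosign_grains', {'uuid': 'abc-123'})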
def check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
@ -357,7 +384,7 @@ class AutoKey(object):
self.opts.get('autoreject_file', None)
)
def check_autosign(self, keyid):
def check_autosign(self, keyid, autosign_grains=None):
'''
Checks if the specified keyid should automatically be signed.
'''
@ -367,6 +394,8 @@ class AutoKey(object):
return True
if self.check_autosign_dir(keyid):
return True
if self.check_autosign_grains(autosign_grains):
return True
return False


@ -2,7 +2,7 @@
'''
This module is a central location for all salt exceptions
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import copy
@ -31,16 +31,34 @@ def get_error_message(error):
'''
Get human readable message from Python Exception
'''
return error.args[0] if error.args else u''
return error.args[0] if error.args else ''
class SaltException(Exception):
'''
Base exception class; all Salt-specific exceptions should subclass this
'''
def __init__(self, message=u''):
super(SaltException, self).__init__(message)
self.strerror = message
def __init__(self, message=''):
# Avoid circular import
import salt.utils.stringutils
if six.PY3 or isinstance(message, unicode): # pylint: disable=incompatible-py3-code
super(SaltException, self).__init__(
salt.utils.stringutils.to_str(message)
)
self.message = self.strerror = message
elif isinstance(message, str):
super(SaltException, self).__init__(message)
self.message = self.strerror = \
salt.utils.stringutils.to_unicode(message)
else:
# Some non-string input was passed. Run the parent dunder init with
# a str version, and convert the passed value to unicode for the
# message/strerror attributes.
super(SaltException, self).__init__(str(message))
self.message = self.strerror = unicode(message) # pylint: disable=incompatible-py3-code
def __unicode__(self):
return self.strerror
def pack(self):
'''
@ -49,7 +67,7 @@ class SaltException(Exception):
'''
if six.PY3:
# The message should be a str type, not a unicode
return {u'message': str(self), u'args': self.args}
return {'message': str(self), 'args': self.args}
return dict(message=self.__unicode__(), args=self.args)
@ -100,16 +118,18 @@ class CommandExecutionError(SaltException):
Used when a module runs a command which returns an error and wants
to show the user the output gracefully instead of dying
'''
def __init__(self, message=u'', info=None):
self.error = exc_str_prefix = message
def __init__(self, message='', info=None):
# Avoid circular import
import salt.utils.stringutils
self.error = exc_str_prefix = salt.utils.stringutils.to_unicode(message)
self.info = info
if self.info:
if exc_str_prefix:
if exc_str_prefix[-1] not in u'.?!':
exc_str_prefix += u'.'
exc_str_prefix += u' '
if exc_str_prefix[-1] not in '.?!':
exc_str_prefix += '.'
exc_str_prefix += ' '
exc_str_prefix += u'Additional info follows:\n\n'
exc_str_prefix += 'Additional info follows:\n\n'
# NOTE: exc_str will be passed to the parent class' constructor and
# become self.strerror.
exc_str = exc_str_prefix + _nested_output(self.info)
@ -120,7 +140,7 @@ class CommandExecutionError(SaltException):
# this information would be redundant).
if isinstance(self.info, dict):
info_without_changes = copy.deepcopy(self.info)
info_without_changes.pop(u'changes', None)
info_without_changes.pop('changes', None)
if info_without_changes:
self.strerror_without_changes = \
exc_str_prefix + _nested_output(info_without_changes)
@ -134,6 +154,9 @@ class CommandExecutionError(SaltException):
else:
self.strerror_without_changes = exc_str = self.error
# We call the parent __init__ last instead of first because we need the
# logic above to derive the message string to use for the exception
# message.
super(CommandExecutionError, self).__init__(exc_str)
@ -165,13 +188,13 @@ class FileLockError(SaltException):
'''
Used when an error occurs obtaining a file lock
'''
def __init__(self, msg, time_start=None, *args, **kwargs):
super(FileLockError, self).__init__(msg, *args, **kwargs)
def __init__(self, message, time_start=None, *args, **kwargs):
super(FileLockError, self).__init__(message, *args, **kwargs)
if time_start is None:
log.warning(
u'time_start should be provided when raising a FileLockError. '
u'Defaulting to current time as a fallback, but this may '
u'result in an inaccurate timeout.'
'time_start should be provided when raising a FileLockError. '
'Defaulting to current time as a fallback, but this may '
'result in an inaccurate timeout.'
)
self.time_start = time.time()
else:
@ -188,10 +211,9 @@ class GitLockError(SaltException):
this exception class can be caught in a try/except without being caught as
an OSError.
'''
def __init__(self, errno, strerror, *args, **kwargs):
super(GitLockError, self).__init__(strerror, *args, **kwargs)
def __init__(self, errno, message, *args, **kwargs):
super(GitLockError, self).__init__(message, *args, **kwargs)
self.errno = errno
self.strerror = strerror
class GitRemoteError(SaltException):
@ -224,28 +246,29 @@ class SaltRenderError(SaltException):
def __init__(self,
message,
line_num=None,
buf=u'',
marker=u' <======================',
buf='',
marker=' <======================',
trace=None):
# Avoid circular import
import salt.utils.stringutils
self.error = message
exc_str = copy.deepcopy(message)
exc_str = salt.utils.stringutils.to_unicode(message)
self.line_num = line_num
self.buffer = buf
self.context = u''
self.context = ''
if trace:
exc_str += u'\n{0}\n'.format(trace)
exc_str += '\n{0}\n'.format(trace)
if self.line_num and self.buffer:
# Avoid circular import
import salt.utils.stringutils
import salt.utils.templates
self.context = salt.utils.templates.get_context(
self.buffer,
self.line_num,
marker=marker
)
exc_str += '; line {0}\n\n{1}'.format( # future lint: disable=non-unicode-string
exc_str += '; line {0}\n\n{1}'.format(
self.line_num,
salt.utils.stringutils.to_str(self.context),
salt.utils.stringutils.to_unicode(self.context),
)
super(SaltRenderError, self).__init__(exc_str)
@ -256,8 +279,8 @@ class SaltClientTimeout(SaltException):
Takes the ``jid`` as a parameter
'''
def __init__(self, msg, jid=None, *args, **kwargs):
super(SaltClientTimeout, self).__init__(msg, *args, **kwargs)
def __init__(self, message, jid=None, *args, **kwargs):
super(SaltClientTimeout, self).__init__(message, *args, **kwargs)
self.jid = jid

File diff suppressed because it is too large.


@ -15,7 +15,7 @@ be in the :conf_master:`fileserver_backend` list to enable this backend.
Fileserver environments are defined using the :conf_master:`file_roots`
configuration option.
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import os
@ -30,6 +30,7 @@ import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
@ -229,7 +230,7 @@ def file_hash(load, fnd):
cache_path = os.path.join(__opts__['cachedir'],
'roots/hash',
load['saltenv'],
u'{0}.hash.{1}'.format(fnd['rel'],
'{0}.hash.{1}'.format(fnd['rel'],
__opts__['hash_type']))
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
@ -386,7 +387,7 @@ def _file_lists(load, form):
for path in __opts__['file_roots'][load['saltenv']]:
for root, dirs, files in os.walk(
path,
salt.utils.stringutils.to_unicode(path),
followlinks=__opts__['fileserver_followsymlinks']):
_add_to(ret['dirs'], path, root, dirs)
_add_to(ret['files'], path, root, files)


@ -19,6 +19,7 @@ import logging
import salt.utils.dictupdate
import salt.utils.path
import salt.utils.platform
from salt.modules.zfs import _conform_value
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
@ -88,11 +89,11 @@ def _zfs_pool_data():
# collect zpool data
zpool_cmd = salt.utils.path.which('zpool')
for zpool in __salt__['cmd.run']('{zpool} list -H -o name,size'.format(zpool=zpool_cmd)).splitlines():
for zpool in __salt__['cmd.run']('{zpool} list -H -p -o name,size'.format(zpool=zpool_cmd)).splitlines():
if 'zpool' not in grains:
grains['zpool'] = {}
zpool = zpool.split()
grains['zpool'][zpool[0]] = zpool[1]
grains['zpool'][zpool[0]] = _conform_value(zpool[1], True)
# return grain data
return grains

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -116,7 +116,7 @@ boto3_param_map = {
'publicly_accessible': ('PubliclyAccessible', bool),
'storage_encrypted': ('StorageEncrypted', bool),
'storage_type': ('StorageType', str),
'taglist': ('Tags', list),
'tags': ('Tags', list),
'tde_credential_arn': ('TdeCredentialArn', str),
'tde_credential_password': ('TdeCredentialPassword', str),
'vpc_security_group_ids': ('VpcSecurityGroupIds', list),
@ -280,10 +280,11 @@ def create(name, allocated_storage, db_instance_class, engine,
if not conn:
return {'results': bool(conn)}
taglist = _tag_doc(tags)
kwargs = {}
boto_params = set(boto3_param_map.keys())
keys = set(locals().keys())
tags = _tag_doc(tags)
for param_key in keys.intersection(boto_params):
val = locals()[param_key]
if val is not None:


@ -171,34 +171,23 @@ def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_overr
return (cmd, cwd)
def _check_loglevel(level='info', quiet=False):
def _check_loglevel(level='info'):
'''
Retrieve the level code for use in logging.Logger.log().
'''
def _bad_level(level):
log.error(
'Invalid output_loglevel \'{0}\'. Valid levels are: {1}. Falling '
'back to \'info\'.'
.format(
level,
', '.join(
sorted(LOG_LEVELS, reverse=True)
)
)
)
return LOG_LEVELS['info']
if salt.utils.data.is_true(quiet) or str(level).lower() == 'quiet':
return None
try:
level = level.lower()
if level not in LOG_LEVELS:
return _bad_level(level)
except AttributeError:
return _bad_level(level)
return LOG_LEVELS[level]
if level == 'quiet':
return None
else:
return LOG_LEVELS[level]
except (AttributeError, KeyError):
log.error(
'Invalid output_loglevel \'%s\'. Valid levels are: %s. Falling '
'back to \'info\'.',
level, ', '.join(sorted(LOG_LEVELS, reverse=True))
)
return LOG_LEVELS['info']
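The refactored helper treats 'quiet' as "do not log command output" (it returns None) and falls back to the info level for unknown names or non-string input, since both .lower() on a non-string and a missing dict key land in the same except clause. A self-contained sketch of that behaviour (LOG_LEVELS here is a stand-in for Salt's mapping, and the error logging is omitted):

    import logging

    LOG_LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO,
                  'warning': logging.WARNING, 'error': logging.ERROR}

    def check_loglevel(level='info'):
        try:
            level = level.lower()
            return None if level == 'quiet' else LOG_LEVELS[level]
        except (AttributeError, KeyError):
            # Non-string input or an unknown level name: fall back to info.
            return LOG_LEVELS['info']

    assert check_loglevel('quiet') is None
    assert check_loglevel('DEBUG') == logging.DEBUG
    assert check_loglevel(42) == logging.INFO
    assert check_loglevel('bogus') == logging.INFO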
def _parse_env(env):
@ -783,6 +772,7 @@ def run(cmd,
umask=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -890,8 +880,20 @@ def run(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -901,9 +903,8 @@ def run(cmd,
:param bool encoded_cmd: Specify if the supplied command is encoded.
Only applies to shell 'powershell'.
:param bool raise_err: Specifies whether to raise a CommandExecutionError.
If False, the error will be logged, but no exception will be raised.
Default is False.
:param bool raise_err: If ``True`` and the command has a nonzero exit code,
a CommandExecutionError exception will be raised.
.. warning::
This function does not process commands through a shell
@ -996,9 +997,11 @@ def run(cmd,
)
log.error(log_callback(msg))
if raise_err:
raise CommandExecutionError(log_callback(ret['stdout']))
log.log(lvl, u'output: %s', log_callback(ret['stdout']))
return ret['stdout']
raise CommandExecutionError(
log_callback(ret[u'stdout'] if not hide_output else u'')
)
log.log(lvl, u'output: %s', log_callback(ret[u'stdout']))
return ret[u'stdout'] if not hide_output else u''
def shell(cmd,
@ -1013,7 +1016,7 @@ def shell(cmd,
umask=None,
output_loglevel='debug',
log_callback=None,
quiet=False,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -1112,8 +1115,20 @@ def shell(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -1186,7 +1201,7 @@ def shell(cmd,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
quiet=quiet,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
@ -1211,6 +1226,7 @@ def run_stdout(cmd,
umask=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -1305,8 +1321,20 @@ def run_stdout(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -1383,7 +1411,7 @@ def run_stdout(cmd,
log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stdout']
return ret['stdout'] if not hide_output else ''
def run_stderr(cmd,
@ -1399,6 +1427,7 @@ def run_stderr(cmd,
umask=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -1494,8 +1523,20 @@ def run_stderr(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -1572,7 +1613,7 @@ def run_stderr(cmd,
log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stderr']
return ret['stderr'] if not hide_output else ''
def run_all(cmd,
@ -1588,6 +1629,7 @@ def run_all(cmd,
umask=None,
output_loglevel='debug',
log_callback=None,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -1685,8 +1727,20 @@ def run_all(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -1788,6 +1842,9 @@ def run_all(cmd,
log.log(lvl, u'stderr: {0}'.format(log_callback(ret['stderr'])))
if ret['retcode']:
log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
if hide_output:
ret['stdout'] = ret['stderr'] = ''
return ret
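Those two lines are the whole hide_output mechanism in run_all: the command still runs and is still logged according to output_loglevel, but the captured streams are blanked before the return dict reaches the caller. A trivial sketch of that masking step (function name illustrative):

    def mask_output(ret, hide_output=False):
        # Blank the captured streams without touching retcode or other keys.
        if hide_output:
            ret['stdout'] = ret['stderr'] = ''
        return ret

    result = mask_output({'retcode': 0, 'stdout': 'secret', 'stderr': ''}, hide_output=True)
    assert result == {'retcode': 0, 'stdout': '', 'stderr': ''}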
@ -2025,7 +2082,7 @@ def script(source,
umask=None,
output_loglevel='debug',
log_callback=None,
quiet=False,
hide_output=False,
timeout=None,
reset_system_locale=True,
saltenv='base',
@ -2122,12 +2179,20 @@ def script(source,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG)regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
:param bool quiet: The command will be executed quietly, meaning no log
entries of the actual command or its return data. This is deprecated as of
the **2014.1.0** release, and is being replaced with ``output_loglevel: quiet``.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: If the command has not terminated after timeout seconds,
send the subprocess sigterm, and if sigterm is ignored, follow up with
@ -2231,6 +2296,9 @@ def script(source,
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
if hide_output:
ret['stdout'] = ret['stderr'] = ''
return ret
@ -2343,8 +2411,11 @@ def script_retcode(source,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool quiet: The command will be executed quietly, meaning no log
entries of the actual command or its return data. This is deprecated as of
@ -2539,7 +2610,7 @@ def run_chroot(root,
umask=None,
output_loglevel='quiet',
log_callback=None,
quiet=False,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -2633,10 +2704,22 @@ def run_chroot(root,
umask
The umask (in octal) to use when running the command.
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
output_loglevel : quiet
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
If ``True``, suppress stdout and stderr in the return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
timeout
A timeout in seconds for the executed process to return.
@ -2686,7 +2769,6 @@ def run_chroot(root,
umask=umask,
output_loglevel=output_loglevel,
log_callback=log_callback,
quiet=quiet,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
@ -2712,6 +2794,8 @@ def run_chroot(root,
__salt__['mount.umount'](os.path.join(root, 'proc'))
__salt__['mount.umount'](os.path.join(root, 'dev'))
if hide_output:
ret[u'stdout'] = ret[u'stderr'] = u''
return ret
@ -2962,7 +3046,7 @@ def powershell(cmd,
rstrip=True,
umask=None,
output_loglevel='debug',
quiet=False,
hide_output=False,
timeout=None,
reset_system_locale=True,
ignore_retcode=False,
@ -3090,8 +3174,20 @@ def powershell(cmd,
:param str umask: The umask (in octal) to use when running the command.
:param str output_loglevel: Control the loglevel at which the output from
the command is logged. Note that the command being run will still be logged
(loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
the command is logged to the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
:param bool hide_output: If ``True``, suppress stdout and stderr in the
return data.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
:param int timeout: A timeout in seconds for the executed process to return.
@ -3155,7 +3251,7 @@ def powershell(cmd,
rstrip=rstrip,
umask=umask,
output_loglevel=output_loglevel,
quiet=quiet,
hide_output=hide_output,
timeout=timeout,
reset_system_locale=reset_system_locale,
ignore_retcode=ignore_retcode,
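The new ``hide_output`` argument differs from ``output_loglevel``: the former blanks the captured output in the return data, the latter only controls logging. A minimal sketch, assuming the usual ``__salt__`` dunder of an execution module or template:

.. code-block:: python

    # output_loglevel='quiet' keeps the output out of the minion log,
    # but stdout/stderr still come back in the return data
    ret = __salt__['cmd.run_all']('cat /etc/shadow', output_loglevel='quiet')

    # hide_output=True additionally blanks stdout/stderr in the return data
    ret = __salt__['cmd.run_all']('cat /etc/shadow', hide_output=True)
    assert ret['stdout'] == '' and ret['stderr'] == ''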

View file

@ -214,7 +214,8 @@ def merge(value,
return ret
def get(key, default='', delimiter=':', merge=None):
def get(key, default='', delimiter=':', merge=None, omit_opts=False,
omit_pillar=False, omit_master=False, omit_grains=False):
'''
.. versionadded: 0.14.0
@ -354,37 +355,41 @@ def get(key, default='', delimiter=':', merge=None):
salt '*' config.get lxc.container_profile:centos merge=recurse
'''
if merge is None:
ret = salt.utils.data.traverse_dict_and_list(
__opts__,
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
if not omit_opts:
ret = salt.utils.data.traverse_dict_and_list(
__opts__,
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
ret = salt.utils.data.traverse_dict_and_list(
__grains__,
key,
'_|-',
delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
if not omit_grains:
ret = salt.utils.data.traverse_dict_and_list(
__grains__,
key,
'_|-',
delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
ret = salt.utils.data.traverse_dict_and_list(
__pillar__,
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
if not omit_pillar:
ret = salt.utils.data.traverse_dict_and_list(
__pillar__,
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
ret = salt.utils.data.traverse_dict_and_list(
__pillar__.get('master', {}),
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
if not omit_master:
ret = salt.utils.data.traverse_dict_and_list(
__pillar__.get('master', {}),
key,
'_|-',
delimiter=delimiter)
if ret != '_|-':
return sdb.sdb_get(ret, __opts__)
else:
if merge not in ('recurse', 'overwrite'):
log.warning('Unsupported merge strategy \'{0}\'. Falling back '
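A short usage sketch for the new ``omit_*`` keyword arguments of ``config.get`` (key names are hypothetical):

.. code-block:: python

    # default lookup order: opts, then grains, then pillar, then master pillar
    __salt__['config.get']('test')

    # skip the minion opts, so only grains, pillar and master config are consulted
    __salt__['config.get']('test', omit_opts=True)

    # look at nothing but pillar data
    __salt__['config.get']('app:listen_port', omit_opts=True, omit_grains=True, omit_master=True)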

View file

@ -5,6 +5,7 @@ Module to work with salt formula defaults files
'''
from __future__ import absolute_import
import copy
import json
import logging
import os
@ -107,11 +108,18 @@ def get(key, default=''):
return defaults
def merge(dest, upd):
def merge(dest, src, merge_lists=False, in_place=True):
'''
defaults.merge
Allows deep merging of dicts in formulas.
merge_lists : False
If True, it will also merge lists instead of replacing their items.
in_place : True
If True, it will merge into the dest dict;
if not, it will make a new copy of that dict and return it.
CLI Example:
.. code-block:: bash
@ -120,4 +128,22 @@ def merge(dest, upd):
It is more typical to use this in a templating language in formulas,
instead of directly on the command-line.
'''
return dictupdate.update(dest, upd)
if in_place:
merged = dest
else:
merged = copy.deepcopy(dest)
return dictupdate.update(merged, src, merge_lists=merge_lists)
def deepcopy(source):
'''
defaults.deepcopy
Allows deep copy of objects in formulas.
By default, Python does not copy objects;
it creates bindings between a target and an object.
It is more typical to use this in a templating language in formulas,
instead of directly on the command-line.
'''
return copy.deepcopy(source)
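A minimal sketch of what the new ``merge_lists`` and ``in_place`` arguments change (the dictionaries are made up):

.. code-block:: python

    base = {'pkgs': ['vim'], 'opts': {'color': True}}
    overlay = {'pkgs': ['tmux'], 'opts': {'mouse': True}}

    # in_place=False returns a merged copy and leaves ``base`` untouched;
    # merge_lists=True combines the two pkgs lists instead of replacing one with the other
    merged = __salt__['defaults.merge'](base, overlay, merge_lists=True, in_place=False)

    # defaults.deepcopy is useful in templates, where plain assignment only binds a reference
    copied = __salt__['defaults.deepcopy'](base)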

View file

@ -106,6 +106,7 @@ import inspect
import logging
import os
import re
import salt.utils.files
from operator import attrgetter
@ -130,7 +131,7 @@ log = logging.getLogger(__name__)
debug = False
__virtualname__ = 'dockercompose'
dc_filename = 'docker-compose.yml'
DEFAULT_DC_FILENAMES = ('docker-compose.yml', 'docker-compose.yaml')
def __virtual__():
@ -168,27 +169,45 @@ def __standardize_result(status, message, data=None, debug_msg=None):
return result
def __read_docker_compose(path):
def __get_docker_file_path(path):
'''
Read the docker-compose.yml file if it exists in the directory
Determines the filepath to use
:param path:
:return:
'''
if os.path.isfile(os.path.join(path, dc_filename)) is False:
if os.path.isfile(path):
return path
for dc_filename in DEFAULT_DC_FILENAMES:
file_path = os.path.join(path, dc_filename)
if os.path.isfile(file_path):
return file_path
# implicitly return None
def __read_docker_compose_file(file_path):
'''
Read the compose file if it exists in the directory
:param file_path:
:return:
'''
if not os.path.isfile(file_path):
return __standardize_result(False,
'Path does not exist or docker-compose.yml is not present',
'Path {} is not present'.format(file_path),
None, None)
f = salt.utils.files.fopen(os.path.join(path, dc_filename), 'r') # pylint: disable=resource-leakage
result = {'docker-compose.yml': ''}
if f:
for line in f:
result['docker-compose.yml'] += line
f.close()
else:
return __standardize_result(False, 'Could not read docker-compose.yml file.',
try:
with salt.utils.files.fopen(file_path, 'r') as fl:
file_name = os.path.basename(file_path)
result = {file_name: ''}
for line in fl:
result[file_name] += line
except EnvironmentError:
return __standardize_result(False,
'Could not read {0}'.format(file_path),
None, None)
return __standardize_result(True, 'Reading content of docker-compose.yml file',
return __standardize_result(True,
'Reading content of {0}'.format(file_path),
result, None)
@ -204,33 +223,54 @@ def __write_docker_compose(path, docker_compose):
:return:
'''
if os.path.isdir(path) is False:
os.mkdir(path)
f = salt.utils.files.fopen(os.path.join(path, dc_filename), 'w') # pylint: disable=resource-leakage
if f:
f.write(docker_compose)
f.close()
if path.lower().endswith(('.yml', '.yaml')):
file_path = path
dir_name = os.path.dirname(path)
else:
dir_name = path
file_path = os.path.join(dir_name, DEFAULT_DC_FILENAMES[0])
if os.path.isdir(dir_name) is False:
os.mkdir(dir_name)
try:
with salt.utils.files.fopen(file_path, 'w') as fl:
fl.write(docker_compose)
except EnvironmentError:
return __standardize_result(False,
'Could not write docker-compose file in {0}'.format(path),
'Could not write {0}'.format(file_path),
None, None)
project = __load_project(path)
project = __load_project_from_file_path(file_path)
if isinstance(project, dict):
os.remove(os.path.join(path, dc_filename))
os.remove(file_path)
return project
return path
return file_path
def __load_project(path):
'''
Load a docker-compose project from path
:param path:
:return:
'''
file_path = __get_docker_file_path(path)
if file_path is None:
msg = 'Could not find docker-compose file at {0}'.format(path)
return __standardize_result(False,
msg,
None, None)
return __load_project_from_file_path(file_path)
def __load_project_from_file_path(file_path):
'''
Load a docker-compose project from file path
:param path:
:return:
'''
try:
project = get_project(path)
project = get_project(project_dir=os.path.dirname(file_path),
config_path=[os.path.basename(file_path)])
except Exception as inst:
return __handle_except(inst)
return project
@ -286,8 +326,12 @@ def get(path):
salt myminion dockercompose.get /path/where/docker-compose/stored
'''
salt_result = __read_docker_compose(path)
file_path = __get_docker_file_path(path)
if file_path is None:
return __standardize_result(False,
'Path {} is not present'.format(path),
None, None)
salt_result = __read_docker_compose_file(file_path)
if not salt_result['status']:
return salt_result
project = __load_project(path)
@ -322,7 +366,10 @@ def create(path, docker_compose):
return __standardize_result(False,
'Creating a docker-compose project failed, you must send a valid docker-compose file',
None, None)
return __standardize_result(True, 'Successfully created the docker-compose file', {'compose.base_dir': path}, None)
return __standardize_result(True,
'Successfully created the docker-compose file',
{'compose.base_dir': path},
None)
def pull(path, service_names=None):
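With this change either a project directory or a direct path to a compose file should be accepted; a short sketch (paths are hypothetical):

.. code-block:: python

    # a directory is searched for docker-compose.yml, then docker-compose.yaml
    __salt__['dockercompose.get']('/srv/app')

    # a path ending in .yml/.yaml is used as-is
    __salt__['dockercompose.get']('/srv/app/docker-compose.yaml')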

File diff suppressed because it is too large

View file

@ -4763,7 +4763,7 @@ def check_file_meta(
if mode is not None and mode != smode:
changes['mode'] = mode
if lsattr_cmd:
if lsattr_cmd and attrs:
diff_attrs = _cmp_attrs(name, attrs)
if (
attrs is not None and

View file

@ -176,8 +176,9 @@ def _query(function,
if result.get('status', None) == salt.ext.six.moves.http_client.OK:
response = hipchat_functions.get(api_version).get(function).get('response')
return result.get('dict', {}).get(response, None)
elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT:
return False
elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT and \
api_version == 'v2':
return True
else:
log.debug(url)
log.debug(query_params)

View file

@ -39,6 +39,9 @@ import collections
import json
import logging
# Import salt libs
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
log = logging.getLogger(__name__)
# name used to refer to this module in __salt__
@ -64,10 +67,14 @@ def _client(user=None, password=None, host=None, port=None, **client_args):
host = __salt__['config.option']('influxdb.host', 'localhost')
if not port:
port = __salt__['config.option']('influxdb.port', 8086)
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in client_args:
del client_args[ignore]
return influxdb.InfluxDBClient(host=host,
port=port,
username=user,
password=password)
password=password,
**client_args)
def list_dbs(**client_args):
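Sketch of the effect: extra keyword arguments, minus Salt's internal state keywords, are now forwarded to ``InfluxDBClient``. The ``ssl``/``verify_ssl`` names below are standard influxdb-python client options, assumed here for illustration:

.. code-block:: python

    # the extra client options reach InfluxDBClient(**client_args)
    __salt__['influxdb.list_dbs'](ssl=True, verify_ssl=False)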

View file

@ -239,10 +239,13 @@ def pvcreate(devices, override=True, **kwargs):
'pvmetadatacopies', 'metadatacopies', 'metadataignore',
'restorefile', 'norestorefile', 'labelsector',
'setphysicalvolumesize')
no_parameter = ('force', 'norestorefile')
for var in kwargs:
if kwargs[var] and var in valid:
cmd.extend(['--{0}'.format(var), kwargs[var]])
elif kwargs[var] and var in no_parameter:
cmd.append('--{0}'.format(var))
cmd.append(kwargs[var])
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out.get('retcode'):
raise CommandExecutionError(out.get('stderr'))

View file

@ -236,7 +236,7 @@ def neighbors(neighbor=None, **kwargs):
'local_address_configured' : True,
'local_port' : 179,
'remote_address' : u'192.247.78.0',
'router_id': : u'192.168.0.1',
'router_id' : u'192.168.0.1',
'remote_port' : 58380,
'multihop' : False,
'import_policy' : u'4-NTT-TRANSIT-IN',

View file

@ -230,7 +230,7 @@ def _config_logic(napalm_device,
@salt.utils.napalm.proxy_napalm_wrap
def connected(**kwarvs): # pylint: disable=unused-argument
def connected(**kwargs): # pylint: disable=unused-argument
'''
Specifies if the connection to the device succeeded.
@ -1178,6 +1178,7 @@ def load_config(filename=None,
debug=False,
replace=False,
inherit_napalm_device=None,
saltenv='base',
**kwargs): # pylint: disable=unused-argument
'''
Applies configuration changes on the device. It can be loaded from a file or from inline string.
@ -1193,10 +1194,21 @@ def load_config(filename=None,
To replace the config, set ``replace`` to ``True``.
filename
Path to the file containing the desired configuration. By default is None.
Path to the file containing the desired configuration.
This can be specified using the absolute path to the file,
or using one of the following URL schemes:
- ``salt://``, to fetch the template from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
.. versionchanged:: Oxygen
text
String containing the desired configuration.
This argument is ignored when ``filename`` is specified.
test: False
Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False``
@ -1216,6 +1228,11 @@ def load_config(filename=None,
.. versionadded:: 2016.11.2
saltenv: ``base``
Specifies the Salt environment name.
.. versionadded:: Oxygen
:return: a dictionary having the following keys:
* result (bool): if the config was applied successfully. It is ``False`` only in case of failure. In case \
@ -1246,7 +1263,6 @@ def load_config(filename=None,
'diff': '[edit interfaces xe-0/0/5]+ description "Adding a description";'
}
'''
fun = 'load_merge_candidate'
if replace:
fun = 'load_replace_candidate'
@ -1259,11 +1275,22 @@ def load_config(filename=None,
# compare_config, discard / commit
# which have to be over the same session
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
if filename:
text = __salt__['cp.get_file_str'](filename, saltenv=saltenv)
if text is False:
# When using salt:// or https://, if the resource is not available,
# it will either raise an exception, or return False.
ret = {
'result': False,
'out': None
}
ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(filename)
log.error(ret['comment'])
return ret
_loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
fun,
**{
'filename': filename,
'config': text
}
)
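A short sketch of the new URL support for ``filename`` (the file name is hypothetical):

.. code-block:: python

    # fetch the candidate config from the Salt fileserver and dry-run it
    __salt__['net.load_config'](filename='salt://configs/ntp.conf', test=True)

    # an unreadable salt:// source now comes back as a failed result
    ret = __salt__['net.load_config'](filename='salt://does/not/exist.conf')
    # ret['result'] is False and ret['comment'] explains why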

View file

@ -113,6 +113,17 @@ def servers(**kwargs): # pylint: disable=unused-argument
.. code-block:: bash
salt '*' ntp.servers
Example output:
.. code-block:: python
[
'192.168.0.1',
'172.17.17.1',
'172.17.17.2',
'2400:cb00:6:1024::c71b:840a'
]
'''
ntp_servers = salt.utils.napalm.call(

View file

@ -142,7 +142,7 @@ def results(**kwargs): # pylint: disable=unused-argument
'last_test_loss' : 0,
'round_trip_jitter' : -59.0,
'target' : '192.168.0.1',
'source' : '192.168.0.2'
'source' : '192.168.0.2',
'probe_count' : 15,
'current_test_min_delay': 63.138
},
@ -160,7 +160,7 @@ def results(**kwargs): # pylint: disable=unused-argument
'last_test_loss' : 0,
'round_trip_jitter' : -34.0,
'target' : '172.17.17.1',
'source' : '172.17.17.2'
'source' : '172.17.17.2',
'probe_count' : 15,
'current_test_min_delay': 176.402
}

View file

@ -82,7 +82,7 @@ def show(destination, protocol=None, **kwargs): # pylint: disable=unused-argume
.. code-block:: bash
salt 'my_router' route.show
salt 'my_router' route.show 172.16.0.0/25
salt 'my_router' route.show 172.16.0.0/25 bgp
Output example:

View file

@ -69,7 +69,7 @@ def _get_root_object(models):
# -----------------------------------------------------------------------------
def diff(candidate, running, models):
def diff(candidate, running, *models):
'''
Returns the difference between two configuration entities structured
according to the YANG model.
@ -119,6 +119,9 @@ def diff(candidate, running, models):
}
}
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
first = _get_root_object(models)
first.load_dict(candidate)
second = _get_root_object(models)
@ -127,7 +130,7 @@ def diff(candidate, running, models):
@proxy_napalm_wrap
def parse(models, **kwargs):
def parse(*models, **kwargs):
'''
Parse configuration from the device.
@ -340,6 +343,8 @@ def parse(models, **kwargs):
}
}
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
config = kwargs.pop('config', False)
state = kwargs.pop('state', False)
profiles = kwargs.pop('profiles', [])
@ -360,7 +365,7 @@ def parse(models, **kwargs):
@proxy_napalm_wrap
def get_config(data, models, **kwargs):
def get_config(data, *models, **kwargs):
'''
Return the native config.
@ -393,6 +398,8 @@ def get_config(data, models, **kwargs):
description Uplink2
mtu 9000
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
profiles = kwargs.pop('profiles', [])
if not profiles and hasattr(napalm_device, 'profile'): # pylint: disable=undefined-variable
profiles = napalm_device.profile # pylint: disable=undefined-variable
@ -410,7 +417,7 @@ def get_config(data, models, **kwargs):
@proxy_napalm_wrap
def load_config(data, models, **kwargs):
def load_config(data, *models, **kwargs):
'''
Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
@ -545,7 +552,9 @@ def load_config(data, models, **kwargs):
result:
True
'''
config = get_config(data, models, **kwargs)
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
config = get_config(data, *models, **kwargs)
test = kwargs.pop('test', False)
debug = kwargs.pop('debug', False)
commit = kwargs.pop('commit', True)
@ -559,7 +568,7 @@ def load_config(data, models, **kwargs):
@proxy_napalm_wrap
def compliance_report(data, models, **kwargs):
def compliance_report(data, *models, **kwargs):
'''
Return the compliance report using YANG objects.
@ -598,6 +607,8 @@ def compliance_report(data, models, **kwargs):
}
}
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
filepath = kwargs.pop('filepath', '')
root = _get_root_object(models)
root.load_dict(data)
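Sketch of both calling conventions now accepted; the model name is the usual napalm-yang OpenConfig interfaces model and is assumed here:

.. code-block:: python

    # models can be passed as plain positional arguments ...
    __salt__['napalm_yang.parse']('models.openconfig_interfaces', config=True)

    # ... while the old single-list form keeps working
    __salt__['napalm_yang.parse'](['models.openconfig_interfaces'], config=True)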

View file

@ -2,7 +2,7 @@
'''
Neutron module for interacting with OpenStack Neutron
.. versionadded:: Nitrogen
.. versionadded:: Oxygen
:depends: shade

View file

@ -83,6 +83,7 @@ import shutil
import logging
import sys
import tempfile
import json
# Import Salt libs
import salt.utils.data
@ -1132,6 +1133,13 @@ def list_upgrades(bin_env=None,
cmd = [pip_bin, 'list', '--outdated']
# If pip >= 9.0 use --format=json
min_version = '9.0'
cur_version = version(pip_bin)
if salt.utils.versions.compare(ver1=cur_version, oper='>=',
ver2=min_version):
cmd.append('--format=json')
cmd_kwargs = dict(cwd=cwd, runas=user)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
@ -1142,14 +1150,20 @@ def list_upgrades(bin_env=None,
raise CommandExecutionError(result['stderr'])
packages = {}
for line in result['stdout'].splitlines():
match = re.search(r'(\S*)\s+\(.*Latest:\s+(.*)\)', line)
if match:
name, version_ = match.groups()
else:
logger.error('Can\'t parse line \'{0}\''.format(line))
continue
packages[name] = version_
try:
json_results = json.loads(result['stdout'])
for json_result in json_results:
packages[json_result['name']] = json_result['latest_version']
except ValueError:
for line in result['stdout'].splitlines():
match = re.search(r'(\S*)\s+.*Latest:\s+(.*)', line)
if match:
name, version_ = match.groups()
else:
logger.error('Can\'t parse line \'{0}\''.format(line))
continue
packages[name] = version_
return packages
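A rough sketch of the JSON path (package name and versions are made up):

.. code-block:: python

    import json

    # hypothetical output of ``pip list --outdated --format=json`` on pip >= 9.0
    sample = '[{"name": "requests", "version": "2.9.1", "latest_version": "2.18.4"}]'
    packages = {p['name']: p['latest_version'] for p in json.loads(sample)}
    # packages == {'requests': '2.18.4'}, matching what list_upgrades returns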

View file

@ -166,6 +166,98 @@ def _snapper_post(opts, jid, pre_num):
log.error('Failed to create snapper pre snapshot for jid: {0}'.format(jid))
def _get_pause(jid, state_id=None):
'''
Return the pause information for a given jid
'''
pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
pause_path = os.path.join(pause_dir, jid)
if not os.path.exists(pause_dir):
try:
os.makedirs(pause_dir)
except OSError:
# File created in the gap
pass
data = {}
if state_id is not None:
if state_id not in data:
data[state_id] = {}
if os.path.exists(pause_path):
with salt.utils.files.fopen(pause_path, 'rb') as fp_:
data = msgpack.loads(fp_.read())
return data, pause_path
def get_pauses(jid=None):
'''
Get a report on all of the currently paused state runs and pause
run settings.
Optionally send in a jid if you only desire to see a single pause
data set.
'''
ret = {}
active = __salt__['saltutil.is_running']('state.*')
pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
if not os.path.exists(pause_dir):
return ret
if jid is None:
jids = os.listdir(pause_dir)
elif isinstance(jid, list):
jids = jid
else:
jids = [str(jid)]
for scan_jid in jids:
is_active = False
for active_data in active:
if active_data['jid'] == scan_jid:
is_active = True
if not is_active:
try:
pause_path = os.path.join(pause_dir, scan_jid)
os.remove(pause_path)
except OSError:
# Already gone
pass
continue
data, pause_path = _get_pause(scan_jid)
ret[scan_jid] = data
return ret
def soft_kill(jid, state_id=None):
'''
Set up a state run to die before executing the given state id;
this instructs a running state to safely exit at a given
state id. The jid of the running state run must be passed in.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
The given state_id is the id for a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
'''
jid = str(jid)
if state_id is None:
state_id = '__all__'
data, pause_path = _get_pause(jid, state_id)
data[state_id]['kill'] = True
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(msgpack.dumps(data))
def pause(jid, state_id=None, duration=None):
'''
Set up a state id pause, this instructs a running state to pause at a given
@ -194,20 +286,7 @@ def pause(jid, state_id=None, duration=None):
jid = str(jid)
if state_id is None:
state_id = '__all__'
pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
pause_path = os.path.join(pause_dir, jid)
if not os.path.exists(pause_dir):
try:
os.makedirs(pause_dir)
except OSError:
# File created in the gap
pass
data = {}
if os.path.exists(pause_path):
with salt.utils.files.fopen(pause_path, 'rb') as fp_:
data = msgpack.loads(fp_.read())
if state_id not in data:
data[state_id] = {}
data, pause_path = _get_pause(jid, state_id)
if duration:
data[state_id]['duration'] = int(duration)
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
@ -239,22 +318,11 @@ def resume(jid, state_id=None):
jid = str(jid)
if state_id is None:
state_id = '__all__'
pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
pause_path = os.path.join(pause_dir, jid)
if not os.path.exists(pause_dir):
try:
os.makedirs(pause_dir)
except OSError:
# File created in the gap
pass
data = {}
if os.path.exists(pause_path):
with salt.utils.files.fopen(pause_path, 'rb') as fp_:
data = msgpack.loads(fp_.read())
else:
return True
data, pause_path = _get_pause(jid, state_id)
if state_id in data:
data.pop(state_id)
if state_id == '__all__':
data = {}
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(msgpack.dumps(data))
@ -429,6 +497,8 @@ def _get_test_value(test=None, **kwargs):
if test is None:
if salt.utils.args.test_mode(test=test, **kwargs):
ret = True
elif __salt__['config.get']('test', omit_opts=True) is True:
ret = True
else:
ret = __opts__.get('test', None)
else:
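A minimal sketch tying the new pause helpers together (the jid and state id are hypothetical):

.. code-block:: python

    jid = '20171130110407769519'

    # pause the running jid before the 'vim' state id for two minutes
    __salt__['state.pause'](jid, 'vim', duration=120)

    # inspect all currently paused runs and their settings
    __salt__['state.get_pauses'](jid)

    # let it continue, or tell it to exit cleanly before 'vim' is executed
    __salt__['state.resume'](jid, 'vim')
    __salt__['state.soft_kill'](jid, 'vim')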

View file

@ -8,6 +8,8 @@ Salt interface to ZFS commands
from __future__ import absolute_import
# Import Python libs
import re
import math
import logging
# Import Salt libs
@ -16,10 +18,15 @@ import salt.utils.path
import salt.modules.cmdmod
import salt.utils.decorators as decorators
from salt.utils.odict import OrderedDict
from salt.utils.stringutils import to_num as str_to_num
from salt.ext import six
__virtualname__ = 'zfs'
log = logging.getLogger(__name__)
# Precompiled regex
re_zfs_size = re.compile(r'^(\d+|\d+(?=\d*)\.\d+)([KkMmGgTtPpEeZz][Bb]?)$')
# Function alias to set mapping.
__func_alias__ = {
'list_': 'list',
@ -62,6 +69,38 @@ def _check_features():
return res['retcode'] == 0
def _conform_value(value, convert_size=False):
'''
Ensure value always conform to what zfs expects
'''
# NOTE: salt breaks the on/off/yes/no properties
if isinstance(value, bool):
return 'on' if value else 'off'
if isinstance(value, six.text_type) or isinstance(value, str):
# NOTE: handle whitespaces
if ' ' in value:
# NOTE: quoting the string may be better
# but it is hard to know if we already quoted it before
# this can be improved in the future
return "'{0}'".format(value.strip("'"))
# NOTE: handle ZFS size conversion
match_size = re_zfs_size.match(value)
if convert_size and match_size:
v_size = float(match_size.group(1))
v_unit = match_size.group(2).upper()[0]
v_power = math.pow(1024, ['K', 'M', 'G', 'T', 'P', 'E', 'Z'].index(v_unit) + 1)
value = v_size * v_power
return int(value) if int(value) == value else value
# NOTE: convert to numeric if needed
return str_to_num(value)
# NOTE: passthrough
return value
def exists(name, **kwargs):
'''
.. versionadded:: 2015.5.0
@ -145,14 +184,10 @@ def create(name, **kwargs):
# if zpool properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
proplist = []
for prop in properties:
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
proplist.append('-o {0}={1}'.format(prop, _conform_value(properties[prop])))
cmd = '{0} {1}'.format(cmd, ' '.join(proplist))
if volume_size:
cmd = '{0} -V {1}'.format(cmd, volume_size)
@ -292,7 +327,7 @@ def rename(name, new_name, **kwargs):
def list_(name=None, **kwargs):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2016.3.0
.. versionchanged:: Oxygen
Return a list of all datasets or a specified dataset on the system and the
values of their used, available, referenced, and mountpoint properties.
@ -312,6 +347,9 @@ def list_(name=None, **kwargs):
property to sort on (default = name)
order : string [ascending|descending]
sort order (default = ascending)
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: Oxygen
CLI Example:
@ -329,8 +367,13 @@ def list_(name=None, **kwargs):
sort = kwargs.get('sort', None)
ltype = kwargs.get('type', None)
order = kwargs.get('order', 'ascending')
parsable = kwargs.get('parsable', False)
cmd = '{0} list -H'.format(zfs)
# parsable output
if parsable:
cmd = '{0} -p'.format(cmd)
# filter on type
if ltype:
cmd = '{0} -t {1}'.format(cmd, ltype)
@ -367,7 +410,7 @@ def list_(name=None, **kwargs):
ds_data = {}
for prop in properties:
ds_data[prop] = ds[properties.index(prop)]
ds_data[prop] = _conform_value(ds[properties.index(prop)])
ret[ds_data['name']] = ds_data
del ret[ds_data['name']]['name']
@ -668,12 +711,10 @@ def clone(name_a, name_b, **kwargs):
# if zpool properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
proplist = []
for prop in properties:
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
properties = ' '.join(optlist)
proplist.append('-o {0}={1}'.format(prop, properties[prop]))
properties = ' '.join(proplist)
res = __salt__['cmd.run_all']('{zfs} clone {create_parent}{properties}{name_a} {name_b}'.format(
zfs=zfs,
@ -1046,12 +1087,10 @@ def snapshot(*snapshot, **kwargs):
# if zpool properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
proplist = []
for prop in properties:
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
properties = ' '.join(optlist)
proplist.append('-o {0}={1}'.format(prop, _conform_value((properties[prop]))))
properties = ' '.join(proplist)
for csnap in snapshot:
if '@' not in csnap:
@ -1138,14 +1177,10 @@ def set(*dataset, **kwargs):
# for better error handling we don't do one big set command
for ds in dataset:
for prop in properties:
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
res = __salt__['cmd.run_all']('{zfs} set {prop}={value} {dataset}'.format(
zfs=zfs,
prop=prop,
value=properties[prop],
value=_conform_value(properties[prop]),
dataset=ds
))
if ds not in ret:
@ -1164,6 +1199,7 @@ def set(*dataset, **kwargs):
def get(*dataset, **kwargs):
'''
.. versionadded:: 2016.3.0
.. versionchanged:: Oxygen
Displays properties for the given datasets.
@ -1183,6 +1219,9 @@ def get(*dataset, **kwargs):
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: Oxygen
.. note::
@ -1206,8 +1245,13 @@ def get(*dataset, **kwargs):
fields = kwargs.get('fields', 'value,source')
ltype = kwargs.get('type', None)
source = kwargs.get('source', None)
parsable = kwargs.get('parsable', False)
cmd = '{0} get -H'.format(zfs)
# parsable output
if parsable:
cmd = '{0} -p'.format(cmd)
# recursively get
if depth:
cmd = '{0} -d {1}'.format(cmd, depth)
@ -1246,7 +1290,7 @@ def get(*dataset, **kwargs):
ds_data = {}
for field in fields:
ds_data[field] = ds[fields.index(field)]
ds_data[field] = _conform_value(ds[fields.index(field)])
ds_name = ds_data['name']
ds_prop = ds_data['property']
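Sketch of the new ``parsable`` flag: with it, ``zfs`` prints exact byte counts, which ``_conform_value()`` returns as integers (dataset name and numbers are made up):

.. code-block:: python

    # default: sizes stay human-readable strings such as '1.82T'
    __salt__['zfs.list']('tank')

    # parsable=True: exact values come back as integers
    __salt__['zfs.list']('tank', parsable=True)
    # e.g. {'tank': {'used': 16384, 'avail': 1998252403712,
    #                'refer': 24576, 'mountpoint': '/tank'}}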

View file

@ -16,6 +16,7 @@ import salt.utils.decorators
import salt.utils.decorators.path
import salt.utils.path
from salt.utils.odict import OrderedDict
from salt.modules.zfs import _conform_value
log = logging.getLogger(__name__)
@ -307,10 +308,10 @@ def iostat(zpool=None, sample_time=0):
return ret
def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=False):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2016.3.0
.. versionchanged:: Oxygen
Return information about (all) storage pools
@ -318,6 +319,9 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
optional name of storage pool
properties : string
comma-separated list of properties to list
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: Oxygen
.. note::
the 'name' property will always be included, the 'frag' property will get removed if not available
@ -333,6 +337,9 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
.. code-block:: bash
salt '*' zpool.list
salt '*' zpool.list zpool=tank
salt '*' zpool.list 'size,free'
salt '*' zpool.list 'size,free' tank
'''
ret = OrderedDict()
@ -346,9 +353,10 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
# get zpool list data
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} list -H -o {properties}{zpool}'.format(
cmd = '{zpool_cmd} list -H -o {properties}{parsable}{zpool}'.format(
zpool_cmd=zpool_cmd,
properties=','.join(properties),
parsable=' -p' if parsable else '',
zpool=' {0}'.format(zpool) if zpool else ''
)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
@ -362,7 +370,7 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
zp_data = {}
for prop in properties:
zp_data[prop] = zp[properties.index(prop)]
zp_data[prop] = _conform_value(zp[properties.index(prop)])
ret[zp_data['name']] = zp_data
del ret[zp_data['name']]['name']
@ -370,9 +378,10 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None):
return ret
def get(zpool, prop=None, show_source=False):
def get(zpool, prop=None, show_source=False, parsable=False):
'''
.. versionadded:: 2016.3.0
.. versionchanged:: Oxygen
Retrieves the given list of properties
@ -382,6 +391,9 @@ def get(zpool, prop=None, show_source=False):
optional name of property to retrieve
show_source : boolean
show source of property
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: Oxygen
CLI Example:
@ -396,9 +408,10 @@ def get(zpool, prop=None, show_source=False):
# get zpool list data
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} get -H -o {properties} {prop} {zpool}'.format(
cmd = '{zpool_cmd} get -H -o {properties}{parsable} {prop} {zpool}'.format(
zpool_cmd=zpool_cmd,
properties=','.join(properties),
parsable=' -p' if parsable else '',
prop=prop if prop else 'all',
zpool=zpool
)
@ -413,7 +426,7 @@ def get(zpool, prop=None, show_source=False):
zp_data = {}
for prop in properties:
zp_data[prop] = zp[properties.index(prop)]
zp_data[prop] = _conform_value(zp[properties.index(prop)])
if show_source:
ret[zpool][zp_data['property']] = zp_data
@ -445,10 +458,9 @@ def set(zpool, prop, value):
'''
ret = {}
ret[zpool] = {}
if isinstance(value, bool):
value = 'on' if value else 'off'
elif ' ' in value:
value = "'{0}'".format(value)
# make sure value is what zfs expects
value = _conform_value(value)
# get zpool list data
zpool_cmd = _check_zpool()
@ -484,7 +496,7 @@ def exists(zpool):
zpool_cmd=zpool_cmd,
zpool=zpool
)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
res = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if res['retcode'] != 0:
return False
return True
@ -673,34 +685,30 @@ def create(zpool, *vdevs, **kwargs):
if properties and 'bootsize' in properties:
createboot = True
# make sure values are in the format zfs expects
if properties:
for prop in properties:
properties[prop] = _conform_value(properties[prop])
if filesystem_properties:
for prop in filesystem_properties:
filesystem_properties[prop] = _conform_value(filesystem_properties[prop])
# apply extra arguments from kwargs
if force: # force creation
cmd = '{0} -f'.format(cmd)
if createboot: # create boot paritition
cmd = '{0} -B'.format(cmd)
if properties: # create "-o property=value" pairs
optlist = []
proplist = []
for prop in properties:
if isinstance(properties[prop], bool):
value = 'on' if properties[prop] else 'off'
else:
if ' ' in properties[prop]:
value = "'{0}'".format(properties[prop])
else:
value = properties[prop]
optlist.append('-o {0}={1}'.format(prop, value))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
proplist.append('-o {0}={1}'.format(prop, properties[prop]))
cmd = '{0} {1}'.format(cmd, ' '.join(proplist))
if filesystem_properties: # create "-O property=value" pairs
optlist = []
fsproplist = []
for prop in filesystem_properties:
if ' ' in filesystem_properties[prop]:
value = "'{0}'".format(filesystem_properties[prop])
else:
value = filesystem_properties[prop]
optlist.append('-O {0}={1}'.format(prop, value))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
fsproplist.append('-O {0}={1}'.format(prop, filesystem_properties[prop]))
cmd = '{0} {1}'.format(cmd, ' '.join(fsproplist))
if mountpoint: # set mountpoint
cmd = '{0} -m {1}'.format(cmd, mountpoint)
if altroot: # set altroot
@ -938,18 +946,10 @@ def split(zpool, newzpool, **kwargs):
# apply extra arguments from kwargs
if properties: # create "-o property=value" pairs
optlist = []
proplist = []
for prop in properties:
if isinstance(properties[prop], bool):
value = 'on' if properties[prop] else 'off'
else:
if ' ' in properties[prop]:
value = "'{0}'".format(properties[prop])
else:
value = properties[prop]
optlist.append('-o {0}={1}'.format(prop, value))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
proplist.append('-o {0}={1}'.format(prop, _conform_value(properties[prop])))
cmd = '{0} {1}'.format(cmd, ' '.join(proplist))
if altroot: # set altroot
cmd = '{0} -R {1}'.format(cmd, altroot)
cmd = '{0} {1} {2}'.format(cmd, zpool, newzpool)

View file

@ -46,15 +46,15 @@ except ImportError:
except ImportError:
# TODO: Come up with a sane way to get a configured logfile
# and write to the logfile when this error is hit also
LOG_FORMAT = u'[%(levelname)-8s] %(message)s'
LOG_FORMAT = '[%(levelname)-8s] %(message)s'
salt.log.setup_console_logger(log_format=LOG_FORMAT)
log.fatal(u'Unable to import msgpack or msgpack_pure python modules')
log.fatal('Unable to import msgpack or msgpack_pure python modules')
# Don't exit if msgpack is not available, this is to make local mode
# work without msgpack
#sys.exit(salt.defaults.exitcodes.EX_GENERIC)
if HAS_MSGPACK and not hasattr(msgpack, u'exceptions'):
if HAS_MSGPACK and not hasattr(msgpack, 'exceptions'):
class PackValueError(Exception):
'''
older versions of msgpack do not have PackValueError
@ -89,11 +89,11 @@ def format_payload(enc, **kwargs):
Pass in the required arguments for a payload, the enc type and the cmd,
then a list of keyword args to generate the body of the load dict.
'''
payload = {u'enc': enc}
payload = {'enc': enc}
load = {}
for key in kwargs:
load[key] = kwargs[key]
payload[u'load'] = load
payload['load'] = load
return package(payload)
@ -104,11 +104,11 @@ class Serial(object):
'''
def __init__(self, opts):
if isinstance(opts, dict):
self.serial = opts.get(u'serial', u'msgpack')
self.serial = opts.get('serial', 'msgpack')
elif isinstance(opts, six.string_types):
self.serial = opts
else:
self.serial = u'msgpack'
self.serial = 'msgpack'
def loads(self, msg, encoding=None, raw=False):
'''
@ -141,12 +141,12 @@ class Serial(object):
ret = salt.transport.frame.decode_embedded_strs(ret)
except Exception as exc:
log.critical(
u'Could not deserialize msgpack message. This often happens '
u'when trying to read a file not in binary mode. '
u'To see message payload, enable debug logging and retry. '
u'Exception: %s', exc
'Could not deserialize msgpack message. This often happens '
'when trying to read a file not in binary mode. '
'To see message payload, enable debug logging and retry. '
'Exception: %s', exc
)
log.debug(u'Msgpack deserialization failure on message: %s', msg)
log.debug('Msgpack deserialization failure on message: %s', msg)
gc.collect()
raise
finally:
@ -161,7 +161,7 @@ class Serial(object):
fn_.close()
if data:
if six.PY3:
return self.loads(data, encoding=u'utf-8')
return self.loads(data, encoding='utf-8')
else:
return self.loads(data)
@ -218,7 +218,7 @@ class Serial(object):
return msgpack.ExtType(78, obj)
def dt_encode(obj):
datetime_str = obj.strftime(u"%Y%m%dT%H:%M:%S.%f")
datetime_str = obj.strftime("%Y%m%dT%H:%M:%S.%f")
if msgpack.version >= (0, 4, 0):
return msgpack.packb(datetime_str, default=default, use_bin_type=use_bin_type)
else:
@ -244,7 +244,7 @@ class Serial(object):
return obj
def immutable_encoder(obj):
log.debug(u'IMMUTABLE OBJ: %s', obj)
log.debug('IMMUTABLE OBJ: %s', obj)
if isinstance(obj, immutabletypes.ImmutableDict):
return dict(obj)
if isinstance(obj, immutabletypes.ImmutableList):
@ -252,12 +252,12 @@ class Serial(object):
if isinstance(obj, immutabletypes.ImmutableSet):
return set(obj)
if u"datetime.datetime" in str(e):
if "datetime.datetime" in str(e):
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(datetime_encoder(msg), use_bin_type=use_bin_type)
else:
return msgpack.dumps(datetime_encoder(msg))
elif u"Immutable" in str(e):
elif "Immutable" in str(e):
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(msg, default=immutable_encoder, use_bin_type=use_bin_type)
else:
@ -291,8 +291,8 @@ class Serial(object):
return msgpack.dumps(odict_encoder(msg))
except (SystemError, TypeError) as exc: # pylint: disable=W0705
log.critical(
u'Unable to serialize message! Consider upgrading msgpack. '
u'Message which failed was %s, with exception %s', msg, exc
'Unable to serialize message! Consider upgrading msgpack. '
'Message which failed was %s, with exception %s', msg, exc
)
def dump(self, msg, fn_):
@ -313,7 +313,7 @@ class SREQ(object):
'''
Create a generic interface to wrap salt zeromq req calls.
'''
def __init__(self, master, id_=u'', serial=u'msgpack', linger=0, opts=None):
def __init__(self, master, id_='', serial='msgpack', linger=0, opts=None):
self.master = master
self.id_ = id_
self.serial = Serial(serial)
@ -327,20 +327,20 @@ class SREQ(object):
'''
Lazily create the socket.
'''
if not hasattr(self, u'_socket'):
if not hasattr(self, '_socket'):
# create a new one
self._socket = self.context.socket(zmq.REQ)
if hasattr(zmq, u'RECONNECT_IVL_MAX'):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.master.startswith(u'tcp://['):
if self.master.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, u'IPV6'):
if hasattr(zmq, 'IPV6'):
self._socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, u'IPV4ONLY'):
elif hasattr(zmq, 'IPV4ONLY'):
self._socket.setsockopt(zmq.IPV4ONLY, 0)
self._socket.linger = self.linger
if self.id_:
@ -349,37 +349,37 @@ class SREQ(object):
return self._socket
def _set_tcp_keepalive(self):
if hasattr(zmq, u'TCP_KEEPALIVE') and self.opts:
if u'tcp_keepalive' in self.opts:
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts[u'tcp_keepalive']
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if u'tcp_keepalive_idle' in self.opts:
if 'tcp_keepalive_idle' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts[u'tcp_keepalive_idle']
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if u'tcp_keepalive_cnt' in self.opts:
if 'tcp_keepalive_cnt' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts[u'tcp_keepalive_cnt']
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if u'tcp_keepalive_intvl' in self.opts:
if 'tcp_keepalive_intvl' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts[u'tcp_keepalive_intvl']
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def clear_socket(self):
'''
delete socket if you have it
'''
if hasattr(self, u'_socket'):
if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace(u'Unregistering socket: %s', socket)
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace(u'Unregistering socket: %s', socket)
log.trace('Unregistering socket: %s', socket)
self.poller.unregister(socket[0])
del self._socket
@ -387,8 +387,8 @@ class SREQ(object):
'''
Takes two arguments, the encryption type and the base payload
'''
payload = {u'enc': enc}
payload[u'load'] = load
payload = {'enc': enc}
payload['load'] = load
pkg = self.serial.dumps(payload)
self.socket.send(pkg)
self.poller.register(self.socket, zmq.POLLIN)
@ -400,14 +400,14 @@ class SREQ(object):
break
if tries > 1:
log.info(
u'SaltReqTimeoutError: after %s seconds. (Try %s of %s)',
'SaltReqTimeoutError: after %s seconds. (Try %s of %s)',
timeout, tried, tries
)
if tried >= tries:
self.clear_socket()
raise SaltReqTimeoutError(
u'SaltReqTimeoutError: after {0} seconds, ran {1} '
u'tries'.format(timeout * tried, tried)
'SaltReqTimeoutError: after {0} seconds, ran {1} '
'tries'.format(timeout * tried, tried)
)
return self.serial.loads(self.socket.recv())
@ -415,8 +415,8 @@ class SREQ(object):
'''
Detect the encryption type based on the payload
'''
enc = payload.get(u'enc', u'clear')
load = payload.get(u'load', {})
enc = payload.get('enc', 'clear')
load = payload.get('load', {})
return self.send(enc, load, tries, timeout)
def destroy(self):

View file

@ -29,6 +29,13 @@ will follow symbolic links to other directories.
Be careful when using ``follow_dir_links``, as a recursive symlink chain
will produce unexpected results.
.. versionchanged:: Oxygen
If ``root_dir`` is a relative path, it will be treated as relative to the
:conf_master:`pillar_roots` of the environment specified by
:conf_minion:`pillarenv`. If an environment specifies multiple
roots, this module will search for files relative to all of them, in order,
merging the results.
If ``keep_newline`` is set to ``True``, then the pillar values for files ending
in newlines will keep that newline. The default behavior is to remove the
end-of-file newline. ``keep_newline`` should be turned on if the pillar data is
@ -259,14 +266,14 @@ def _construct_pillar(top_dir,
log.error('file_tree: %s: not a regular file', file_path)
continue
contents = ''
contents = b''
try:
with salt.utils.files.fopen(file_path, 'rb') as fhr:
buf = fhr.read(__opts__['file_buffer_size'])
while buf:
contents += buf
buf = fhr.read(__opts__['file_buffer_size'])
if contents.endswith('\n') \
if contents.endswith(b'\n') \
and _check_newline(prefix,
file_name,
keep_newline):
@ -311,6 +318,60 @@ def ext_pillar(minion_id,
log.error('file_tree: no root_dir specified')
return {}
if not os.path.isabs(root_dir):
pillarenv = __opts__['pillarenv']
if pillarenv is None:
log.error('file_tree: root_dir is relative but pillarenv is not set')
return {}
log.debug('file_tree: pillarenv = %s', pillarenv)
env_roots = __opts__['pillar_roots'].get(pillarenv, None)
if env_roots is None:
log.error('file_tree: root_dir is relative but no pillar_roots are specified '
' for pillarenv %s', pillarenv)
return {}
env_dirs = []
for env_root in env_roots:
env_dir = os.path.normpath(os.path.join(env_root, root_dir))
# don't redundantly load consecutively, but preserve any expected precedence
if env_dir not in env_dirs or env_dir != env_dirs[-1]:
env_dirs.append(env_dir)
dirs = env_dirs
else:
dirs = [root_dir]
result_pillar = {}
for root in dirs:
dir_pillar = _ext_pillar(minion_id,
root,
follow_dir_links,
debug,
keep_newline,
render_default,
renderer_blacklist,
renderer_whitelist,
template)
result_pillar = salt.utils.dictupdate.merge(result_pillar,
dir_pillar,
strategy='recurse')
return result_pillar
def _ext_pillar(minion_id,
root_dir,
follow_dir_links,
debug,
keep_newline,
render_default,
renderer_blacklist,
renderer_whitelist,
template):
'''
Compile pillar data for a single root_dir for the specified minion ID
'''
log.debug('file_tree: reading %s', root_dir)
if not os.path.isdir(root_dir):
log.error(
'file_tree: root_dir %s does not exist or is not a directory',
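A small sketch of how a relative ``root_dir`` is resolved against ``pillar_roots`` (paths are hypothetical):

.. code-block:: python

    # with options along these lines on the master ...
    opts = {
        'pillarenv': 'base',
        'pillar_roots': {'base': ['/srv/pillar', '/srv/pillar-extra']},
    }
    # ... and ``root_dir: hostfiles`` for this ext_pillar, the module looks in
    #   /srv/pillar/hostfiles
    #   /srv/pillar-extra/hostfiles
    # in that order, recursively merging whatever pillar data each yields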

View file

@ -354,7 +354,7 @@ def render(template, saltenv='base', sls='', tmplpath=None, rendered_sls=None, *
# is compiled to.
# __name__ can't be assigned a unicode
mod.__name__ = str(sls) # future lint: disable=non-unicode-string
mod.__name__ = str(sls) # future lint: disable=blacklisted-function
# to workaround state.py's use of copy.deepcopy(chunk)
mod.__deepcopy__ = lambda x: mod

View file

@ -52,6 +52,7 @@ from salt.ext import six
try:
from raven import Client
from raven.transport.http import HTTPTransport
has_raven = True
except ImportError:
@ -77,88 +78,8 @@ def returner(ret):
Failed states will be appended as separate list for convenience.
'''
def ret_is_not_error(result):
if result.get('return') and isinstance(result['return'], dict):
result_dict = result['return']
is_staterun = all('-' in key for key in result_dict.keys())
if is_staterun:
failed_states = {}
for state_id, state_result in six.iteritems(result_dict):
if not state_result['result']:
failed_states[state_id] = state_result
if failed_states:
result['failed_states'] = failed_states
return False
if result.get('success') and result.get('retcode', 0) == 0:
return True
return False
def get_message():
return 'func: {fun}, jid: {jid}'.format(fun=ret['fun'], jid=ret['jid'])
def connect_sentry(message, result):
'''
Connect to the Sentry server
'''
pillar_data = __salt__['pillar.raw']()
grains = __salt__['grains.items']()
raven_config = pillar_data['raven']
hide_pillar = raven_config.get('hide_pillar')
sentry_data = {
'result': result,
'pillar': 'HIDDEN' if hide_pillar else pillar_data,
'grains': grains
}
data = {
'platform': 'python',
'culprit': message,
'level': 'error'
}
tags = {}
if 'tags' in raven_config:
for tag in raven_config['tags']:
tags[tag] = grains[tag]
if ret_is_not_error(ret):
data['level'] = 'info'
if raven_config.get('report_errors_only') and data['level'] != 'error':
return
if raven_config.get('dsn'):
client = Client(raven_config.get('dsn'))
else:
try:
servers = []
for server in raven_config['servers']:
servers.append(server + '/api/store/')
client = Client(
servers=servers,
public_key=raven_config['public_key'],
secret_key=raven_config['secret_key'],
project=raven_config['project']
)
except KeyError as missing_key:
logger.error(
'Sentry returner needs key \'%s\' in pillar',
missing_key
)
return
try:
msgid = client.capture('raven.events.Message', message=message, data=data, extra=sentry_data, tags=tags)
logger.info('Message id %s written to sentry', msgid)
except Exception as exc:
logger.error(
'Can\'t send message to sentry: {0}'.format(exc),
exc_info=True
)
try:
connect_sentry(get_message(), ret)
_connect_sentry(_get_message(ret), ret)
except Exception as err:
logger.error(
'Can\'t run connect_sentry: {0}'.format(err),
@ -166,6 +87,105 @@ def returner(ret):
)
def _ret_is_not_error(result):
if result.get('return') and isinstance(result['return'], dict):
result_dict = result['return']
is_staterun = all('-' in key for key in result_dict.keys())
if is_staterun:
failed_states = {}
for state_id, state_result in six.iteritems(result_dict):
if not state_result['result']:
failed_states[state_id] = state_result
if failed_states:
result['failed_states'] = failed_states
return False
return True
if result.get('success'):
return True
return False
def _get_message(ret):
if not ret.get('fun_args'):
return 'salt func: {}'.format(ret['fun'])
arg_string = ' '.join([arg for arg in ret['fun_args'] if isinstance(arg, six.string_types)])
kwarg_string = ''
if isinstance(ret['fun_args'], list) and len(ret['fun_args']) > 0:
kwargs = ret['fun_args'][-1]
if isinstance(kwargs, dict):
kwarg_string = ' '.join(sorted(['{}={}'.format(k, v) for k, v in kwargs.items() if not k.startswith('_')]))
return 'salt func: {fun} {argstr} {kwargstr}'.format(fun=ret['fun'], argstr=arg_string, kwargstr=kwarg_string).strip()
def _connect_sentry(message, result):
'''
Connect to the Sentry server
'''
pillar_data = __salt__['pillar.raw']()
grains = __salt__['grains.items']()
raven_config = pillar_data['raven']
hide_pillar = raven_config.get('hide_pillar')
sentry_data = {
'result': result,
'pillar': 'HIDDEN' if hide_pillar else pillar_data,
'grains': grains
}
data = {
'platform': 'python',
'culprit': message,
'level': 'error'
}
tags = {}
if 'tags' in raven_config:
for tag in raven_config['tags']:
tags[tag] = grains[tag]
if _ret_is_not_error(result):
data['level'] = 'info'
if raven_config.get('report_errors_only') and data['level'] != 'error':
return
if raven_config.get('dsn'):
client = Client(raven_config.get('dsn'), transport=HTTPTransport)
else:
try:
servers = []
for server in raven_config['servers']:
servers.append(server + '/api/store/')
client = Client(
servers=servers,
public_key=raven_config['public_key'],
secret_key=raven_config['secret_key'],
project=raven_config['project'],
transport=HTTPTransport
)
except KeyError as missing_key:
logger.error(
'Sentry returner needs key \'%s\' in pillar',
missing_key
)
return
try:
msgid = client.capture(
'raven.events.Message',
message=message,
data=data,
extra=sentry_data,
tags=tags
)
logger.info('Message id {} written to sentry'.format(msgid))
except Exception as exc:
logger.error(
'Can\'t send message to sentry: {0}'.format(exc),
exc_info=True
)
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
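Sketch of the message ``_get_message()`` now builds for a return document (the function and arguments are hypothetical):

.. code-block:: python

    ret = {'fun': 'state.apply',
           'fun_args': ['webserver', {'test': True, '__kwarg__': True}]}
    # -> 'salt func: state.apply webserver test=True'
    # keyword names starting with an underscore (such as __kwarg__) are dropped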

View file

@ -38,8 +38,8 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = u'runner'
tag_prefix = u'run'
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
@ -47,8 +47,8 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
@property
def functions(self):
if not hasattr(self, u'_functions'):
if not hasattr(self, u'utils'):
if not hasattr(self, '_functions'):
if not hasattr(self, 'utils'):
self.utils = salt.loader.utils(self.opts)
# Must be self.functions for mixin to work correctly :-/
try:
@ -73,19 +73,19 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
New-style: ``{'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}``
CLI-style: ``{'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}``
'''
fun = low.pop(u'fun')
fun = low.pop('fun')
verify_fun(self.functions, fun)
eauth_creds = dict([(i, low.pop(i)) for i in [
u'username', u'password', u'eauth', u'token', u'client', u'user', u'key',
'username', 'password', 'eauth', 'token', 'client', 'user', 'key',
] if i in low])
# Run name=value args through parse_input. We don't need to run kwargs
# through because there is no way to send name=value strings in the low
# dict other than by including an `arg` array.
_arg, _kwarg = salt.utils.args.parse_input(
low.pop(u'arg', []), condition=False)
_kwarg.update(low.pop(u'kwarg', {}))
low.pop('arg', []), condition=False)
_kwarg.update(low.pop('kwarg', {}))
# If anything hasn't been pop()'ed out of low by this point it must be
# an old-style kwarg.
@ -100,10 +100,9 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
arg, kwarg = salt.minion.load_args_and_kwargs(
self.functions[fun],
munged,
self.opts,
ignore_invalid=True)
return dict(fun=fun, kwarg={u'kwarg': kwarg, u'arg': arg},
return dict(fun=fun, kwarg={'kwarg': kwarg, 'arg': arg},
**eauth_creds)
def cmd_async(self, low):
@ -170,10 +169,10 @@ class Runner(RunnerClient):
'''
Print out the documentation!
'''
arg = self.opts.get(u'fun', None)
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output(u'{0}:'.format(fun), u'text', self.opts)
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
# TODO: move to mixin whenever we want a salt-wheel cli
@ -183,117 +182,115 @@ class Runner(RunnerClient):
'''
import salt.minion
ret = {}
if self.opts.get(u'doc', False):
if self.opts.get('doc', False):
self.print_docs()
else:
low = {u'fun': self.opts[u'fun']}
low = {'fun': self.opts['fun']}
try:
# Allocate a jid
async_pub = self._gen_async_pub()
self.jid = async_pub[u'jid']
self.jid = async_pub['jid']
fun_args = salt.utils.args.parse_input(
self.opts[u'arg'],
no_parse=self.opts.get(u'no_parse', []))
self.opts['arg'],
no_parse=self.opts.get('no_parse', []))
verify_fun(self.functions, low[u'fun'])
verify_fun(self.functions, low['fun'])
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[low[u'fun']],
fun_args,
self.opts,
)
low[u'arg'] = args
low[u'kwarg'] = kwargs
self.functions[low['fun']],
fun_args)
low['arg'] = args
low['kwarg'] = kwargs
if self.opts.get(u'eauth'):
if u'token' in self.opts:
if self.opts.get('eauth'):
if 'token' in self.opts:
try:
with salt.utils.files.fopen(os.path.join(self.opts[u'key_dir'], u'.root_key'), u'r') as fp_:
low[u'key'] = fp_.readline()
with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_:
low['key'] = fp_.readline()
except IOError:
low[u'token'] = self.opts[u'token']
low['token'] = self.opts['token']
# If using eauth and a token hasn't already been loaded into
# low, prompt the user to enter auth credentials
if u'token' not in low and u'key' not in low and self.opts[u'eauth']:
if 'token' not in low and 'key' not in low and self.opts['eauth']:
# This is expensive. Don't do it unless we need to.
import salt.auth
resolver = salt.auth.Resolver(self.opts)
res = resolver.cli(self.opts[u'eauth'])
if self.opts[u'mktoken'] and res:
res = resolver.cli(self.opts['eauth'])
if self.opts['mktoken'] and res:
tok = resolver.token_cli(
self.opts[u'eauth'],
self.opts['eauth'],
res
)
if tok:
low[u'token'] = tok.get(u'token', u'')
low['token'] = tok.get('token', '')
if not res:
log.error(u'Authentication failed')
log.error('Authentication failed')
return ret
low.update(res)
low[u'eauth'] = self.opts[u'eauth']
low['eauth'] = self.opts['eauth']
else:
user = salt.utils.user.get_specific_user()
if low[u'fun'] == u'state.orchestrate':
low[u'kwarg'][u'orchestration_jid'] = async_pub[u'jid']
if low['fun'] == 'state.orchestrate':
low['kwarg']['orchestration_jid'] = async_pub['jid']
# Run the runner!
if self.opts.get(u'async', False):
if self.opts.get(u'eauth'):
if self.opts.get('async', False):
if self.opts.get('eauth'):
async_pub = self.cmd_async(low)
else:
async_pub = self.async(self.opts[u'fun'],
async_pub = self.async(self.opts['fun'],
low,
user=user,
pub=async_pub)
# by default: info will not be enough to be printed out!
log.warning(
u'Running in async mode. Results of this execution may '
u'be collected by attaching to the master event bus or '
u'by examining the master job cache, if configured. '
u'This execution is running under tag %s', async_pub[u'tag']
'Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
'by examining the master job cache, if configured. '
'This execution is running under tag %s', async_pub['tag']
)
return async_pub[u'jid'] # return the jid
return async_pub['jid'] # return the jid
# otherwise run it in the main process
if self.opts.get(u'eauth'):
if self.opts.get('eauth'):
ret = self.cmd_sync(low)
if isinstance(ret, dict) and set(ret) == set((u'data', u'outputter')):
outputter = ret[u'outputter']
ret = ret[u'data']
if isinstance(ret, dict) and set(ret) == set(('data', 'outputter')):
outputter = ret['outputter']
ret = ret['data']
else:
outputter = None
display_output(ret, outputter, self.opts)
else:
ret = self._proc_function(self.opts[u'fun'],
ret = self._proc_function(self.opts['fun'],
low,
user,
async_pub[u'tag'],
async_pub[u'jid'],
async_pub['tag'],
async_pub['jid'],
daemonize=False)
except salt.exceptions.SaltException as exc:
evt = salt.utils.event.get_event(u'master', opts=self.opts)
evt.fire_event({u'success': False,
u'return': u'{0}'.format(exc),
u'retcode': 254,
u'fun': self.opts[u'fun'],
u'fun_args': fun_args,
u'jid': self.jid},
tag=u'salt/run/{0}/ret'.format(self.jid))
evt = salt.utils.event.get_event('master', opts=self.opts)
evt.fire_event({'success': False,
'return': '{0}'.format(exc),
'retcode': 254,
'fun': self.opts['fun'],
'fun_args': fun_args,
'jid': self.jid},
tag='salt/run/{0}/ret'.format(self.jid))
# Attempt to grab documentation
if u'fun' in low:
ret = self.get_docs(u'{0}*'.format(low[u'fun']))
if 'fun' in low:
ret = self.get_docs('{0}*'.format(low['fun']))
else:
ret = None
# If we didn't get docs returned then
# return the `not available` message.
if not ret:
ret = u'{0}'.format(exc)
if not self.opts.get(u'quiet', False):
display_output(ret, u'nested', self.opts)
ret = '{0}'.format(exc)
if not self.opts.get('quiet', False):
display_output(ret, 'nested', self.opts)
else:
log.debug(u'Runner return: %s', ret)
log.debug('Runner return: %s', ret)
return ret

View file

@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
'''
Sync custom types to the Master
The Saltutil runner is used to sync custom types to the Master. See the
:mod:`saltutil module <salt.modules.saltutil>` for documentation on
managing updates to minions.
.. versionadded:: 2016.3.0
'''

View file

@ -42,6 +42,20 @@ def generate_token(minion_id, signature, impersonated_by_master=False):
try:
config = __opts__['vault']
verify = config.get('verify', None)
if config['auth']['method'] == 'approle':
if _selftoken_expired():
log.debug('Vault token expired. Recreating one')
# Requesting a short ttl token
url = '{0}/v1/auth/approle/login'.format(config['url'])
payload = {'role_id': config['auth']['role_id']}
if 'secret_id' in config['auth']:
payload['secret_id'] = config['auth']['secret_id']
response = requests.post(url, json=payload, verify=verify)
if response.status_code != 200:
return {'error': response.reason}
config['auth']['token'] = response.json()['auth']['client_token']
url = '{0}/v1/auth/token/create'.format(config['url'])
headers = {'X-Vault-Token': config['auth']['token']}
@ -56,8 +70,6 @@ def generate_token(minion_id, signature, impersonated_by_master=False):
'metadata': audit_data
}
verify = config.get('verify', None)
log.trace('Sending token creation request to Vault')
response = requests.post(url, headers=headers, json=payload, verify=verify)
@ -185,3 +197,23 @@ def _expand_pattern_lists(pattern, **mappings):
expanded_patterns += result
return expanded_patterns
return [pattern]
def _selftoken_expired():
'''
Validate the current token exists and is still valid
'''
try:
verify = __opts__['vault'].get('verify', None)
url = '{0}/v1/auth/token/lookup-self'.format(__opts__['vault']['url'])
if 'token' not in __opts__['vault']['auth']:
return True
headers = {'X-Vault-Token': __opts__['vault']['auth']['token']}
response = requests.get(url, headers=headers, verify=verify)
if response.status_code != 200:
return True
return False
except Exception as e:
raise salt.exceptions.CommandExecutionError(
'Error while looking up self token: {0}'.format(str(e))
)
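The AppRole path added above reads everything it needs from the ``vault`` section of the master configuration (``url``, ``auth:method``, ``auth:role_id``, an optional ``auth:secret_id``, and an optional ``verify``). A minimal configuration sketch matching that lookup, with the URL and IDs as placeholder values, might look like:
.. code-block:: yaml
    vault:
      url: https://vault.example.com:8200
      verify: /etc/ssl/certs/ca-certificates.crt
      auth:
        method: approle
        role_id: <your-approle-role-id>
        secret_id: <your-approle-secret-id>
With this in place the runner requests a short-lived token via ``/v1/auth/approle/login`` whenever its cached token has expired.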

View file

@ -24,7 +24,7 @@ import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=u''):
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
'''
if hardfailing:
If we got the original stacktrace, log it
@ -50,16 +50,16 @@ def _handle_signals(client, signum, sigframe):
hardcrash = False
if signum == signal.SIGINT:
exit_msg = u'\nExiting gracefully on Ctrl-c'
exit_msg = '\nExiting gracefully on Ctrl-c'
try:
jid = client.local_client.pub_data[u'jid']
jid = client.local_client.pub_data['jid']
exit_msg += (
u'\n'
u'This job\'s jid is: {0}\n'
u'The minions may not have all finished running and any remaining '
u'minions will return upon completion. To look up the return data '
u'for this job later, run the following command:\n\n'
u'salt-run jobs.lookup_jid {0}'.format(jid)
'\n'
'This job\'s jid is: {0}\n'
'The minions may not have all finished running and any remaining '
'minions will return upon completion. To look up the return data '
'for this job later, run the following command:\n\n'
'salt-run jobs.lookup_jid {0}'.format(jid)
)
except (AttributeError, KeyError):
pass
@ -68,7 +68,7 @@ def _handle_signals(client, signum, sigframe):
_handle_interrupt(
SystemExit(exit_msg),
Exception(u'\nExiting with hard crash on Ctrl-c'),
Exception('\nExiting with hard crash on Ctrl-c'),
hardcrash, trace=trace)
@ -102,7 +102,7 @@ def minion_process():
# salt_minion spawns this function in a new process
salt.utils.process.appendproctitle(u'KeepAlive')
salt.utils.process.appendproctitle('KeepAlive')
def handle_hup(manager, sig, frame):
manager.minion.reload()
@ -123,7 +123,7 @@ def minion_process():
except OSError as exc:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
log.error(u'Minion process encountered exception: %s', exc)
log.error('Minion process encountered exception: %s', exc)
os._exit(salt.defaults.exitcodes.EX_GENERIC)
if not salt.utils.platform.is_windows():
@ -138,13 +138,13 @@ def minion_process():
try:
minion.start()
except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.warning(u'Fatal functionality error caught by minion handler:\n', exc_info=True)
log.warning(u'** Restarting minion **')
log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True)
log.warning('** Restarting minion **')
delay = 60
if minion is not None and hasattr(minion, u'config'):
delay = minion.config.get(u'random_reauth_delay', 60)
if minion is not None and hasattr(minion, 'config'):
delay = minion.config.get('random_reauth_delay', 60)
delay = randint(1, delay)
log.info(u'waiting random_reauth_delay %ss', delay)
log.info('waiting random_reauth_delay %ss', delay)
time.sleep(delay)
sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
@ -162,16 +162,16 @@ def salt_minion():
import salt.cli.daemons
import multiprocessing
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
minion = salt.cli.daemons.Minion()
minion.start()
return
if u'--disable-keepalive' in sys.argv:
sys.argv.remove(u'--disable-keepalive')
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
minion = salt.cli.daemons.Minion()
minion.start()
return
@ -263,7 +263,7 @@ def proxy_minion_process(queue):
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.error(u'Proxy Minion failed to start: ', exc_info=True)
log.error('Proxy Minion failed to start: ', exc_info=True)
restart = True
# status is superfluous since the process will be restarted
status = salt.defaults.exitcodes.SALT_KEEPALIVE
@ -272,13 +272,13 @@ def proxy_minion_process(queue):
status = exc.code
if restart is True:
log.warning(u'** Restarting proxy minion **')
log.warning('** Restarting proxy minion **')
delay = 60
if proxyminion is not None:
if hasattr(proxyminion, u'config'):
delay = proxyminion.config.get(u'random_reauth_delay', 60)
if hasattr(proxyminion, 'config'):
delay = proxyminion.config.get('random_reauth_delay', 60)
random_delay = randint(1, delay)
log.info(u'Sleeping random_reauth_delay of %s seconds', random_delay)
log.info('Sleeping random_reauth_delay of %s seconds', random_delay)
# perform delay after minion resources have been cleaned
queue.put(random_delay)
else:
@ -293,16 +293,16 @@ def salt_proxy():
import salt.cli.daemons
import salt.utils.platform
import multiprocessing
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if u'--disable-keepalive' in sys.argv:
sys.argv.remove(u'--disable-keepalive')
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
@ -368,7 +368,7 @@ def salt_key():
_install_signal_handlers(client)
client.run()
except Exception as err:
sys.stderr.write(u"Error: {0}\n".format(err))
sys.stderr.write("Error: {0}\n".format(err))
def salt_cp():
@ -388,8 +388,8 @@ def salt_call():
salt minion to run.
'''
import salt.cli.call
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
client = salt.cli.call.SaltCall()
_install_signal_handlers(client)
client.run()
@ -400,8 +400,8 @@ def salt_run():
Execute a salt convenience routine.
'''
import salt.cli.run
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run()
@ -412,8 +412,8 @@ def salt_ssh():
Execute the salt-ssh system
'''
import salt.cli.ssh
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.ssh.SaltSSH()
_install_signal_handlers(client)
@ -444,11 +444,11 @@ def salt_cloud():
import salt.cloud.cli
except ImportError as e:
# No salt cloud on Windows
log.error(u'Error importing salt cloud: %s', e)
print(u'salt-cloud is not available in this system')
log.error('Error importing salt cloud: %s', e)
print('salt-cloud is not available in this system')
sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
client = salt.cloud.cli.SaltCloud()
_install_signal_handlers(client)
@ -473,8 +473,8 @@ def salt_main():
master.
'''
import salt.cli.salt
if u'' in sys.path:
sys.path.remove(u'')
if '' in sys.path:
sys.path.remove('')
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run()

View file

@ -79,7 +79,7 @@ def serialize(obj, **options):
class EncryptedString(str):
yaml_tag = u'!encrypted'
yaml_tag = '!encrypted'
@staticmethod
def yaml_constructor(loader, tag, node):

View file

@ -215,11 +215,11 @@ class Loader(BaseLoader): # pylint: disable=W0232
# The !reset instruction applies to the document only.
# It resets any previously decoded value for this key.
reset = key_node.tag == u'!reset'
reset = key_node.tag == '!reset'
# even though the !aggregate tag applies only to values and not keys,
# be strict and warn when it is used on a key.
if key_node.tag == u'!aggregate':
if key_node.tag == '!aggregate':
log.warning('!aggregate applies on values only, not on keys')
value_node.tag = key_node.tag
key_node.tag = self.resolve_sls_tag(key_node)[0]

File diff suppressed because it is too large

View file

@ -64,7 +64,7 @@ def exists(
ret['comment'] = created['stderr']
else:
ret['comment'] = u'{0} exists in {1}'.format(name, region)
ret['comment'] = '{0} exists in {1}'.format(name, region)
return ret
@ -106,6 +106,6 @@ def absent(
ret['result'] = False
ret['comment'] = removed['stderr']
else:
ret['comment'] = u'{0} does not exist in {1}'.format(name, region)
ret['comment'] = '{0} does not exist in {1}'.format(name, region)
return ret

View file

@ -287,7 +287,8 @@ def present(name=None,
if not table_exists:
if __opts__['test']:
ret['result'] = None
comments.append('DynamoDB table {0} is set to be created.'.format(name))
ret['comment'] = 'DynamoDB table {0} would be created.'.format(name)
return ret
else:
is_created = __salt__['boto_dynamodb.create_table'](
name,

View file

@ -409,6 +409,7 @@ def wait(name,
stateful=False,
umask=None,
output_loglevel='debug',
hide_output=False,
use_vt=False,
**kwargs):
'''
@ -493,10 +494,22 @@ def wait(name,
.. versionadded:: 2014.7.0
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
use_vt
Use VT utils (saltstack) to stream the command output more
@ -527,6 +540,7 @@ def wait_script(name,
umask=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
**kwargs):
'''
Download a script from a remote source and execute it only if a watch
@ -617,11 +631,22 @@ def wait_script(name,
interactively to the console and the logs.
This is experimental.
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
'''
# Ignoring our arguments is intentional.
return {'name': name,
@ -642,7 +667,7 @@ def run(name,
stateful=False,
umask=None,
output_loglevel='debug',
quiet=False,
hide_output=False,
timeout=None,
ignore_timeout=False,
use_vt=False,
@ -657,11 +682,11 @@ def run(name,
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
passed to the ``onlyif`` option returns a zero exit status
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
passed to the ``unless`` option returns a non-zero exit status
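As a sketch only (the command and file names are made up), a state that reloads a service but skips the command entirely unless its config file exists could read:
.. code-block:: yaml
    reload_myapp:
      cmd.run:
        - name: systemctl reload myapp
        - onlyif: test -f /etc/myapp/app.conf
Here the ``onlyif`` check must exit with status ``0`` for ``systemctl reload myapp`` to run; ``unless`` works the same way with the logic inverted.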
cwd
The current working directory to execute the command in, defaults to
@ -726,16 +751,29 @@ def run(name,
umask
The umask (in octal) to use when running the command.
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
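A hedged example of combining the two options (the command name is hypothetical): keep the command's output out of the state results with ``hide_output`` and out of the minion log with ``output_loglevel: quiet``:
.. code-block:: yaml
    fetch_secret_report:
      cmd.run:
        - name: /usr/local/bin/report --include-secrets
        - hide_output: True
        - output_loglevel: quiet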
quiet
The command will be executed quietly, meaning no log entries of the
actual command or its return data. This is deprecated as of the
**2014.1.0** release, and is being replaced with
``output_loglevel: quiet``.
This option no longer has any functionality and will be removed, please
set ``output_loglevel`` to ``quiet`` to suppress logging of the
command.
.. deprecated:: 2014.1.0
timeout
If the command has not terminated after timeout seconds, send the
@ -752,13 +790,13 @@ def run(name,
.. versionadded:: 2014.7.0
use_vt
use_vt : False
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
bg
If ``True``, run command in background and do not await or deliver it's
bg : False
If ``True``, run command in background and do not await or deliver its
results.
.. versionadded:: 2016.3.6
@ -788,6 +826,23 @@ def run(name,
### definition, otherwise the use of unsupported arguments in a
### ``cmd.run`` state will result in a traceback.
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if u'quiet' in kwargs:
quiet = kwargs.pop(u'quiet')
msg = (
u'The \'quiet\' argument for cmd.run has been deprecated since '
u'2014.1.0 and will be removed as of the Neon release. Please set '
u'\'output_loglevel\' to \'quiet\' instead.'
)
salt.utils.versions.warn_until(u'Neon', msg)
ret.setdefault(u'warnings', []).append(msg)
else:
quiet = False
test_name = None
if not isinstance(stateful, list):
stateful = stateful is True
@ -796,11 +851,6 @@ def run(name,
if __opts__['test'] and test_name:
name = test_name
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, (list, dict)):
@ -817,6 +867,7 @@ def run(name,
'prepend_path': prepend_path,
'umask': umask,
'output_loglevel': output_loglevel,
'hide_output': hide_output,
'quiet': quiet})
cret = mod_run_check(cmd_kwargs, onlyif, unless, creates)
@ -847,7 +898,7 @@ def run(name,
ret['changes'] = cmd_all
ret['result'] = not bool(cmd_all['retcode'])
ret['comment'] = u'Command "{0}" run'.format(name)
ret['comment'] = 'Command "{0}" run'.format(name)
# Ignore timeout errors if asked (for nohups) and treat cmd as a success
if ignore_timeout:
@ -878,6 +929,7 @@ def script(name,
timeout=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
defaults=None,
context=None,
**kwargs):
@ -996,11 +1048,22 @@ def script(name,
Default context passed to the template.
output_loglevel
Control the loglevel at which the output from the command is logged.
Note that the command being run will still be logged (loglevel: DEBUG)
regardless, unless ``quiet`` is used for this value.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: Oxygen
'''
test_name = None
if not isinstance(stateful, list):
@ -1046,6 +1109,7 @@ def script(name,
'umask': umask,
'timeout': timeout,
'output_loglevel': output_loglevel,
'hide_output': hide_output,
'use_vt': use_vt,
'context': tmpctx,
'saltenv': __env__})
@ -1116,6 +1180,7 @@ def call(name,
unless=None,
creates=None,
output_loglevel='debug',
hide_output=False,
use_vt=False,
**kwargs):
'''
@ -1161,6 +1226,7 @@ def call(name,
'env': kwargs.get('env'),
'use_vt': use_vt,
'output_loglevel': output_loglevel,
'hide_output': hide_output,
'umask': kwargs.get('umask')}
cret = mod_run_check(cmd_kwargs, onlyif, unless, creates)
@ -1193,6 +1259,7 @@ def wait_call(name,
stateful=False,
use_vt=False,
output_loglevel='debug',
hide_output=False,
**kwargs):
# Ignoring our arguments is intentional.
return {'name': name,

File diff suppressed because it is too large

View file

@ -42,6 +42,8 @@ import logging
import salt.utils.docker
import salt.utils.args
from salt.ext.six.moves import zip
from salt.ext import six
from salt.exceptions import CommandExecutionError
# Enable proper logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@ -61,6 +63,7 @@ def __virtual__():
def present(name,
tag=None,
build=None,
load=None,
force=False,
@ -72,45 +75,56 @@ def present(name,
saltenv='base',
**kwargs):
'''
Ensure that an image is present. The image can either be pulled from a
Docker registry, built from a Dockerfile, or loaded from a saved image.
Image names can be specified either using ``repo:tag`` notation, or just
the repo name (in which case a tag of ``latest`` is assumed).
Repo identifier is mandatory, we don't assume the default repository
is docker hub.
.. versionchanged:: Oxygen
The ``tag`` argument has been added. It is now required unless pulling
from a registry.
If neither of the ``build`` or ``load`` arguments are used, then Salt will
pull from the :ref:`configured registries <docker-authentication>`. If the
specified image already exists, it will not be pulled unless ``force`` is
set to ``True``. Here is an example of a state that will pull an image from
the Docker Hub:
Ensure that an image is present. The image can either be pulled from a
Docker registry, built from a Dockerfile, loaded from a saved image, or
built by running SLS files against a base image.
If none of the ``build``, ``load``, or ``sls`` arguments are used, then Salt
will pull from the :ref:`configured registries <docker-authentication>`. If
the specified image already exists, it will not be pulled unless ``force``
is set to ``True``. Here is an example of a state that will pull an image
from the Docker Hub:
.. code-block:: yaml
myuser/myimage:mytag:
docker_image.present
myuser/myimage:
docker_image.present:
- tag: mytag
tag
Tag name for the image. Required when using ``build``, ``load``, or
``sls`` to create the image, but optional if pulling from a repository.
.. versionadded:: Oxygen
build
Path to directory on the Minion containing a Dockerfile
.. code-block:: yaml
myuser/myimage:mytag:
myuser/myimage:
docker_image.present:
- build: /home/myuser/docker/myimage
- tag: mytag
myuser/myimage:mytag:
myuser/myimage:
docker_image.present:
- build: /home/myuser/docker/myimage
- tag: mytag
- dockerfile: Dockerfile.alternative
.. versionadded:: 2016.11.0
The image will be built using :py:func:`docker.build
<salt.modules.dockermod.build>` and the specified image name and tag
will be applied to it.
.. versionadded:: 2016.11.0
.. versionchanged: Oxygen
The ``tag`` must be manually specified using the ``tag`` argument.
load
Loads a tar archive created with :py:func:`docker.load
<salt.modules.dockermod.load>` (or the ``docker load`` Docker CLI
@ -118,9 +132,13 @@ def present(name,
.. code-block:: yaml
myuser/myimage:mytag:
myuser/myimage:
docker_image.present:
- load: salt://path/to/image.tar
- tag: mytag
.. versionchanged: Oxygen
The ``tag`` must be manually specified using the ``tag`` argument.
force : False
Set this parameter to ``True`` to force Salt to pull/build/load the
@ -143,8 +161,9 @@ def present(name,
.. code-block:: yaml
myuser/myimage:mytag:
myuser/myimage:
docker_image.present:
- tag: latest
- sls:
- webapp1
- webapp2
@ -152,6 +171,8 @@ def present(name,
- saltenv: base
.. versionadded: 2017.7.0
.. versionchanged: Oxygen
The ``tag`` must be manually specified using the ``tag`` argument.
base
Base image with which to start :py:func:`docker.sls_build
@ -170,29 +191,48 @@ def present(name,
'result': False,
'comment': ''}
if build is not None and load is not None:
ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.'
if not isinstance(name, six.string_types):
name = str(name)
# At most one of the args that result in an image being built can be used
num_build_args = len([x for x in (build, load, sls) if x is not None])
if num_build_args > 1:
ret['comment'] = \
'Only one of \'build\', \'load\', or \'sls\' is permitted.'
return ret
image = ':'.join(salt.utils.docker.get_repo_tag(name))
resolved_tag = __salt__['docker.resolve_tag'](image)
if resolved_tag is False:
# Specified image is not present
image_info = None
elif num_build_args == 1:
# If building, we need the tag to be specified
if not tag:
ret['comment'] = (
'The \'tag\' argument is required if any one of \'build\', '
'\'load\', or \'sls\' is used.'
)
return ret
if not isinstance(tag, six.string_types):
tag = str(tag)
full_image = ':'.join((name, tag))
else:
if tag:
name = '{0}:{1}'.format(name, tag)
full_image = name
try:
image_info = __salt__['docker.inspect_image'](full_image)
except CommandExecutionError as exc:
msg = exc.__str__()
if '404' in msg:
# Image not present
image_info = None
else:
ret['comment'] = msg
return ret
if image_info is not None:
# Specified image is present
if not force:
ret['result'] = True
ret['comment'] = 'Image \'{0}\' already present'.format(name)
ret['comment'] = 'Image {0} already present'.format(full_image)
return ret
else:
try:
image_info = __salt__['docker.inspect_image'](name)
except Exception as exc:
ret['comment'] = \
'Unable to get info for image \'{0}\': {1}'.format(name, exc)
return ret
if build or sls:
action = 'built'
@ -203,12 +243,12 @@ def present(name,
if __opts__['test']:
ret['result'] = None
if (resolved_tag is not False and force) or resolved_tag is False:
ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action)
if (image_info is not None and force) or image_info is None:
ret['comment'] = 'Image {0} will be {1}'.format(full_image, action)
return ret
if build:
# get the functions default value and args
# Get the functions default value and args
argspec = salt.utils.args.get_function_argspec(__salt__['docker.build'])
# Map any existing args from kwargs into the build_args dictionary
build_args = dict(list(zip(argspec.args, argspec.defaults)))
@ -218,30 +258,30 @@ def present(name,
try:
# map values passed from the state to the build args
build_args['path'] = build
build_args['image'] = image
build_args['image'] = full_image
build_args['dockerfile'] = dockerfile
image_update = __salt__['docker.build'](**build_args)
except Exception as exc:
ret['comment'] = (
'Encountered error building {0} as {1}: {2}'
.format(build, image, exc)
'Encountered error building {0} as {1}: {2}'.format(
build, full_image, exc
)
)
return ret
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
ret['changes'] = image_update
elif sls:
if isinstance(sls, list):
sls = ','.join(sls)
try:
image_update = __salt__['docker.sls_build'](name=image,
image_update = __salt__['docker.sls_build'](repository=name,
tag=tag,
base=base,
mods=sls,
saltenv=saltenv)
except Exception as exc:
ret['comment'] = (
'Encountered error using sls {0} for building {1}: {2}'
.format(sls, image, exc)
'Encountered error using SLS {0} for building {1}: {2}'
.format(sls, full_image, exc)
)
return ret
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
@ -249,11 +289,13 @@ def present(name,
elif load:
try:
image_update = __salt__['docker.load'](path=load, image=image)
image_update = __salt__['docker.load'](path=load,
repository=name,
tag=tag)
except Exception as exc:
ret['comment'] = (
'Encountered error loading {0} as {1}: {2}'
.format(load, image, exc)
.format(load, full_image, exc)
)
return ret
if image_info is None or image_update.get('Layers', []):
@ -262,13 +304,13 @@ def present(name,
else:
try:
image_update = __salt__['docker.pull'](
image,
name,
insecure_registry=insecure_registry,
client_timeout=client_timeout
)
except Exception as exc:
ret['comment'] = \
'Encountered error pulling {0}: {1}'.format(image, exc)
'Encountered error pulling {0}: {1}'.format(full_image, exc)
return ret
if (image_info is not None and image_info['Id'][:12] == image_update
.get('Layers', {})
@ -280,18 +322,28 @@ def present(name,
# Only add to the changes dict if layers were pulled
ret['changes'] = image_update
ret['result'] = bool(__salt__['docker.resolve_tag'](image))
try:
__salt__['docker.inspect_image'](full_image)
error = False
except CommandExecutionError as exc:
msg = exc.__str__()
if '404' not in msg:
error = 'Failed to inspect image \'{0}\' after it was {1}: {2}'.format(
full_image, action, msg
)
if not ret['result']:
# This shouldn't happen, failure to pull should be caught above
ret['comment'] = 'Image \'{0}\' could not be {1}'.format(name, action)
elif not ret['changes']:
ret['comment'] = (
'Image \'{0}\' was {1}, but there were no changes'
.format(name, action)
)
if error:
ret['comment'] = error
else:
ret['comment'] = 'Image \'{0}\' was {1}'.format(name, action)
ret['result'] = True
if not ret['changes']:
ret['comment'] = (
'Image \'{0}\' was {1}, but there were no changes'.format(
name, action
)
)
else:
ret['comment'] = 'Image \'{0}\' was {1}'.format(full_image, action)
return ret
@ -362,19 +414,16 @@ def absent(name=None, images=None, force=False):
elif name:
targets = [name]
pre_tags = __salt__['docker.list_tags']()
to_delete = []
for target in targets:
resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags)
resolved_tag = __salt__['docker.resolve_tag'](target)
if resolved_tag is not False:
to_delete.append(resolved_tag)
log.debug('targets = {0}'.format(targets))
log.debug('to_delete = {0}'.format(to_delete))
if not to_delete:
ret['result'] = True
if len(targets) == 1:
ret['comment'] = 'Image \'{0}\' is not present'.format(name)
ret['comment'] = 'Image {0} is not present'.format(name)
else:
ret['comment'] = 'All specified images are not present'
return ret
@ -382,11 +431,13 @@ def absent(name=None, images=None, force=False):
if __opts__['test']:
ret['result'] = None
if len(to_delete) == 1:
ret['comment'] = ('Image \'{0}\' will be removed'
.format(to_delete[0]))
ret['comment'] = 'Image {0} will be removed'.format(to_delete[0])
else:
ret['comment'] = ('The following images will be removed: {0}'
.format(', '.join(to_delete)))
ret['comment'] = (
'The following images will be removed: {0}'.format(
', '.join(to_delete)
)
)
return ret
result = __salt__['docker.rmi'](*to_delete, force=force)
@ -397,8 +448,9 @@ def absent(name=None, images=None, force=False):
if [x for x in to_delete if x not in post_tags]:
ret['changes'] = result
ret['comment'] = (
'The following image(s) failed to be removed: {0}'
.format(', '.join(failed))
'The following image(s) failed to be removed: {0}'.format(
', '.join(failed)
)
)
else:
ret['comment'] = 'None of the specified images were removed'
@ -410,11 +462,12 @@ def absent(name=None, images=None, force=False):
else:
ret['changes'] = result
if len(to_delete) == 1:
ret['comment'] = 'Image \'{0}\' was removed'.format(to_delete[0])
ret['comment'] = 'Image {0} was removed'.format(to_delete[0])
else:
ret['comment'] = (
'The following images were removed: {0}'
.format(', '.join(to_delete))
'The following images were removed: {0}'.format(
', '.join(to_delete)
)
)
ret['result'] = True
@ -430,5 +483,5 @@ def mod_watch(name, sfun=None, **kwargs):
return {'name': name,
'changes': {},
'result': False,
'comment': ('watch requisite is not'
' implemented for {0}'.format(sfun))}
'comment': 'watch requisite is not implemented for '
'{0}'.format(sfun)}

Some files were not shown because too many files have changed in this diff