Merge branch '2017.7' into vpc_peering_connection_name_fix

This commit is contained in:
Mike Place 2018-05-01 11:31:08 -05:00 committed by GitHub
commit a968965087
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
51 changed files with 879 additions and 456 deletions

6
.github/CODEOWNERS vendored
View file

@ -67,4 +67,8 @@ salt/transport/* @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport
# Team Windows
salt/**/*win* @saltstack/team-windows
salt/*/*win* @saltstack/team-windows
salt/modules/reg.py @saltstack/team-windows
salt/states/reg.py @saltstack/team-windows
tests/*/*win* @saltstack/team-windows
tests/*/test_reg.py @saltstack/team-windows

View file

@ -323,6 +323,7 @@ rst_prolog = """\
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a

View file

@ -60,7 +60,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
isolated into separate branches.
If you're working on a bug or documentation fix, create your branch from
the oldest release branch that contains the bug or requires the documentation
the oldest **supported** main release branch that contains the bug or requires the documentation
update. See :ref:`Which Salt Branch? <which-salt-branch>`.
.. code-block:: bash
@ -212,8 +212,11 @@ There are three different kinds of branches in use: develop, main release
branches, and dot release branches.
- All feature work should go into the ``develop`` branch.
- Bug fixes and documentation changes should go into the oldest supported
**main** release branch affected by the the bug or documentation change.
- Bug fixes and documentation changes should go into the oldest **supported
main** release branch affected by the the bug or documentation change (you
can use the blame button in github to figure out when the bug was introduced).
Supported releases are the last 2 releases. For example, if the latest release
is 2018.3, the last two release are 2018.3 and 2017.7.
Main release branches are named after a year and month, such as
``2016.11`` and ``2017.7``.
- Hot fixes, as determined by SaltStack's release team, should be submitted
@ -247,7 +250,7 @@ Main Release Branches
=====================
The current release branch is the most recent stable release. Pull requests
containing bug fixes or documentation changes should be made against the main
containing bug fixes or documentation changes should be made against the oldest supported main
release branch that is affected.
The branch name will be a date-based name such as ``2016.11``.

View file

@ -212,8 +212,9 @@ on GitHub.
repository in your own account on GitHub and notify a SaltStack employee
when it is ready. We will add you to the contributors team on the
`saltstack-formulas`_ organization and help you transfer the repository
over. Ping a SaltStack employee on IRC (``#salt`` on Freenode) or send an
email to the `salt-users`_ mailing list.
over. Ping a SaltStack employee on IRC (``#salt`` on Freenode), join the
``#formulas`` channel on the `salt-slack`_ or send an email to the
`salt-users`_ mailing list.
There are a lot of repositories in that organization! Team members can
manage which repositories they are subscribed to on GitHub's watching page:

View file

@ -22,7 +22,7 @@ Changes:
This release includes a CVE Fix:
CVE-2017-7893: Compromised salt-minions can impersonate the salt-master.
CVE-2017-7893: Compromised salt-minions can impersonate the salt-master. (Discovery credit: Frank Spierings)
- **PR** `#39855`_: (*Foxlik*) Use regular expression instead of split when replacing authorized_keys
@ *2017-03-22T18:28:32Z*

View file

@ -1,4 +1,4 @@
-r base.txt
-r base-py2.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -6,6 +6,6 @@ boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest
pytest>=3.5.0
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View file

@ -1,4 +1,4 @@
-r base.txt
-r base-py3.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -11,5 +11,5 @@ moto>=0.3.6
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
pytest
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View file

@ -1,3 +1,3 @@
pytest
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir

View file

@ -2044,9 +2044,10 @@ def list_input_endpoints(kwargs=None, conn=None, call=None):
ret = {}
for item in data:
if 'Role' not in item:
continue
for role in item['Role']:
if 'Role' in item:
role = item['Role']
if not isinstance(role, dict):
return ret
input_endpoint = role['ConfigurationSets']['ConfigurationSet'].get('InputEndpoints', {}).get('InputEndpoint')
if not input_endpoint:
continue
@ -2054,6 +2055,7 @@ def list_input_endpoints(kwargs=None, conn=None, call=None):
input_endpoint = [input_endpoint]
for endpoint in input_endpoint:
ret[endpoint['Name']] = endpoint
return ret
return ret

View file

@ -9,7 +9,7 @@
#
# BUGS: https://github.com/saltstack/salt-bootstrap/issues
#
# COPYRIGHT: (c) 2012-2017 by the SaltStack Team, see AUTHORS.rst for more
# COPYRIGHT: (c) 2012-2018 by the SaltStack Team, see AUTHORS.rst for more
# details.
#
# LICENSE: Apache 2.0
@ -18,7 +18,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2017.12.13"
__ScriptVersion="2018.04.25"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -249,7 +249,6 @@ _CURL_ARGS=${BS_CURL_ARGS:-}
_FETCH_ARGS=${BS_FETCH_ARGS:-}
_GPG_ARGS=${BS_GPG_ARGS:-}
_WGET_ARGS=${BS_WGET_ARGS:-}
_ENABLE_EXTERNAL_ZMQ_REPOS=${BS_ENABLE_EXTERNAL_ZMQ_REPOS:-$BS_FALSE}
_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null}
_SALT_MINION_ID="null"
# _SIMPLIFY_VERSION is mostly used in Solaris based distributions
@ -299,13 +298,13 @@ __usage() {
Examples:
- ${__ScriptName}
- ${__ScriptName} stable
- ${__ScriptName} stable 2016.3
- ${__ScriptName} stable 2016.3.1
- ${__ScriptName} stable 2017.7
- ${__ScriptName} stable 2017.7.2
- ${__ScriptName} daily
- ${__ScriptName} testing
- ${__ScriptName} git
- ${__ScriptName} git 2016.3
- ${__ScriptName} git v2016.3.1
- ${__ScriptName} git 2017.7
- ${__ScriptName} git v2017.7.2
- ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358
Options:
@ -355,8 +354,6 @@ __usage() {
per -p flag. You're responsible for providing the proper package name.
-H Use the specified HTTP proxy for all download URLs (including https://).
For example: http://myproxy.example.com:3128
-Z Enable additional package repository for newer ZeroMQ
(only available for RHEL/CentOS/Fedora/Ubuntu based distributions)
-b Assume that dependencies are already installed and software sources are
set up. If git is selected, git tree is still checked out as dependency
step.
@ -395,7 +392,7 @@ __usage() {
tested with Centos 6 and is considered experimental. This will install the
ius repo on the box if disable repo is false. This must be used in conjunction
with -x <pythonversion>. For example:
sh bootstrap.sh -P -y -x python2.7 git v2016.11.3
sh bootstrap.sh -P -y -x python2.7 git v2017.7.2
The above will install python27 and install the git version of salt using the
python2.7 executable. This only works for git and pip installations.
@ -438,7 +435,6 @@ do
p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;;
d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;;
H ) _HTTP_PROXY="$OPTARG" ;;
Z ) _ENABLE_EXTERNAL_ZMQ_REPOS=$BS_TRUE ;;
b ) _NO_DEPS=$BS_TRUE ;;
f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;;
l ) _DISABLE_SSL=$BS_TRUE ;;
@ -593,14 +589,14 @@ elif [ "$ITYPE" = "stable" ]; then
if [ "$#" -eq 0 ];then
STABLE_REV="latest"
else
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7)$')" != "" ]; then
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$')" != "" ]; then
STABLE_REV="$1"
shift
elif [ "$(echo "$1" | egrep '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then
STABLE_REV="archive/$1"
shift
else
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, latest, \$MAJOR.\$MINOR.\$PATCH)"
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, latest, \$MAJOR.\$MINOR.\$PATCH)"
exit 1
fi
fi
@ -1331,10 +1327,10 @@ __check_dpkg_architecture() {
if [ "${error_msg}" != "" ]; then
echoerror "${error_msg}"
if [ "$ITYPE" != "git" ]; then
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.11.5."
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2."
echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository."
echoerror "For example:"
echoerror " sh ${__ScriptName} -r -P git v2016.11.5"
echoerror " sh ${__ScriptName} -r -P git v2017.7.2"
fi
fi
@ -1372,16 +1368,10 @@ __ubuntu_codename_translation() {
DISTRO_CODENAME="trusty"
;;
"16")
if [ "$_april" ]; then
DISTRO_CODENAME="xenial"
else
DISTRO_CODENAME="yakkety"
fi
DISTRO_CODENAME="xenial"
;;
"17")
if [ "$_april" ]; then
DISTRO_CODENAME="zesty"
fi
DISTRO_CODENAME="artful"
;;
*)
DISTRO_CODENAME="trusty"
@ -1500,9 +1490,12 @@ __check_end_of_life_versions() {
# < 14.04
# = 14.10
# = 15.04, 15.10
# = 16.10
# = 17.04
if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
[ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
([ "$DISTRO_MAJOR_VERSION" -eq 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]) || \
([ "$DISTRO_MAJOR_VERSION" -lt 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://wiki.ubuntu.com/Releases"
@ -1544,8 +1537,8 @@ __check_end_of_life_versions() {
;;
fedora)
# Fedora lower than 25 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 25 ]; then
# Fedora lower than 26 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://fedoraproject.org/wiki/Releases"
@ -1765,12 +1758,41 @@ __function_defined() {
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __wait_for_apt
# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before
# calling these again. This is useful when these process calls are part of
# a boot process, such as on AWS AMIs. This func will wait until the boot
# process is finished so the script doesn't exit on a locked proc.
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
echodebug "Checking if apt process is currently running."
# Timeout set at 15 minutes
WAIT_TIMEOUT=900
while ps -C apt,apt-get,aptitude,dpkg >/dev/null; do
sleep 1
WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
# If timeout reaches 0, abort.
if [ "$WAIT_TIMEOUT" -eq 0 ]; then
echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
echoerror "Bootstrap script cannot proceed. Aborting."
return 1
fi
done
echodebug "No apt processes are currently running."
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __apt_get_install_noinput
# DESCRIPTION: (DRY) apt-get install with noinput options
# PARAMETERS: packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
__wait_for_apt
apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
} # ---------- end of function __apt_get_install_noinput ----------
@ -1780,6 +1802,7 @@ __apt_get_install_noinput() {
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
#----------------------------------------------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
__wait_for_apt
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
} # ---------- end of function __apt_get_upgrade_noinput ----------
@ -1790,6 +1813,7 @@ __apt_get_upgrade_noinput() {
# PARAMETERS: url
#----------------------------------------------------------------------------------------------------------------------
__apt_key_fetch() {
__wait_for_apt
url=$1
# shellcheck disable=SC2086
@ -2544,7 +2568,7 @@ __enable_universe_repository() {
__install_saltstack_ubuntu_repository() {
# Workaround for latest non-LTS ubuntu
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems."
UBUNTU_VERSION=16.04
UBUNTU_CODENAME="xenial"
@ -2556,8 +2580,8 @@ __install_saltstack_ubuntu_repository() {
__PACKAGES=''
# Install downloader backend for GPG keys fetching
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg2 dirmngr"
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg dirmngr"
else
__PACKAGES="${__PACKAGES} gnupg-curl"
fi
@ -2576,6 +2600,7 @@ __install_saltstack_ubuntu_repository() {
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2588,6 +2613,7 @@ install_ubuntu_deps() {
__enable_universe_repository || return 1
__wait_for_apt
apt-get update
fi
@ -2644,6 +2670,7 @@ install_ubuntu_stable_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -2664,6 +2691,7 @@ install_ubuntu_stable_deps() {
}
install_ubuntu_daily_deps() {
__wait_for_apt
install_ubuntu_stable_deps || return 1
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
@ -2681,6 +2709,7 @@ install_ubuntu_daily_deps() {
}
install_ubuntu_git_deps() {
__wait_for_apt
apt-get update
if ! __check_command_exists git; then
@ -2711,8 +2740,8 @@ install_ubuntu_git_deps() {
else
install_ubuntu_stable_deps || return 1
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-m2crypto python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -2791,7 +2820,7 @@ install_ubuntu_stable_post() {
/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/bin/systemctl daemon-reload
elif [ -f /etc/init.d/salt-$fname ]; then
update-rc.d salt-$fname defaults
@ -2817,7 +2846,7 @@ install_ubuntu_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
elif [ -f /sbin/initctl ]; then
_upstart_conf="/etc/init/salt-$fname.conf"
@ -2973,6 +3002,7 @@ __install_saltstack_debian_repository() {
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2984,6 +3014,7 @@ install_debian_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -3030,9 +3061,9 @@ install_debian_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3071,8 +3102,9 @@ install_debian_8_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-systemd python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3081,7 +3113,7 @@ install_debian_8_git_deps() {
__PIP_PACKAGES=''
if (__check_pip_allowed >/dev/null 2>&1); then
__PIP_PACKAGES='tornado'
__PIP_PACKAGES='tornado<5.0'
# Install development environment for building tornado Python module
__PACKAGES="${__PACKAGES} build-essential python-dev"
@ -3096,6 +3128,7 @@ install_debian_8_git_deps() {
/etc/apt/sources.list.d/backports.list
fi
__wait_for_apt
apt-get update || return 1
# python-tornado package should be installed from backports repo
@ -3135,8 +3168,8 @@ install_debian_9_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 lsb-release python-apt python-backports-abc python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-jinja2 python-m2crypto python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-systemd python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3330,15 +3363,8 @@ install_debian_check_services() {
install_fedora_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
if [ "$_ENABLE_EXTERNAL_ZMQ_REPOS" -eq $BS_TRUE ]; then
__install_saltstack_copr_zeromq_repository || return 1
fi
__install_saltstack_copr_salt_repository || return 1
fi
__PACKAGES="PyYAML libyaml python-crypto python-jinja2 python-zmq python2-msgpack python2-requests"
__PACKAGES="libyaml m2crypto PyYAML python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python2-msgpack python2-requests python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
__PACKAGES="${__PACKAGES} yum-utils"
@ -3395,7 +3421,7 @@ install_fedora_stable_post() {
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3456,7 +3482,7 @@ install_fedora_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3523,20 +3549,6 @@ __install_epel_repository() {
return 0
}
__install_saltstack_copr_zeromq_repository() {
echoinfo "Installing Zeromq >=4 and PyZMQ>=14 from SaltStack's COPR repository"
if [ ! -s /etc/yum.repos.d/saltstack-zeromq4.repo ]; then
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__fetch_url /etc/yum.repos.d/saltstack-zeromq4.repo \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/zeromq4/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/saltstack-zeromq4-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo" || return 1
fi
return 0
}
__install_saltstack_rhel_repository() {
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
@ -3550,7 +3562,7 @@ __install_saltstack_rhel_repository() {
gpg_key="SALTSTACK-GPG-KEY.pub"
repo_file="/etc/yum.repos.d/saltstack.repo"
if [ ! -s "$repo_file" ]; then
if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then
cat <<_eof > "$repo_file"
[saltstack]
name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever
@ -3564,26 +3576,10 @@ _eof
fetch_url="${HTTP_VAL}://${_REPO_URL}/yum/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/"
__rpm_import_gpg "${fetch_url}${gpg_key}" || return 1
fi
return 0
}
__install_saltstack_copr_salt_repository() {
echoinfo "Adding SaltStack's COPR repository"
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
[ "$DISTRO_MAJOR_VERSION" -ge 22 ] && return 0
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__REPO_FILENAME="saltstack-salt-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo"
if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then
__fetch_url "/etc/yum.repos.d/${__REPO_FILENAME}" \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/salt/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/${__REPO_FILENAME}" || return 1
yum clean metadata || return 1
elif [ "$repo_rev" != "latest" ]; then
echowarn "saltstack.repo already exists, ignoring salt version argument."
echowarn "Use -F (forced overwrite) to install $repo_rev."
fi
return 0
@ -3688,7 +3684,8 @@ install_centos_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="python-crypto python-futures python-msgpack python-zmq python-jinja2 python-requests python-tornado"
__PACKAGES="m2crypto python-crypto python-futures python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then
__PACKAGES="${__PACKAGES} systemd-python"
@ -3705,7 +3702,12 @@ install_centos_git_deps() {
if [ "${_PY_EXE}" != "" ]; then
# If "-x" is defined, install dependencies with pip based on the Python version given.
_PIP_PACKAGES="jinja2 msgpack-python pycrypto PyYAML tornado zmq"
_PIP_PACKAGES="m2crypto jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq"
# install swig and openssl on cent6
if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then
__yum_install_noinput openssl-devel swig || return 1
fi
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
for SINGLE_PACKAGE in $_PIP_PACKAGES; do
@ -4275,7 +4277,7 @@ install_alpine_linux_stable_deps() {
install_alpine_linux_git_deps() {
install_alpine_linux_stable_deps || return 1
apk -U add python2 py-virtualenv py2-crypto py2-setuptools \
apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \
py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \
py2-zmq zeromq py2-requests || return 1
@ -4367,6 +4369,7 @@ install_alpine_linux_restart_daemons() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# Disable stdin to fix shell session hang on killing tee pipe
/sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1
@ -4382,6 +4385,7 @@ install_alpine_linux_check_services() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
__check_services_alpine salt-$fname || return 1
done
@ -4400,6 +4404,7 @@ daemons_running_alpine_linux() {
# Skip if not meant to be installed
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# shellcheck disable=SC2009
if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then
@ -4427,10 +4432,20 @@ install_amazon_linux_ami_deps() {
_USEAWS=$BS_FALSE
pkg_append="python"
repo_rev="$(echo "${STABLE_REV}" | sed 's|.*\/||g')"
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
else
repo_rev="latest"
fi
if echo $repo_rev | egrep -q '^archive'; then
year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4)
else
year=$(echo "$repo_rev" | cut -c1-4)
fi
if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$' || \
[ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ]; then
[ "$year" -gt 2016 ]; then
_USEAWS=$BS_TRUE
pkg_append="python27"
fi
@ -4477,7 +4492,8 @@ _eof
# Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64
# which is already installed
__PACKAGES="${pkg_append}-PyYAML ${pkg_append}-crypto ${pkg_append}-msgpack ${pkg_append}-zmq ${pkg_append}-jinja2 ${pkg_append}-requests"
__PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML"
__PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq"
# shellcheck disable=SC2086
__yum_install_noinput ${__PACKAGES} || return 1
@ -4630,7 +4646,7 @@ install_arch_linux_git_deps() {
fi
pacman -R --noconfirm python2-distribute
pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \
python2-markupsafe python2-msgpack python2-psutil \
python2-m2crypto python2-markupsafe python2-msgpack python2-psutil \
python2-pyzmq zeromq python2-requests python2-systemd || return 1
__git_clone_and_checkout || return 1
@ -4704,7 +4720,7 @@ install_arch_linux_post() {
/usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4732,7 +4748,7 @@ install_arch_linux_git_post() {
/usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4885,9 +4901,9 @@ install_freebsd_9_stable_deps() {
__configure_freebsd_pkg_details || return 1
fi
# Now install swig
# Now install swig30
# shellcheck disable=SC2086
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig || return 1
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig30 || return 1
# YAML module is used for generating custom master/minion configs
# shellcheck disable=SC2086
@ -4934,7 +4950,7 @@ install_freebsd_git_deps() {
# We're on the develop branch, install whichever tornado is on the requirements file
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")"
if [ "${__REQUIRED_TORNADO}" != "" ]; then
/usr/local/sbin/pkg install -y www/py-tornado || return 1
/usr/local/sbin/pkg install -y www/py-tornado4 || return 1
fi
fi
@ -5098,35 +5114,11 @@ install_freebsd_restart_daemons() {
# OpenBSD Install Functions
#
__choose_openbsd_mirror() {
OPENBSD_REPO=''
MINTIME=''
MIRROR_LIST=$(ftp -w 15 -Vao - 'https://ftp.openbsd.org/cgi-bin/ftplist.cgi?dbversion=1' | awk '/^http/ {print $1}')
for MIRROR in $MIRROR_LIST; do
MIRROR_HOST=$(echo "$MIRROR" | sed -e 's|.*//||' -e 's|+*/.*$||')
TIME=$(ping -c 1 -w 1 -q "$MIRROR_HOST" | awk -F/ '/round-trip/ { print $5 }')
[ -z "$TIME" ] && continue
echodebug "ping time for $MIRROR_HOST is $TIME"
if [ -z "$MINTIME" ]; then
FASTER_MIRROR=1
else
FASTER_MIRROR=$(echo "$TIME < $MINTIME" | bc)
fi
if [ "$FASTER_MIRROR" -eq 1 ]; then
MINTIME=$TIME
OPENBSD_REPO="$MIRROR"
fi
done
}
install_openbsd_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
__choose_openbsd_mirror || return 1
echoinfo "setting package repository to $OPENBSD_REPO with ping time of $MINTIME"
[ -n "$OPENBSD_REPO" ] || return 1
echo "${OPENBSD_REPO}" >>/etc/installurl || return 1
OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD'
echoinfo "setting package repository to $OPENBSD_REPO"
echo "${OPENBSD_REPO}" >/etc/installurl || return 1
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
@ -5226,7 +5218,7 @@ install_openbsd_restart_daemons() {
# SmartOS Install Functions
#
install_smartos_deps() {
pkgin -y install zeromq py27-crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
pkgin -y install zeromq py27-crypto py27-m2crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
@ -5456,6 +5448,13 @@ __version_lte() {
}
__zypper() {
# Check if any zypper process is running before calling zypper again.
# This is useful when a zypper call is part of a boot process and will
# wait until the zypper process is finished, such as on AWS AMIs.
while pgrep -l zypper; do
sleep 1
done
zypper --non-interactive "${@}"; return $?
}
@ -5515,7 +5514,7 @@ install_opensuse_stable_deps() {
}
install_opensuse_git_deps() {
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! __check_command_exists update-ca-certificates; then
__zypper_install ca-certificates || return 1
fi
@ -5529,7 +5528,7 @@ install_opensuse_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 python-Jinja2 python-msgpack-python python-pycrypto python-pyzmq python-xml"
__PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5594,7 +5593,7 @@ install_opensuse_stable_post() {
if [ -f /bin/systemctl ]; then
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
continue
fi
@ -5723,6 +5722,12 @@ install_suse_12_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5825,6 +5830,11 @@ install_suse_11_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086

View file

@ -94,14 +94,15 @@ class IRCClient(object):
self.allow_nicks = allow_nicks
self.disable_query = disable_query
self.io_loop = tornado.ioloop.IOLoop(make_current=False)
self.io_loop.make_current()
self._connect()
def _connect(self):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
if self.ssl is True:
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE}, io_loop=self.io_loop)
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE})
else:
self._stream = tornado.iostream.IOStream(_sock, io_loop=self.io_loop)
self._stream = tornado.iostream.IOStream(_sock)
self._stream.set_close_callback(self.on_closed)
self._stream.connect((self.host, self.port), self.on_connect)

View file

@ -81,6 +81,7 @@ def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
if all([ssl_crt, ssl_key]):
ssl_options = {"certfile": ssl_crt, "keyfile": ssl_key}
io_loop = tornado.ioloop.IOLoop(make_current=False)
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options, io_loop=io_loop)
io_loop.make_current()
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
http_server.listen(port, address=address)
io_loop.start()

View file

@ -56,7 +56,7 @@ def inet_pton(address_family, ip_string):
addr_size = ctypes.c_int(ctypes.sizeof(addr))
if WSAStringToAddressA(
ip_string,
ip_string.encode('ascii'),
address_family,
None,
ctypes.byref(addr),

View file

@ -675,13 +675,6 @@ def _virtual(osdata):
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
elif command == 'virt-what':
# if 'virt-what' returns nothing, it's either an undetected platform
# so we default just as virt-what to 'physical', otherwise use the
# platform detected/returned by virt-what
if output:
grains['virtual'] = output.lower()
break
elif command == 'prtdiag':
model = output.lower().split("\n")[0]
if 'vmware' in model:

View file

@ -36,13 +36,15 @@ try:
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
import tornado
TORNADO_50 = tornado.version_info >= (5,)
from salt.utils.async import LOOP_CLASS
import tornado.gen # pylint: disable=F0401
# Import salt libs
@ -856,7 +858,7 @@ class MWorker(SignalHandlingMultiprocessingProcess):
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
if HAS_ZMQ:
if HAS_ZMQ and not TORNADO_50:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS()
self.io_loop.make_current()

View file

@ -31,6 +31,7 @@ else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
from salt.utils.async import LOOP_CLASS
# Import third party libs
try:
@ -40,13 +41,13 @@ try:
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
import tornado
TORNADO_50 = tornado.version_info >= (5,)
HAS_RANGE = False
try:
import seco.range
@ -656,7 +657,7 @@ class SMinion(MinionBase):
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
if self.opts['transport'] == 'zeromq' and HAS_ZMQ and not TORNADO_50:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
@ -805,7 +806,7 @@ class MinionManager(MinionBase):
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
if HAS_ZMQ and not TORNADO_50:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
@ -954,7 +955,7 @@ class Minion(MinionBase):
self.periodic_callbacks = {}
if io_loop is None:
if HAS_ZMQ:
if HAS_ZMQ and not TORNADO_50:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
@ -1056,7 +1057,7 @@ class Minion(MinionBase):
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
@ -2250,13 +2251,15 @@ class Minion(MinionBase):
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2309,14 +2312,15 @@ class Minion(MinionBase):
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2372,7 +2376,7 @@ class Minion(MinionBase):
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
@ -2632,7 +2636,7 @@ class SyndicManager(MinionBase):
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
if HAS_ZMQ and not TORNADO_50:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
@ -2816,7 +2820,7 @@ class SyndicManager(MinionBase):
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1

View file

@ -453,8 +453,8 @@ def delval(key, destructive=False):
.. versionadded:: 0.17.0
Delete a grain value from the grains config file. This will just set the
grain value to `None`. To completely remove the grain run `grains.delkey`
of pass `destructive=True` to `grains.delval`.
grain value to ``None``. To completely remove the grain, run ``grains.delkey``
or pass ``destructive=True`` to ``grains.delval``.
key
The grain key from which to delete the value.

View file

@ -205,6 +205,14 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
To pass in jump options that doesn't take arguments, pass in an empty
string.
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms
of ``--protocol``, if ``--proto`` appears in an iptables command after
the appearance of ``-m policy``, it is interpreted as the ``--proto``
option of the policy extension (see the iptables-extensions(8) man
page).
CLI Examples:
.. code-block:: bash
@ -235,7 +243,6 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
salt '*' iptables.build_rule filter INPUT command=I position=3 \\
full=True match=state state=RELATED,ESTABLISHED jump=ACCEPT \\
family=ipv6
'''
if 'target' in kwargs:
kwargs['jump'] = kwargs.pop('target')
@ -249,7 +256,7 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
del kwargs[ignore]
rule = []
proto = False
protocol = False
bang_not_pat = re.compile(r'(!|not)\s?')
def maybe_add_negation(arg):
@ -273,12 +280,15 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
rule.append('{0}-o {1}'.format(maybe_add_negation('of'), kwargs['of']))
del kwargs['of']
for proto_arg in ('protocol', 'proto'):
if proto_arg in kwargs:
if not proto:
rule.append('{0}-p {1}'.format(maybe_add_negation(proto_arg), kwargs[proto_arg]))
proto = True
del kwargs[proto_arg]
if 'proto' in kwargs and kwargs.get('match') != 'policy':
kwargs['protocol'] = kwargs['proto']
del kwargs['proto']
# Handle the case 'proto' in kwargs and kwargs.get('match') == 'policy' below
if 'protocol' in kwargs:
if not protocol:
rule.append('{0}-p {1}'.format(maybe_add_negation('protocol'), kwargs['protocol']))
protocol = True
del kwargs['protocol']
if 'match' in kwargs:
match_value = kwargs['match']
@ -289,6 +299,9 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if 'name_' in kwargs and match.strip() in ('pknock', 'quota2', 'recent'):
rule.append('--name {0}'.format(kwargs['name_']))
del kwargs['name_']
if 'proto' in kwargs and kwargs.get('match') == 'policy':
rule.append('{0}--proto {1}'.format(maybe_add_negation('proto'), kwargs['proto']))
del kwargs['proto']
del kwargs['match']
if 'match-set' in kwargs:
@ -322,8 +335,8 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if multiport_arg in kwargs:
if '-m multiport' not in rule:
rule.append('-m multiport')
if not proto:
return 'Error: proto must be specified'
if not protocol:
return 'Error: protocol must be specified'
mp_value = kwargs[multiport_arg]
if isinstance(mp_value, list):
@ -1033,9 +1046,9 @@ def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
def _parser():
'''
This function contains _all_ the options I could find in man 8 iptables,
listed in the first section that I found them in. They will not all be used
by all parts of the module; use them intelligently and appropriately.
This function attempts to list all the options documented in the
iptables(8) and iptables-extensions(8) man pages. They will not all be
used by all parts of the module; use them intelligently and appropriately.
'''
add_arg = None
if sys.version.startswith('2.6'):

View file

@ -48,6 +48,10 @@ def __virtual__():
Only work on select distros which still use Red Hat's /usr/bin/service for
management of either sysvinit or a hybrid sysvinit/upstart init system.
'''
# Disable when booted with systemd
if __utils__['systemd.booted'](__context__):
return (False, 'The rh_service execution module failed to load: this system was booted with systemd.')
# Enable on these platforms only.
enable = set((
'XenServer',
@ -97,15 +101,6 @@ def __virtual__():
'RedHat-based distros >= version 7 use systemd, will not '
'load rh_service.py as virtual \'service\''
)
if __grains__['os'] == 'Amazon':
if int(osrelease_major) in (2016, 2017):
return __virtualname__
else:
return (
False,
'Amazon Linux >= version 2 uses systemd. Will not '
'load rh_service.py as virtual \'service\''
)
return __virtualname__
return (False, 'Cannot load rh_service module: OS not in {0}'.format(enable))

View file

@ -87,7 +87,7 @@ def _get_username(member):
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace')
'/', '\\')
def add(name, **kwargs):

View file

@ -128,6 +128,6 @@ def start():
raise SystemExit(1)
try:
tornado.ioloop.IOLoop.instance().start()
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
raise SystemExit(0)

View file

@ -205,14 +205,17 @@ import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.concurrent import Future
from zmq.eventloop import ioloop
import salt.ext.six as six
import tornado
# pylint: enable=import-error
TORNADO_50 = tornado.version_info >= (5,)
# instantiate the zmq IOLoop (specialized poller)
ioloop.install()
if not TORNADO_50:
import zmq.eventloop.ioloop
# instantiate the zmq IOLoop (specialized poller)
zmq.eventloop.ioloop.install()
# salt imports
import salt.ext.six as six
import salt.netapi
import salt.utils
import salt.utils.event

View file

@ -50,66 +50,76 @@ possible to reference grains within the configuration.
to trick the master into returning secret data.
Use only the 'id' grain which is verified through the minion's key/cert.
Map Mode
--------
The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- filtering for:
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users (``mode: map``), where each user is a dictionary
containing the configured string or list attributes.
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users, where each user is a dictionary containing the
configured string or list attributes.
**Configuration:**
Configuration
*************
.. code-block:: yaml
salt-users:
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: map
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- cn
- displayName
- givenName
- sn
lists:
- memberOf
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: map
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- cn
- displayName
- givenName
- sn
lists:
- memberOf
**Result:**
search_order:
- salt-users
.. code-block:: yaml
Result
******
salt-users:
- cn: cn=johndoe,ou=users,dc=company,dc=tld
displayName: John Doe
givenName: John
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team01,ou=groups,dc=company
- cn: cn=janedoe,ou=users,dc=company,dc=tld
displayName: Jane Doe
givenName: Jane
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team02,ou=groups,dc=company
.. code-block:: python
List Mode
---------
TODO: see also ``_result_to_dict()`` documentation
{
'salt-users': [
{
'cn': 'cn=johndoe,ou=users,dc=company,dc=tld',
'displayName': 'John Doe'
'givenName': 'John'
'sn': 'Doe'
'memberOf': [
'cn=it-admins,ou=groups,dc=company,dc=tld',
'cn=team01,ou=groups,dc=company'
]
},
{
'cn': 'cn=janedoe,ou=users,dc=company,dc=tld',
'displayName': 'Jane Doe',
'givenName': 'Jane',
'sn': 'Doe',
'memberOf': [
'cn=it-admins,ou=groups,dc=company,dc=tld',
'cn=team02,ou=groups,dc=company'
]
}
]
}
'''
# Import python libs
@ -123,7 +133,7 @@ from salt.exceptions import SaltInvocationError
# Import third party libs
import yaml
from jinja2 import Environment, FileSystemLoader
import jinja2
try:
import ldap # pylint: disable=W0611
HAS_LDAP = True
@ -149,10 +159,9 @@ def _render_template(config_file):
Render config template, substituting grains where found.
'''
dirname, filename = os.path.split(config_file)
env = Environment(loader=FileSystemLoader(dirname))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(dirname))
template = env.get_template(filename)
config = template.render(__grains__)
return config
return template.render(__grains__)
def _config(name, conf):
@ -186,18 +195,18 @@ def _result_to_dict(data, result, conf, source):
For example, search result:
{ saltKeyValue': ['ntpserver=ntp.acme.local', 'foo=myfoo'],
'saltList': ['vhost=www.acme.net', 'vhost=www.acme.local' }
'saltList': ['vhost=www.acme.net', 'vhost=www.acme.local'] }
is written to the pillar data dictionary as:
{ 'ntpserver': 'ntp.acme.local', 'foo': 'myfoo',
'vhost': ['www.acme.net', 'www.acme.local' }
'vhost': ['www.acme.net', 'www.acme.local'] }
'''
attrs = _config('attrs', conf) or []
lists = _config('lists', conf) or []
# TODO:
# deprecate the default 'mode: split' and make the more
# straightforward 'mode: dict' the new default
# straightforward 'mode: map' the new default
mode = _config('mode', conf) or 'split'
if mode == 'map':
data[source] = []
@ -277,21 +286,45 @@ def ext_pillar(minion_id, # pylint: disable=W0613
'''
Execute LDAP searches and return the aggregated data
'''
if os.path.isfile(config_file):
try:
#open(config_file, 'r') as raw_config:
config = _render_template(config_file) or {}
opts = yaml.safe_load(config) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'Error parsing configuration file: {0} - {1}'
if salt.log.is_console_configured():
log.warning(msg.format(config_file, err))
else:
print(msg.format(config_file, err))
config_template = None
try:
config_template = _render_template(config_file)
except jinja2.exceptions.TemplateNotFound:
log.debug('pillar_ldap: missing configuration file %s', config_file)
except Exception:
log.debug('pillar_ldap: failed to render template for %s',
config_file, exc_info=True)
if not config_template:
# We don't have a config file
return {}
try:
opts = yaml.safe_load(config_template) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'
if salt.log.is_console_configured():
log.warning(msg.format(config_file, err))
else:
print(msg.format(config_file, err))
return {}
else:
log.debug('Missing configuration file: {0}'.format(config_file))
if not isinstance(opts, dict):
log.warning(
'pillar_ldap: %s is invalidly formatted, must be a YAML '
'dictionary. See the documentation for more information.',
config_file
)
return {}
if 'search_order' not in opts:
log.warning(
'pillar_ldap: search_order missing from configuration. See the '
'documentation for more information.'
)
return {}
data = {}
for source in opts['search_order']:

View file

@ -17,7 +17,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -32,7 +32,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -48,7 +48,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -65,7 +65,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '! 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -81,7 +81,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: 'not 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -94,7 +94,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -109,7 +109,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- dports:
- 80
- 443
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -122,7 +122,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -136,7 +136,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -148,7 +148,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -161,7 +161,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -174,7 +174,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -183,6 +183,55 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- chain: INPUT
- policy: ACCEPT
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms of
``--protocol``, if ``--proto`` appears in an iptables command after the
appearance of ``-m policy``, it is interpreted as the ``--proto`` option of
the policy extension (see the iptables-extensions(8) man page).
Example rules for IPSec policy:
.. code-block:: yaml
accept_esp_in:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 10.20.0.0/24
- destination: 10.10.0.0/24
- in-interface: eth0
- match: policy
- dir: in
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_in:
iptables.append:
- use:
- iptables: accept_esp_in
- chain: FORWARD
accept_esp_out:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: ACCEPT
- source: 10.10.0.0/24
- destination: 10.20.0.0/24
- out-interface: eth0
- match: policy
- dir: out
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_out:
iptables.append:
- use:
- iptables: accept_esp_out
- chain: FORWARD
.. note::
Various functions of the ``iptables`` module use the ``--check`` option. If

View file

@ -216,6 +216,13 @@ def managed(name,
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing one
of the :ref:`salt.modules.state.sls` or :ref:`salt.modules.state.apply`
functions (see an example below).
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
@ -266,7 +273,7 @@ def managed(name,
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config debug=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
@ -334,11 +341,11 @@ def managed(name,
# the user can override the flags the equivalent CLI args
# which have higher precedence
test = __opts__.get('test', test)
debug = __opts__.get('debug', debug)
commit = __opts__.get('commit', commit)
replace = __opts__.get('replace', replace) # this might be a bit risky
skip_verify = __opts__.get('skip_verify', skip_verify)
test = __salt__['config.merge']('test', test)
debug = __salt__['config.merge']('debug', debug)
commit = __salt__['config.merge']('commit', commit)
replace = __salt__['config.merge']('replace', replace) # this might be a bit risky
skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
config_update_ret = _update_config(template_name,
template_source=template_source,

View file

@ -760,6 +760,14 @@ def installed(name,
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
@ -810,12 +818,7 @@ def installed(name,
no_cache_dir=no_cache_dir
)
# Check the retcode for success, but don't fail if using pip1 and the package is
# already present. Pip1 returns a retcode of 1 (instead of 0 for pip2) if you run
# "pip install" without any arguments. See issue #21845.
if pip_install_call and \
(pip_install_call.get('retcode', 1) == 0 or pip_install_call.get('stdout', '').startswith(
'You must give at least one requirement to install')):
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
@ -823,6 +826,8 @@ def installed(name,
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',

View file

@ -130,11 +130,11 @@ class IPCServer(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -197,10 +197,10 @@ class IPCServer(object):
log.trace('IPCServer: Handling connection '
'to address: {0}'.format(address))
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
)
self.io_loop.spawn_callback(self.handle_stream, stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
@ -296,7 +296,7 @@ class IPCClient(object):
else:
if hasattr(self, '_connecting_future'):
# read previous future result to prevent the "unhandled future exception" error
self._connecting_future.exc_info() # pylint: disable=E0203
self._connecting_future.exception() # pylint: disable=E0203
future = tornado.concurrent.Future()
self._connecting_future = future
self._connect(timeout=timeout)
@ -330,10 +330,10 @@ class IPCClient(object):
break
if self.stream is None:
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
)
try:
log.trace('IPCClient: Connecting to socket: {0}'.format(self.socket_path))
@ -511,11 +511,11 @@ class IPCMessagePublisher(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -546,17 +546,14 @@ class IPCMessagePublisher(object):
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
kwargs = {}
if self.opts['ipc_write_buffer'] > 0:
kwargs['max_write_buffer_size'] = self.opts['ipc_write_buffer']
log.trace('Setting IPC connection write buffer: {0}'.format((self.opts['ipc_write_buffer'])))
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
**kwargs
)
self.streams.add(stream)
@ -756,9 +753,9 @@ class IPCMessageSubscriber(IPCClient):
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
if self._read_sync_future is not None:
self._read_sync_future.exc_info()
self._read_sync_future.exception()
if self._read_stream_future is not None:
self._read_stream_future.exc_info()
self._read_stream_future.exception()
def __del__(self):
if IPCMessageSubscriber in globals():

View file

@ -32,6 +32,7 @@ import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
import salt.ext.six as six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
@ -556,6 +557,11 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
@ -742,15 +748,23 @@ if USE_LOAD_BALANCER:
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
t = threading.Thread(target=self.socket_queue_thread)
t.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
client_socket, address = self.socket_queue.get(True, None)
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
@ -764,10 +778,9 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None, io_loop=None):
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(
resolver=resolver, io_loop=io_loop)
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
@ -783,7 +796,6 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
return stream.connect(addr)
@ -842,8 +854,8 @@ class SaltMessageClient(object):
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._tcp_client = TCPClientKeepAlive(
opts, io_loop=self.io_loop, resolver=resolver)
with salt.utils.async.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
@ -874,7 +886,7 @@ class SaltMessageClient(object):
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
self._read_until_future.exception()
if (not self._stream_return_future.done() and
self.io_loop != tornado.ioloop.IOLoop.current(
instance=False)):
@ -932,9 +944,10 @@ class SaltMessageClient(object):
if self._closing:
break
try:
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
with salt.utils.async.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
self._connecting_future.set_result(True)
break
except Exception as e:
@ -1126,7 +1139,7 @@ class Subscriber(object):
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
self._read_until_future.exception()
def __del__(self):
self.close()
@ -1137,7 +1150,8 @@ class PubServer(tornado.tcpserver.TCPServer, object):
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()

View file

@ -50,6 +50,7 @@ PYZMQ_VERSION = tuple(map(int, zmq.pyzmq_version().split('.')))
import tornado
import tornado.gen
import tornado.concurrent
TORNADO_50 = tornado.version_info >= (5,)
# Import third party libs
try:
@ -78,7 +79,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
zmq.eventloop.ioloop.install()
if not TORNADO_50:
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
@ -146,7 +148,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
zmq.eventloop.ioloop.install()
if not TORNADO_50:
zmq.eventloop.ioloop.install()
self._io_loop = tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
@ -290,7 +293,8 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
zmq.eventloop.ioloop.install()
if not TORNADO_50:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest()
@ -897,7 +901,8 @@ class AsyncReqMessageClient(object):
self.addr = addr
self.linger = linger
if io_loop is None:
zmq.eventloop.ioloop.install()
if not TORNADO_50:
zmq.eventloop.ioloop.install()
tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop

View file

@ -13,12 +13,19 @@ try:
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
import tornado
TORNADO_50 = tornado.version_info >= (5,)
if HAS_ZMQ and not TORNADO_50:
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
else:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
import contextlib

View file

@ -889,6 +889,7 @@ class Schedule(object):
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
self.functions.pack['__context__']['retcode'] = 0
ret['return'] = self.functions[func](*args, **kwargs)
# runners do not provide retcode

View file

@ -54,13 +54,17 @@ import salt.log.setup
from salt.utils.odict import OrderedDict
# Define the pytest plugins we rely on
pytest_plugins = ['pytest_catchlog', 'tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
pytest_plugins = ['tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
# Define where not to collect tests from
collect_ignore = ['setup.py']
log = logging.getLogger('salt.testsuite')
# Reset logging root handlers
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
def pytest_tempdir_basename():
'''
@ -196,25 +200,6 @@ def pytest_configure(config):
called after command line options have been parsed
and all plugins and initial conftest files been loaded.
'''
# Configure the console logger based on the catch_log settings.
# Most importantly, shutdown Salt's null, store and temporary logging queue handlers
catch_log = config.pluginmanager.getplugin('_catch_log')
cli_logging_handler = catch_log.log_cli_handler
# Add the pytest_catchlog CLI log handler to the logging root
logging.root.addHandler(cli_logging_handler)
cli_level = cli_logging_handler.level
cli_level = config._catchlog_log_cli_level
cli_format = cli_logging_handler.formatter._fmt
cli_date_format = cli_logging_handler.formatter.datefmt
# Setup the console logger which shuts down the null and the temporary queue handlers
salt.log.setup_console_logger(
log_level=salt.log.setup.LOG_VALUES_TO_LEVELS.get(cli_level, 'error'),
log_format=cli_format,
date_format=cli_date_format
)
# Disable the store logging queue handler
salt.log.setup.setup_extended_logging({'extension_modules': ''})
config.addinivalue_line('norecursedirs', os.path.join(CODE_DIR, 'templates'))
config.addinivalue_line(
'markers',

View file

@ -21,6 +21,7 @@ import logging
# Import salt libs
import salt.utils.event
import salt.utils.async
# Import 3rd-party libs
from tornado import gen
@ -69,11 +70,11 @@ class PyTestEngine(object):
self.sock.bind(('localhost', port))
# become a server socket
self.sock.listen(5)
netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
def handle_connection(self, connection, address):
log.warning('Accepted connection from %s. Role: %s', address, self.opts['__role'])

View file

@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'Tests for only Windows')
class FirewallTest(ModuleCase):
    '''
    Validate windows firewall module
    '''

    def _pre_firewall_status(self, pre_run):
        '''
        Restore every firewall profile to the enabled/disabled state that
        was captured in ``pre_run`` before the test mutated it.
        '''
        post_run = self.run_function('firewall.get_config')
        # compare the status of the firewall before and after test
        # and re-enable or disable depending on status before test run
        for net in ('Domain', 'Public', 'Private'):
            if post_run[net] == pre_run[net]:
                continue
            if pre_run[net]:
                self.assertTrue(self.run_function('firewall.enable', profile=net))
            else:
                self.assertTrue(self.run_function('firewall.disable', profile=net))

    @destructiveTest
    def test_firewall_get_config(self):
        '''
        test firewall.get_config
        '''
        pre_run = self.run_function('firewall.get_config')
        # ensure all networks are enabled then test status
        self.assertTrue(self.run_function('firewall.enable', profile='allprofiles'))
        config = self.run_function('firewall.get_config')
        for net in ('Domain', 'Public', 'Private'):
            self.assertTrue(config[net])
        self._pre_firewall_status(pre_run)

    @destructiveTest
    def test_firewall_disable(self):
        '''
        test firewall.disable
        '''
        pre_run = self.run_function('firewall.get_config')
        network = 'Private'
        # make sure the profile starts enabled so disabling it is observable
        if not self.run_function('firewall.get_config')[network]:
            self.assertTrue(self.run_function('firewall.enable', profile=network))
        self.assertTrue(self.run_function('firewall.disable', profile=network))
        self.assertFalse(self.run_function('firewall.get_config')[network])
        self._pre_firewall_status(pre_run)

    @destructiveTest
    def test_firewall_enable(self):
        '''
        test firewall.enable
        '''
        pre_run = self.run_function('firewall.get_config')
        network = 'Private'
        # make sure the profile starts disabled so enabling it is observable
        if self.run_function('firewall.get_config')[network]:
            self.assertTrue(self.run_function('firewall.disable', profile=network))
        self.assertTrue(self.run_function('firewall.enable', profile=network))
        self.assertTrue(self.run_function('firewall.get_config')[network])
        self._pre_firewall_status(pre_run)

    def test_firewall_get_rule(self):
        '''
        test firewall.get_rule
        '''
        rule = 'Remote Event Log Management (NP-In)'
        ret = self.run_function('firewall.get_rule', [rule])
        for check in ('Private', 'LocalPort', 'RemotePort'):
            self.assertIn(check, ret[rule])

    @destructiveTest
    def test_firewall_add_delete_rule(self):
        '''
        test firewall.add_rule and delete_rule
        '''
        rule = 'test rule'
        port = '8080'

        # test adding firewall rule
        self.run_function('firewall.add_rule', [rule, port])
        ret = self.run_function('firewall.get_rule', [rule])
        self.assertIn(rule, ret[rule])
        self.assertIn(port, ret[rule])

        # test deleting firewall rule
        self.assertTrue(self.run_function('firewall.delete_rule', [rule, port]))
        ret = self.run_function('firewall.get_rule', [rule])
        self.assertNotIn(rule, ret)
        self.assertNotIn(port, ret)
        self.assertIn('No rules match the specified criteria.', ret)

View file

@ -26,6 +26,7 @@ class NetworkTest(ModuleCase):
for out in exp_out:
self.assertIn(out, ret.lower())
@skipIf(salt.utils.is_darwin(), 'not supported on macosx')
def test_network_netstat(self):
'''
network.netstat

View file

@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'Tests for only Windows')
class NTPTest(ModuleCase):
    '''
    Validate windows ntp module
    '''

    @destructiveTest
    def test_ntp_set_servers(self):
        '''
        test ntp get and set servers
        '''
        ntp_srv = 'pool.ntp.org'
        # setting must succeed, and the server must then be the first entry
        self.assertTrue(self.run_function('ntp.set_servers', [ntp_srv]))
        servers = self.run_function('ntp.get_servers')
        self.assertEqual(ntp_srv, servers[0])

View file

@ -333,3 +333,65 @@ class SystemModuleTest(ModuleCase):
if self.run_function('grains.get', ['os_family']) == 'NILinuxRT':
self.assertTrue(self.run_function('system._has_settable_hwclock'))
self.assertTrue(self._hwclock_has_compare())
@skipIf(not salt.utils.is_windows(), 'These tests can only be run on windows')
class WinSystemModuleTest(ModuleCase):
    '''
    Validate the date/time functions in the win_system module
    '''

    def test_get_computer_name(self):
        '''
        Test getting the computer name
        '''
        import socket
        hostname = socket.gethostname()
        ret = self.run_function('system.get_computer_name')
        self.assertTrue(isinstance(ret, str))
        # the module must report the same name the OS reports
        self.assertEqual(hostname, ret)

    @destructiveTest
    def test_set_computer_desc(self):
        '''
        Test setting the computer description
        '''
        desc = 'test description'
        set_desc = self.run_function('system.set_computer_desc', [desc])
        self.assertTrue(set_desc)
        # round-trip: what we set is what we get back
        get_desc = self.run_function('system.get_computer_desc')
        self.assertEqual(set_desc['Computer Description'], get_desc)

    def test_get_system_time(self):
        '''
        Test getting the system time
        '''
        ret = self.run_function('system.get_system_time')
        # compare only hours:minutes; drop the trailing seconds field
        expected = datetime.datetime.now().strftime("%I:%M")
        self.assertEqual(expected, ret.rsplit(':', 1)[0])

    @destructiveTest
    def test_set_system_time(self):
        '''
        Test setting the system time
        '''
        test_time = '10:55'
        self.run_function('system.set_system_time', [test_time + ' AM'])
        # read it back without the seconds component
        get_time = self.run_function('system.get_system_time').rsplit(':', 1)[0]
        self.assertEqual(get_time, test_time)

    def test_get_system_date(self):
        '''
        Test getting system date
        '''
        expected = datetime.datetime.now().date().strftime("%m/%d/%Y")
        ret = self.run_function('system.get_system_date')
        self.assertEqual(expected, ret)

    @destructiveTest
    def test_set_system_date(self):
        '''
        Test setting system date
        '''
        self.assertTrue(self.run_function('system.set_system_date', ['3/25/2018']))

View file

@ -590,7 +590,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
self.assertEqual(
ret[key]['comment'],
('Python package carbon < 1.3 was already installed\n'
'All packages were successfully installed'))
'All specified packages are already installed'))
break
else:
raise Exception('Expected state did not run')

View file

@ -121,7 +121,7 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
data = '\n'.join(data)
self.assertIn('minion', data)
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
arg_str = '-c {0} -t {1} {2}'.format(self.get_config_dir(), timeout, arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, timeout=25,

View file

@ -21,6 +21,7 @@ import logging
import os
import signal
import socket
import subprocess
import sys
import tempfile
import threading
@ -54,6 +55,31 @@ from tests.support.paths import FILES, TMP
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
def no_symlinks():
'''
Check if git is installed and has symlinks enabled in the configuration.
'''
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ''
try:
output = subprocess.check_output('git config --get core.symlinks', shell=True)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == 'true':
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
'''
Mark a test case as a destructive test for example adding or removing users

View file

@ -220,6 +220,8 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
repo.index.add([x for x in os.listdir(self.tmp_repo_dir)
if x != '.git'])
repo.index.commit('Test')
if hasattr(repo, 'close'):
repo.close()
gitfs.update()
def tearDown(self):

View file

@ -696,3 +696,21 @@ PATCHLEVEL = 3
MagicMock(return_value=resolv_mock)):
get_dns = core.dns()
self.assertEqual(get_dns, ret)
def test_core_virtual(self):
    '''
    test virtual grain with cmd virt-what
    '''
    virt = 'kvm'
    # fake a successful `virt-what` run whose stdout names the hypervisor
    cmd_result = {'pid': 78,
                  'retcode': 0,
                  'stderr': '',
                  'stdout': virt}
    with patch.object(salt.utils, 'is_windows',
                      MagicMock(return_value=False)), \
            patch.object(salt.utils, 'which',
                         MagicMock(return_value=True)), \
            patch.dict(core.__salt__,
                       {'cmd.run_all': MagicMock(return_value=cmd_result)}):
        ret = core._virtual({'kernel': 'test', })
        self.assertEqual(ret['virtual'], virt)

View file

@ -19,11 +19,10 @@
# Import Python Libs
from __future__ import absolute_import
import os
import errno
import subprocess
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import no_symlinks
from tests.support.mock import (
MagicMock,
patch,
@ -35,31 +34,6 @@ from tests.support.mock import (
from salt.modules.inspectlib.collector import Inspector
HAS_SYMLINKS = None
def no_symlinks():
'''
Check if git is installed and has symlinks enabled in the configuration.
'''
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ''
try:
output = subprocess.check_output('git config --get core.symlinks', shell=True)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == 'true':
HAS_SYMLINKS = True
return not HAS_SYMLINKS
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(no_symlinks(), "Git missing 'core.symlinks=true' config")
class InspectorCollectorTestCase(TestCase):

View file

@ -60,38 +60,38 @@ class IptablesTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(iptables.build_rule(**{'if': 'not eth0'}),
'! -i eth0')
self.assertEqual(iptables.build_rule(**{'proto': 'tcp', 'syn': '!'}),
self.assertEqual(iptables.build_rule(**{'protocol': 'tcp', 'syn': '!'}),
'-p tcp ! --syn')
self.assertEqual(iptables.build_rule(dports=[80, 443], proto='tcp'),
self.assertEqual(iptables.build_rule(dports=[80, 443], protocol='tcp'),
'-p tcp -m multiport --dports 80,443')
self.assertEqual(iptables.build_rule(dports='80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(dports='80,443', protocol='tcp'),
'-p tcp -m multiport --dports 80,443')
# Should it really behave this way?
self.assertEqual(iptables.build_rule(dports=['!80', 443],
proto='tcp'),
protocol='tcp'),
'-p tcp -m multiport ! --dports 80,443')
self.assertEqual(iptables.build_rule(dports='!80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(dports='!80,443', protocol='tcp'),
'-p tcp -m multiport ! --dports 80,443')
self.assertEqual(iptables.build_rule(sports=[80, 443], proto='tcp'),
self.assertEqual(iptables.build_rule(sports=[80, 443], protocol='tcp'),
'-p tcp -m multiport --sports 80,443')
self.assertEqual(iptables.build_rule(sports='80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(sports='80,443', protocol='tcp'),
'-p tcp -m multiport --sports 80,443')
self.assertEqual(iptables.build_rule('filter', 'INPUT', command='I',
position='3', full=True,
dports='proto', jump='ACCEPT'),
'Error: proto must be specified')
dports='protocol', jump='ACCEPT'),
'Error: protocol must be specified')
self.assertEqual(iptables.build_rule('filter', 'INPUT', command='I',
position='3', full=True,
sports='proto', jump='ACCEPT'),
'Error: proto must be specified')
sports='protocol', jump='ACCEPT'),
'Error: protocol must be specified')
self.assertEqual(iptables.build_rule('', 'INPUT', command='I',
position='3', full='True',

View file

@ -10,6 +10,7 @@ from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import salt libs
import salt.ext.six
import salt.utils
import salt.modules.pip as pip
from salt.exceptions import CommandExecutionError
@ -300,7 +301,9 @@ class PipTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
if salt.utils.is_windows():
venv_path = 'c:\\test_env'
bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe').encode('string-escape')
bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe')
if salt.ext.six.PY2:
bin_path = bin_path.encode('string-escape')
else:
venv_path = '/test_env'
bin_path = os.path.join(venv_path, 'bin', 'pip')

View file

@ -93,18 +93,16 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
comment_line = '# this is a comment \n'
# Write out the authorized key to a temporary file
if salt.utils.is_windows():
temp_file = tempfile.NamedTemporaryFile(delete=False)
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
# Add comment
temp_file.write(comment_line)
# Add empty line for #41335
temp_file.write(empty_line)
temp_file.write('{0} {1} {2} {3}'.format(options, enc, key, email))
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
temp_file.close()
with salt.utils.fopen(temp_file.name, 'w') as _fh:
# Add comment
_fh.write(comment_line)
# Add empty line for #41335
_fh.write(empty_line)
_fh.write('{0} {1} {2} {3}'.format(options, enc, key, email))
with patch.dict(ssh.__salt__, {'user.info': MagicMock(return_value={})}):
with patch('salt.modules.ssh._get_config_file', MagicMock(return_value=temp_file.name)):
ssh._replace_auth_key('foo', key, config=temp_file.name)

View file

@ -520,6 +520,21 @@ class MysqlPillarTestCase(TestCase):
)
def test_301_process_results_with_lists(self):
'''
Validates the following results:
{'a': [
{'c': [
{'e': 1},
{'g': 2}
]
},
{'h': [
{'j': 3, 'k': 4}
]
}
]}
'''
return_data = mysql.MySQLExtPillar()
return_data.as_list = False
return_data.with_lists = [1, 3]
@ -529,22 +544,49 @@ class MysqlPillarTestCase(TestCase):
['a', 'b', 'c', 'f', 'g', 2],
['a', 'z', 'h', 'y', 'j', 3],
['a', 'z', 'h', 'y', 'k', 4]])
self.assertEqual(
{'a': [
{'c': [
{'e': 1},
{'g': 2}
]
},
{'h': [
{'j': 3, 'k': 4}
]
}
]},
return_data.result
)
assert 'a' in return_data.result
for x in return_data.result['a']:
if 'c' in x:
assert list(x.keys()) == ['c'], x.keys()
for y in x['c']:
if 'e' in y:
assert list(y.keys()) == ['e']
assert y['e'] == 1
elif 'g' in y:
assert list(y.keys()) == ['g']
assert y['g'] == 2
else:
raise ValueError("Unexpected value {0}".format(y))
elif 'h' in x:
assert len(x['h']) == 1
for y in x['h']:
if 'j' in y:
assert len(y.keys()) == 2
assert y['j'] == 3
elif 'h' in y:
assert len(y.keys()) == 2
assert y['k'] == 4
else:
raise ValueError("Unexpected value {0}".format(y))
else:
raise ValueError("Unexpected value {0}".format(x))
def test_302_process_results_with_lists_consecutive(self):
'''
Validates the following results:
{'a': [
[[
{'e': 1},
{'g': 2}
]
],
[[
{'j': 3, 'k': 4}
]
]
]}
'''
return_data = mysql.MySQLExtPillar()
return_data.as_list = False
return_data.with_lists = [1, 2, 3]
@ -554,17 +596,31 @@ class MysqlPillarTestCase(TestCase):
['a', 'b', 'c', 'f', 'g', 2],
['a', 'z', 'h', 'y', 'j', 3],
['a', 'z', 'h', 'y', 'k', 4]])
self.assertEqual(
{'a': [
[[
{'e': 1},
{'g': 2}
]
],
[[
{'j': 3, 'k': 4}
]
]
]},
return_data.result
)
assert 'a' in return_data.result
for x in return_data.result['a']:
assert len(x) == 1
if len(x[0][0]) == 1:
for y in x[0]:
if 'e' in y:
assert list(y.keys()) == ['e']
assert y['e'] == 1
elif 'g' in y:
assert list(y.keys()) == ['g']
assert y['g'] == 2
else:
raise ValueError("Unexpected value {0}".format(y))
elif len(x[0][0]) == 2:
for y in x[0]:
if 'j' in y:
assert len(y.keys()) == 2
assert y['j'] == 3
elif 'k' in y:
assert len(y.keys()) == 2
assert y['k'] == 4
else:
raise ValueError(
"Unexpected value {0}".format(len(x[0][0]))
)
else:
raise ValueError("Unexpected value {0}".format(x))

View file

@ -207,7 +207,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'successfully installed',
'packages are already installed',
{'test': ret}
)
@ -241,7 +241,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'were successfully installed',
'packages are already installed',
{'test': ret}
)
@ -264,7 +264,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'were successfully installed',
'packages are already installed',
{'test': ret}
)

View file

@ -11,6 +11,7 @@ import os
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.helpers import skip_if_not_root
# Import salt libs
from salt import minion
@ -24,7 +25,7 @@ __opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MinionTestCase(TestCase, tornado.testing.AsyncTestCase):
class MinionTestCase(TestCase, tornado.testing.AsyncTestCase, AdaptedConfigurationTestCaseMixin):
def test_invalid_master_address(self):
with patch.dict(__opts__, {'ipv6': False, 'master': float('127.0'), 'master_port': '4555', 'retry_dns': False}):
self.assertRaises(SaltSystemExit, minion.resolve_dns, __opts__)
@ -145,7 +146,7 @@ class MinionTestCase(TestCase, tornado.testing.AsyncTestCase):
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts = self.get_config('minion', from_scratch=True)
mock_opts['beacons_before_connect'] = True
try:
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
@ -169,7 +170,7 @@ class MinionTestCase(TestCase, tornado.testing.AsyncTestCase):
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts = self.get_config('minion', from_scratch=True)
mock_opts['scheduler_before_connect'] = True
try:
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())

View file

@ -8,8 +8,9 @@ from shutil import rmtree
from tempfile import mkdtemp
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.unit import TestCase, skipIf
from tests.support.paths import TMP
from tests.support.helpers import no_symlinks
# Import salt libs
import salt.utils
@ -18,6 +19,7 @@ import salt.utils.find
class TestUtils(TestCase):
@skipIf(no_symlinks(), "Git missing 'core.symlinks=true' config")
def test_safe_walk_symlink_recursion(self):
tmp = mkdtemp(dir=TMP)
try:

View file

@ -10,18 +10,21 @@ integration.modules.test_config
integration.modules.test_cp
integration.modules.test_data
integration.modules.test_disk
integration.modules.test_firewall
integration.modules.test_git
integration.modules.test_grains
integration.modules.test_groupadd
integration.modules.test_hosts
integration.modules.test_mine
integration.modules.test_network
integration.modules.test_ntp
integration.modules.test_pillar
integration.modules.test_pkg
integration.modules.test_publish
integration.modules.test_state
integration.modules.test_status
integration.modules.test_sysmod
integration.modules.test_system
integration.modules.test_test
integration.modules.test_useradd
integration.reactor.test_reactor