Merge branch 'develop' into pypsexec

This commit is contained in:
Daniel Wozniak 2018-05-04 09:57:44 -07:00 committed by GitHub
commit 439806b2bb
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
88 changed files with 3735 additions and 1416 deletions

32
.github/CODEOWNERS vendored
View file

@ -9,43 +9,45 @@
# See https://help.github.com/articles/about-codeowners/
# for more info about the CODEOWNERS file
# This file uses an fnmatch-style matching pattern.
# Team Boto
salt/**/*boto* @saltstack/team-boto
# Team Core
salt/auth/ @saltstack/team-core
salt/cache/ @saltstack/team-core
salt/cli/ @saltstack/team-core
salt/auth/* @saltstack/team-core
salt/cache/* @saltstack/team-core
salt/cli/* @saltstack/team-core
salt/client/* @saltstack/team-core
salt/config/* @saltstack/team-core
salt/daemons/ @saltstack/team-core
salt/pillar/ @saltstack/team-core
salt/daemons/* @saltstack/team-core
salt/pillar/* @saltstack/team-core
salt/loader.py @saltstack/team-core
salt/payload.py @saltstack/team-core
salt/**/master* @saltstack/team-core
salt/**/minion* @saltstack/team-core
# Team Cloud
salt/cloud/ @saltstack/team-cloud
salt/utils/openstack/ @saltstack/team-cloud
salt/cloud/* @saltstack/team-cloud
salt/utils/openstack/* @saltstack/team-cloud
salt/utils/aws.py @saltstack/team-cloud
salt/**/*cloud* @saltstack/team-cloud
# Team NetAPI
salt/cli/api.py @saltstack/team-netapi
salt/client/netapi.py @saltstack/team-netapi
salt/netapi/ @saltstack/team-netapi
salt/netapi/* @saltstack/team-netapi
# Team Network
salt/proxy/ @saltstack/team-proxy
salt/proxy/* @saltstack/team-proxy
# Team SPM
salt/cli/spm.py @saltstack/team-spm
salt/spm/ @saltstack/team-spm
salt/spm/* @saltstack/team-spm
# Team SSH
salt/cli/ssh.py @saltstack/team-ssh
salt/client/ssh/ @saltstack/team-ssh
salt/client/ssh/* @saltstack/team-ssh
salt/runners/ssh.py @saltstack/team-ssh
salt/**/thin.py @saltstack/team-ssh
@ -61,8 +63,12 @@ salt/**/*xfs* @saltstack/team-suse
salt/**/*zypper* @saltstack/team-suse
# Team Transport
salt/transport/ @saltstack/team-transport
salt/transport/* @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport
# Team Windows
salt/**/*win* @saltstack/team-windows
salt/*/*win* @saltstack/team-windows
salt/modules/reg.py @saltstack/team-windows
salt/states/reg.py @saltstack/team-windows
tests/*/*win* @saltstack/team-windows
tests/*/test_reg.py @saltstack/team-windows

4
.github/stale.yml vendored
View file

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 730 is approximately 2 years
daysUntilStale: 730
# 720 is approximately 2 years
daysUntilStale: 720
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7

View file

@ -39,7 +39,7 @@ provisioner:
max_retries: 2
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: 2018.3
branch: master
repo: git
testingdir: /testing
salt_copy_filter:

View file

@ -323,6 +323,7 @@ rst_prolog = """\
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a

View file

@ -248,6 +248,11 @@ subnet
Optional. The subnet inside the virtual network that the VM will be spun up in.
Default is ``default``.
allocate_public_ip
------------------
Optional. Default is ``False``. If set to ``True``, a public IP will
be created and assigned to the VM.
load_balancer
-------------
Optional. The load-balancer for the VM's network interface to join. If

View file

@ -238,7 +238,7 @@ presence of the instance will be managed statefully.
my-instance-name:
cloud.present:
- provider: my-ec2-config
- cloud_provider: my-ec2-config
- image: ami-1624987f
- size: 't1.micro'
- ssh_username: ec2-user

View file

@ -60,7 +60,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
isolated into separate branches.
If you're working on a bug or documentation fix, create your branch from
the oldest release branch that contains the bug or requires the documentation
the oldest **supported** main release branch that contains the bug or requires the documentation
update. See :ref:`Which Salt Branch? <which-salt-branch>`.
.. code-block:: bash
@ -212,8 +212,11 @@ There are three different kinds of branches in use: develop, main release
branches, and dot release branches.
- All feature work should go into the ``develop`` branch.
- Bug fixes and documentation changes should go into the oldest supported
**main** release branch affected by the the bug or documentation change.
- Bug fixes and documentation changes should go into the oldest **supported
main** release branch affected by the the bug or documentation change (you
can use the blame button in github to figure out when the bug was introduced).
Supported releases are the last 2 releases. For example, if the latest release
is 2018.3, the last two release are 2018.3 and 2017.7.
Main release branches are named after a year and month, such as
``2016.11`` and ``2017.7``.
- Hot fixes, as determined by SaltStack's release team, should be submitted
@ -247,7 +250,7 @@ Main Release Branches
=====================
The current release branch is the most recent stable release. Pull requests
containing bug fixes or documentation changes should be made against the main
containing bug fixes or documentation changes should be made against the oldest supported main
release branch that is affected.
The branch name will be a date-based name such as ``2016.11``.

View file

@ -221,8 +221,9 @@ The best way to create new Formula repositories for now is to create a
repository in your own account on GitHub and notify a SaltStack employee when
it is ready. We will add you to the Contributors team on the
`saltstack-formulas`_ organization and help you transfer the repository over.
Ping a SaltStack employee on IRC (``#salt`` on Freenode) or send an email to
the `salt-users`_ mailing list.
Ping a SaltStack employee on IRC (``#salt`` on Freenode), join the
``#formulas`` channel on the `salt-slack`_ or send an email to the
`salt-users`_ mailing list.
There are a lot of repositories in that organization! Team members can manage
which repositories they are subscribed to on GitHub's watching page:

View file

@ -20,6 +20,9 @@ Statistics:
Changes:
This release includes a CVE Fix:
CVE-2017-7893: Compromised salt-minions can impersonate the salt-master. (Discovery credit: Frank Spierings)
- **PR** `#39855`_: (*Foxlik*) Use regular expression instead of split when replacing authorized_keys
@ *2017-03-22T18:28:32Z*

View file

@ -0,0 +1,14 @@
===========================
Salt 2018.3.1 Release Notes
===========================
Version 2018.3.1 is a bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
Changes to Slack Engine pillars
-------------------------------
When using ``groups_pillar_name`` for the slack engine, the engine should be
used as part of a salt-minion process running on the master. This will allow
the minion to have pillars assigned to it, and will still allow the engine to
create a LocalClient connection to the master ipc sockets to control
environments.

View file

@ -434,7 +434,7 @@ The ``vault`` utils module had the following changes:
Please see the :mod:`vault execution module <salt.modules.vault>` documentation for
details on the new configuration schema.
=======
=====================
SaltSSH major updates
=====================
@ -471,6 +471,7 @@ a minimal tarball using runners and include that. But this is only possible, whe
Salt version is also available on the Master machine, although does not need to be directly
installed together with the older Python interpreter.
========================
Salt-Cloud major updates
========================
@ -486,3 +487,23 @@ been deprecated in favor of ``pypsexec``.
Salt-Cloud has deprecated the use ``impacket`` in favor of ``smbprotocol``.
This changes was made because ``impacket`` is not compatible with Python 3.
====================
State Module Changes
====================
states.saltmod
--------------
The 'test' option now defaults to None. A value of True or False set here is
passed to the state being run and can be used to override a ``test:True`` option
set in the minion's config file. In previous releases the minion's config option
would take precedence and it would be impossible to run an orchestration on a
minion with test mode set to True in the config file.
If a minion is not in permanent test mode due to the config file and the 'test'
argument here is left as None then a value of ``test=True`` on the command-line is
passed correctly to the minion to run an orchestration in test mode. At present
it is not possible to pass ``test=False`` on the command-line to override a
minion in permanent test mode and so the ``test:False`` option must still be set
in the orchestration file.

View file

@ -118,15 +118,15 @@ xcopy /Q /Y "%SrcDir%\conf\minion" "%CnfDir%\"
:: Make sure the "prereq" directory exists
If NOT Exist "%PreDir%" mkdir "%PreDir%"
:: Set the location of the nssm to download
Set Url64="https://repo.saltstack.com/windows/dependencies/64/nssm-2.24-101-g897c7ad.exe"
Set Url32="https://repo.saltstack.com/windows/dependencies/32/nssm-2.24-101-g897c7ad.exe"
:: Set the location of the ssm to download
Set Url64="https://repo.saltstack.com/windows/dependencies/64/ssm-2.24-103-gdee49fc.exe"
Set Url32="https://repo.saltstack.com/windows/dependencies/32/ssm-2.24-103-gdee49fc.exe"
:: Check for 64 bit by finding the Program Files (x86) directory
If Defined ProgramFiles(x86) (
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url64%" -file "%BldDir%\nssm.exe"
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url64%" -file "%BinDir%\ssm.exe"
) Else (
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url32%" -file "%BldDir%\nssm.exe"
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url32%" -file "%BinDir%\ssm.exe"
)
@echo.

View file

@ -658,11 +658,11 @@ Section -Post
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
# Register the Salt-Minion Service
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"
nsExec::Exec "$INSTDIR\bin\ssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
nsExec::Exec "$INSTDIR\bin\ssm.exe set salt-minion Description Salt Minion from saltstack.com"
nsExec::Exec "$INSTDIR\bin\ssm.exe set salt-minion Start SERVICE_AUTO_START"
nsExec::Exec "$INSTDIR\bin\ssm.exe set salt-minion AppStopMethodConsole 24000"
nsExec::Exec "$INSTDIR\bin\ssm.exe set salt-minion AppStopMethodWindow 2000"
${IfNot} $ConfigType_State == "Existing Config" # If not using Existing Config
Call updateMinionConfig
@ -680,7 +680,7 @@ Function .onInstSuccess
# If StartMinionDelayed is 1, then set the service to start delayed
${If} $StartMinionDelayed == 1
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
nsExec::Exec "$INSTDIR\bin\ssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
${EndIf}
# If start-minion is 1, then start the service

View file

@ -1,4 +1,4 @@
-r base.txt
-r base-py2.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -6,7 +6,7 @@ boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest
pytest>=3.5.0
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
testinfra>=1.7.0

View file

@ -1,4 +1,4 @@
-r base.txt
-r base-py3.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -11,7 +11,7 @@ moto>=0.3.6
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
pytest
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
testinfra>=1.7.0

View file

@ -1,3 +1,3 @@
pytest
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir

View file

@ -74,7 +74,7 @@ cluster.startup_nodes:
A list of host, port dictionaries pointing to cluster members. At least one is required
but multiple nodes are better
.. code-block::yaml
.. code-block:: yaml
cache.redis.cluster.startup_nodes
- host: redis-member-1
@ -100,9 +100,15 @@ db: ``'0'``
password:
Redis connection password.
unix_socket_path:
.. versionadded:: 2018.3.1
Path to a UNIX socket for access. Overrides `host` / `port`.
Configuration Example:
.. code-block::yaml
.. code-block:: yaml
cache.redis.host: localhost
cache.redis.port: 6379
@ -115,7 +121,7 @@ Configuration Example:
Cluster Configuration Example:
.. code-block::yaml
.. code-block:: yaml
cache.redis.cluster_mode: true
cache.redis.cluster.skip_full_coverage_check: true
@ -205,6 +211,7 @@ def _get_redis_cache_opts():
return {
'host': __opts__.get('cache.redis.host', 'localhost'),
'port': __opts__.get('cache.redis.port', 6379),
'unix_socket_path': __opts__.get('cache.redis.unix_socket_path', None),
'db': __opts__.get('cache.redis.db', '0'),
'password': __opts__.get('cache.redis.password', ''),
'cluster_mode': __opts__.get('cache.redis.cluster_mode', False),
@ -231,6 +238,7 @@ def _get_redis_server(opts=None):
else:
REDIS_SERVER = redis.StrictRedis(opts['host'],
opts['port'],
unix_socket_path=opts['unix_socket_path'],
db=opts['db'],
password=opts['password'])
return REDIS_SERVER

View file

@ -9,7 +9,7 @@
#
# BUGS: https://github.com/saltstack/salt-bootstrap/issues
#
# COPYRIGHT: (c) 2012-2017 by the SaltStack Team, see AUTHORS.rst for more
# COPYRIGHT: (c) 2012-2018 by the SaltStack Team, see AUTHORS.rst for more
# details.
#
# LICENSE: Apache 2.0
@ -18,7 +18,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2017.12.13"
__ScriptVersion="2018.04.25"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -249,7 +249,6 @@ _CURL_ARGS=${BS_CURL_ARGS:-}
_FETCH_ARGS=${BS_FETCH_ARGS:-}
_GPG_ARGS=${BS_GPG_ARGS:-}
_WGET_ARGS=${BS_WGET_ARGS:-}
_ENABLE_EXTERNAL_ZMQ_REPOS=${BS_ENABLE_EXTERNAL_ZMQ_REPOS:-$BS_FALSE}
_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null}
_SALT_MINION_ID="null"
# _SIMPLIFY_VERSION is mostly used in Solaris based distributions
@ -299,13 +298,13 @@ __usage() {
Examples:
- ${__ScriptName}
- ${__ScriptName} stable
- ${__ScriptName} stable 2016.3
- ${__ScriptName} stable 2016.3.1
- ${__ScriptName} stable 2017.7
- ${__ScriptName} stable 2017.7.2
- ${__ScriptName} daily
- ${__ScriptName} testing
- ${__ScriptName} git
- ${__ScriptName} git 2016.3
- ${__ScriptName} git v2016.3.1
- ${__ScriptName} git 2017.7
- ${__ScriptName} git v2017.7.2
- ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358
Options:
@ -355,8 +354,6 @@ __usage() {
per -p flag. You're responsible for providing the proper package name.
-H Use the specified HTTP proxy for all download URLs (including https://).
For example: http://myproxy.example.com:3128
-Z Enable additional package repository for newer ZeroMQ
(only available for RHEL/CentOS/Fedora/Ubuntu based distributions)
-b Assume that dependencies are already installed and software sources are
set up. If git is selected, git tree is still checked out as dependency
step.
@ -395,7 +392,7 @@ __usage() {
tested with Centos 6 and is considered experimental. This will install the
ius repo on the box if disable repo is false. This must be used in conjunction
with -x <pythonversion>. For example:
sh bootstrap.sh -P -y -x python2.7 git v2016.11.3
sh bootstrap.sh -P -y -x python2.7 git v2017.7.2
The above will install python27 and install the git version of salt using the
python2.7 executable. This only works for git and pip installations.
@ -438,7 +435,6 @@ do
p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;;
d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;;
H ) _HTTP_PROXY="$OPTARG" ;;
Z ) _ENABLE_EXTERNAL_ZMQ_REPOS=$BS_TRUE ;;
b ) _NO_DEPS=$BS_TRUE ;;
f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;;
l ) _DISABLE_SSL=$BS_TRUE ;;
@ -593,14 +589,14 @@ elif [ "$ITYPE" = "stable" ]; then
if [ "$#" -eq 0 ];then
STABLE_REV="latest"
else
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7)$')" != "" ]; then
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$')" != "" ]; then
STABLE_REV="$1"
shift
elif [ "$(echo "$1" | egrep '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then
STABLE_REV="archive/$1"
shift
else
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, latest, \$MAJOR.\$MINOR.\$PATCH)"
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, latest, \$MAJOR.\$MINOR.\$PATCH)"
exit 1
fi
fi
@ -1331,10 +1327,10 @@ __check_dpkg_architecture() {
if [ "${error_msg}" != "" ]; then
echoerror "${error_msg}"
if [ "$ITYPE" != "git" ]; then
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.11.5."
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2."
echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository."
echoerror "For example:"
echoerror " sh ${__ScriptName} -r -P git v2016.11.5"
echoerror " sh ${__ScriptName} -r -P git v2017.7.2"
fi
fi
@ -1372,16 +1368,10 @@ __ubuntu_codename_translation() {
DISTRO_CODENAME="trusty"
;;
"16")
if [ "$_april" ]; then
DISTRO_CODENAME="xenial"
else
DISTRO_CODENAME="yakkety"
fi
DISTRO_CODENAME="xenial"
;;
"17")
if [ "$_april" ]; then
DISTRO_CODENAME="zesty"
fi
DISTRO_CODENAME="artful"
;;
*)
DISTRO_CODENAME="trusty"
@ -1500,9 +1490,12 @@ __check_end_of_life_versions() {
# < 14.04
# = 14.10
# = 15.04, 15.10
# = 16.10
# = 17.04
if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
[ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
([ "$DISTRO_MAJOR_VERSION" -eq 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]) || \
([ "$DISTRO_MAJOR_VERSION" -lt 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://wiki.ubuntu.com/Releases"
@ -1544,8 +1537,8 @@ __check_end_of_life_versions() {
;;
fedora)
# Fedora lower than 25 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 25 ]; then
# Fedora lower than 26 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://fedoraproject.org/wiki/Releases"
@ -1765,12 +1758,41 @@ __function_defined() {
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __wait_for_apt
# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before
# calling these again. This is useful when these process calls are part of
# a boot process, such as on AWS AMIs. This func will wait until the boot
# process is finished so the script doesn't exit on a locked proc.
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
echodebug "Checking if apt process is currently running."
# Timeout set at 15 minutes
WAIT_TIMEOUT=900
while ps -C apt,apt-get,aptitude,dpkg >/dev/null; do
sleep 1
WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
# If timeout reaches 0, abort.
if [ "$WAIT_TIMEOUT" -eq 0 ]; then
echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
echoerror "Bootstrap script cannot proceed. Aborting."
return 1
fi
done
echodebug "No apt processes are currently running."
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __apt_get_install_noinput
# DESCRIPTION: (DRY) apt-get install with noinput options
# PARAMETERS: packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
__wait_for_apt
apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
} # ---------- end of function __apt_get_install_noinput ----------
@ -1780,6 +1802,7 @@ __apt_get_install_noinput() {
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
#----------------------------------------------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
__wait_for_apt
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
} # ---------- end of function __apt_get_upgrade_noinput ----------
@ -1790,6 +1813,7 @@ __apt_get_upgrade_noinput() {
# PARAMETERS: url
#----------------------------------------------------------------------------------------------------------------------
__apt_key_fetch() {
__wait_for_apt
url=$1
# shellcheck disable=SC2086
@ -2544,7 +2568,7 @@ __enable_universe_repository() {
__install_saltstack_ubuntu_repository() {
# Workaround for latest non-LTS ubuntu
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems."
UBUNTU_VERSION=16.04
UBUNTU_CODENAME="xenial"
@ -2556,8 +2580,8 @@ __install_saltstack_ubuntu_repository() {
__PACKAGES=''
# Install downloader backend for GPG keys fetching
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg2 dirmngr"
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg dirmngr"
else
__PACKAGES="${__PACKAGES} gnupg-curl"
fi
@ -2576,6 +2600,7 @@ __install_saltstack_ubuntu_repository() {
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2588,6 +2613,7 @@ install_ubuntu_deps() {
__enable_universe_repository || return 1
__wait_for_apt
apt-get update
fi
@ -2644,6 +2670,7 @@ install_ubuntu_stable_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -2664,6 +2691,7 @@ install_ubuntu_stable_deps() {
}
install_ubuntu_daily_deps() {
__wait_for_apt
install_ubuntu_stable_deps || return 1
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
@ -2681,6 +2709,7 @@ install_ubuntu_daily_deps() {
}
install_ubuntu_git_deps() {
__wait_for_apt
apt-get update
if ! __check_command_exists git; then
@ -2711,8 +2740,8 @@ install_ubuntu_git_deps() {
else
install_ubuntu_stable_deps || return 1
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-m2crypto python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -2791,7 +2820,7 @@ install_ubuntu_stable_post() {
/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/bin/systemctl daemon-reload
elif [ -f /etc/init.d/salt-$fname ]; then
update-rc.d salt-$fname defaults
@ -2817,7 +2846,7 @@ install_ubuntu_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
elif [ -f /sbin/initctl ]; then
_upstart_conf="/etc/init/salt-$fname.conf"
@ -2973,6 +3002,7 @@ __install_saltstack_debian_repository() {
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2984,6 +3014,7 @@ install_debian_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -3030,9 +3061,9 @@ install_debian_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3071,8 +3102,9 @@ install_debian_8_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-systemd python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3081,7 +3113,7 @@ install_debian_8_git_deps() {
__PIP_PACKAGES=''
if (__check_pip_allowed >/dev/null 2>&1); then
__PIP_PACKAGES='tornado'
__PIP_PACKAGES='tornado<5.0'
# Install development environment for building tornado Python module
__PACKAGES="${__PACKAGES} build-essential python-dev"
@ -3096,6 +3128,7 @@ install_debian_8_git_deps() {
/etc/apt/sources.list.d/backports.list
fi
__wait_for_apt
apt-get update || return 1
# python-tornado package should be installed from backports repo
@ -3135,8 +3168,8 @@ install_debian_9_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 lsb-release python-apt python-backports-abc python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-jinja2 python-m2crypto python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-systemd python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3330,15 +3363,8 @@ install_debian_check_services() {
install_fedora_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
if [ "$_ENABLE_EXTERNAL_ZMQ_REPOS" -eq $BS_TRUE ]; then
__install_saltstack_copr_zeromq_repository || return 1
fi
__install_saltstack_copr_salt_repository || return 1
fi
__PACKAGES="PyYAML libyaml python-crypto python-jinja2 python-zmq python2-msgpack python2-requests"
__PACKAGES="libyaml m2crypto PyYAML python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python2-msgpack python2-requests python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
__PACKAGES="${__PACKAGES} yum-utils"
@ -3395,7 +3421,7 @@ install_fedora_stable_post() {
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3456,7 +3482,7 @@ install_fedora_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3523,20 +3549,6 @@ __install_epel_repository() {
return 0
}
__install_saltstack_copr_zeromq_repository() {
echoinfo "Installing Zeromq >=4 and PyZMQ>=14 from SaltStack's COPR repository"
if [ ! -s /etc/yum.repos.d/saltstack-zeromq4.repo ]; then
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__fetch_url /etc/yum.repos.d/saltstack-zeromq4.repo \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/zeromq4/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/saltstack-zeromq4-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo" || return 1
fi
return 0
}
__install_saltstack_rhel_repository() {
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
@ -3550,7 +3562,7 @@ __install_saltstack_rhel_repository() {
gpg_key="SALTSTACK-GPG-KEY.pub"
repo_file="/etc/yum.repos.d/saltstack.repo"
if [ ! -s "$repo_file" ]; then
if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then
cat <<_eof > "$repo_file"
[saltstack]
name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever
@ -3564,26 +3576,10 @@ _eof
fetch_url="${HTTP_VAL}://${_REPO_URL}/yum/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/"
__rpm_import_gpg "${fetch_url}${gpg_key}" || return 1
fi
return 0
}
__install_saltstack_copr_salt_repository() {
echoinfo "Adding SaltStack's COPR repository"
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
[ "$DISTRO_MAJOR_VERSION" -ge 22 ] && return 0
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__REPO_FILENAME="saltstack-salt-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo"
if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then
__fetch_url "/etc/yum.repos.d/${__REPO_FILENAME}" \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/salt/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/${__REPO_FILENAME}" || return 1
yum clean metadata || return 1
elif [ "$repo_rev" != "latest" ]; then
echowarn "saltstack.repo already exists, ignoring salt version argument."
echowarn "Use -F (forced overwrite) to install $repo_rev."
fi
return 0
@ -3688,7 +3684,8 @@ install_centos_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="python-crypto python-futures python-msgpack python-zmq python-jinja2 python-requests python-tornado"
__PACKAGES="m2crypto python-crypto python-futures python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then
__PACKAGES="${__PACKAGES} systemd-python"
@ -3705,7 +3702,12 @@ install_centos_git_deps() {
if [ "${_PY_EXE}" != "" ]; then
# If "-x" is defined, install dependencies with pip based on the Python version given.
_PIP_PACKAGES="jinja2 msgpack-python pycrypto PyYAML tornado zmq"
_PIP_PACKAGES="m2crypto jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq"
# install swig and openssl on cent6
if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then
__yum_install_noinput openssl-devel swig || return 1
fi
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
for SINGLE_PACKAGE in $_PIP_PACKAGES; do
@ -4275,7 +4277,7 @@ install_alpine_linux_stable_deps() {
install_alpine_linux_git_deps() {
install_alpine_linux_stable_deps || return 1
apk -U add python2 py-virtualenv py2-crypto py2-setuptools \
apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \
py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \
py2-zmq zeromq py2-requests || return 1
@ -4367,6 +4369,7 @@ install_alpine_linux_restart_daemons() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# Disable stdin to fix shell session hang on killing tee pipe
/sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1
@ -4382,6 +4385,7 @@ install_alpine_linux_check_services() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
__check_services_alpine salt-$fname || return 1
done
@ -4400,6 +4404,7 @@ daemons_running_alpine_linux() {
# Skip if not meant to be installed
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# shellcheck disable=SC2009
if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then
@ -4427,10 +4432,20 @@ install_amazon_linux_ami_deps() {
_USEAWS=$BS_FALSE
pkg_append="python"
repo_rev="$(echo "${STABLE_REV}" | sed 's|.*\/||g')"
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
else
repo_rev="latest"
fi
if echo $repo_rev | egrep -q '^archive'; then
year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4)
else
year=$(echo "$repo_rev" | cut -c1-4)
fi
if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$' || \
[ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ]; then
[ "$year" -gt 2016 ]; then
_USEAWS=$BS_TRUE
pkg_append="python27"
fi
@ -4477,7 +4492,8 @@ _eof
# Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64
# which is already installed
__PACKAGES="${pkg_append}-PyYAML ${pkg_append}-crypto ${pkg_append}-msgpack ${pkg_append}-zmq ${pkg_append}-jinja2 ${pkg_append}-requests"
__PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML"
__PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq"
# shellcheck disable=SC2086
__yum_install_noinput ${__PACKAGES} || return 1
@ -4630,7 +4646,7 @@ install_arch_linux_git_deps() {
fi
pacman -R --noconfirm python2-distribute
pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \
python2-markupsafe python2-msgpack python2-psutil \
python2-m2crypto python2-markupsafe python2-msgpack python2-psutil \
python2-pyzmq zeromq python2-requests python2-systemd || return 1
__git_clone_and_checkout || return 1
@ -4704,7 +4720,7 @@ install_arch_linux_post() {
/usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4732,7 +4748,7 @@ install_arch_linux_git_post() {
/usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4885,9 +4901,9 @@ install_freebsd_9_stable_deps() {
__configure_freebsd_pkg_details || return 1
fi
# Now install swig
# Now install swig30
# shellcheck disable=SC2086
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig || return 1
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig30 || return 1
# YAML module is used for generating custom master/minion configs
# shellcheck disable=SC2086
@ -4934,7 +4950,7 @@ install_freebsd_git_deps() {
# We're on the develop branch, install whichever tornado is on the requirements file
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")"
if [ "${__REQUIRED_TORNADO}" != "" ]; then
/usr/local/sbin/pkg install -y www/py-tornado || return 1
/usr/local/sbin/pkg install -y www/py-tornado4 || return 1
fi
fi
@ -5098,35 +5114,11 @@ install_freebsd_restart_daemons() {
# OpenBSD Install Functions
#
__choose_openbsd_mirror() {
    # Pick the fastest OpenBSD package mirror by round-trip ping time.
    # On success OPENBSD_REPO holds the chosen mirror URL and MINTIME its
    # ping time; both remain empty if no mirror answered.
    OPENBSD_REPO=''
    MINTIME=''
    # Fetch the official mirror list (one URL per line); keep http(s) entries only.
    MIRROR_LIST=$(ftp -w 15 -Vao - 'https://ftp.openbsd.org/cgi-bin/ftplist.cgi?dbversion=1' | awk '/^http/ {print $1}')
    for MIRROR in $MIRROR_LIST; do
        # Strip scheme and path to get the bare hostname for pinging.
        MIRROR_HOST=$(echo "$MIRROR" | sed -e 's|.*//||' -e 's|+*/.*$||')
        # One ping with a 1-second deadline; pull the average RTT field
        # out of the "round-trip min/avg/max" summary line.
        TIME=$(ping -c 1 -w 1 -q "$MIRROR_HOST" | awk -F/ '/round-trip/ { print $5 }')
        # Skip mirrors that did not answer.
        [ -z "$TIME" ] && continue
        echodebug "ping time for $MIRROR_HOST is $TIME"
        # First responsive mirror always wins; afterwards compare float
        # times with bc(1), since sh arithmetic is integer-only.
        if [ -z "$MINTIME" ]; then
            FASTER_MIRROR=1
        else
            FASTER_MIRROR=$(echo "$TIME < $MINTIME" | bc)
        fi
        if [ "$FASTER_MIRROR" -eq 1 ]; then
            MINTIME=$TIME
            OPENBSD_REPO="$MIRROR"
        fi
    done
}
install_openbsd_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
__choose_openbsd_mirror || return 1
echoinfo "setting package repository to $OPENBSD_REPO with ping time of $MINTIME"
[ -n "$OPENBSD_REPO" ] || return 1
echo "${OPENBSD_REPO}" >>/etc/installurl || return 1
OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD'
echoinfo "setting package repository to $OPENBSD_REPO"
echo "${OPENBSD_REPO}" >/etc/installurl || return 1
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
@ -5226,7 +5218,7 @@ install_openbsd_restart_daemons() {
# SmartOS Install Functions
#
install_smartos_deps() {
pkgin -y install zeromq py27-crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
pkgin -y install zeromq py27-crypto py27-m2crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
@ -5456,6 +5448,13 @@ __version_lte() {
}
__zypper() {
    # Wrapper around zypper that serializes package-manager access: if
    # another zypper instance is already running (e.g. during first boot
    # on AWS AMIs), poll once a second until it exits before issuing our
    # own command.
    while pgrep -l zypper; do
        sleep 1
    done

    # Run the requested zypper command non-interactively and hand its
    # exit status back to the caller.
    zypper --non-interactive "${@}"
    return $?
}
@ -5515,7 +5514,7 @@ install_opensuse_stable_deps() {
}
install_opensuse_git_deps() {
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! __check_command_exists update-ca-certificates; then
__zypper_install ca-certificates || return 1
fi
@ -5529,7 +5528,7 @@ install_opensuse_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 python-Jinja2 python-msgpack-python python-pycrypto python-pyzmq python-xml"
__PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5594,7 +5593,7 @@ install_opensuse_stable_post() {
if [ -f /bin/systemctl ]; then
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
continue
fi
@ -5723,6 +5722,12 @@ install_suse_12_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5825,6 +5830,11 @@ install_suse_11_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086

View file

@ -224,7 +224,7 @@ def sign_message(privkey_path, message, passphrase=None):
log.debug('salt.crypt.sign_message: Signing message.')
if HAS_M2:
md = EVP.MessageDigest('sha1')
md.update(message)
md.update(salt.utils.stringutils.to_bytes(message))
digest = md.final()
return key.sign(digest)
else:
@ -242,7 +242,7 @@ def verify_signature(pubkey_path, message, signature):
log.debug('salt.crypt.verify_signature: Verifying signature')
if HAS_M2:
md = EVP.MessageDigest('sha1')
md.update(message)
md.update(salt.utils.stringutils.to_bytes(message))
digest = md.final()
return pubkey.verify(digest, signature)
else:

View file

@ -97,14 +97,15 @@ class IRCClient(object):
self.allow_nicks = allow_nicks
self.disable_query = disable_query
self.io_loop = tornado.ioloop.IOLoop(make_current=False)
self.io_loop.make_current()
self._connect()
def _connect(self):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
if self.ssl is True:
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE}, io_loop=self.io_loop)
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE})
else:
self._stream = tornado.iostream.IOStream(_sock, io_loop=self.io_loop)
self._stream = tornado.iostream.IOStream(_sock)
self._stream.set_close_callback(self.on_closed)
self._stream.connect((self.host, self.port), self.on_connect)

View file

@ -313,10 +313,9 @@ def _domain_event_graphics_cb(conn, domain, phase, local, remote, auth, subject,
'''
transform address structure into event data piece
'''
data = {'family': _get_libvirt_enum_string('{0}_ADDRESS_'.format(prefix), addr.family),
'node': addr.node}
if addr.service is not None:
data['service'] = addr.service
data = {'family': _get_libvirt_enum_string('{0}_ADDRESS_'.format(prefix), addr['family']),
'node': addr['node'],
'service': addr['service']}
return addr
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
@ -324,10 +323,7 @@ def _domain_event_graphics_cb(conn, domain, phase, local, remote, auth, subject,
'local': get_address(local),
'remote': get_address(remote),
'authScheme': auth,
'subject': {
'type': subject.type,
'name': subject.name
}
'subject': [{'type': item[0], 'name': item[1]} for item in subject]
})

View file

@ -88,6 +88,13 @@ In addition, other groups are being loaded from pillars.
:depends: slackclient
.. note:: groups_pillar_name
In order to use this, the engine must be running as a minion running on
the master, so that the ``Caller`` client can be used to retrieve that
minions pillar data, because the master process does not have pillars.
'''
# Import python libraries
@ -237,10 +244,8 @@ class SlackClient(object):
XXX: instead of using Caller, make the minion to use configurable so there could be some
restrictions placed on what pillars can be used.
'''
if pillar_name:
caller = salt.client.Caller()
pillar_groups = caller.cmd('pillar.get', pillar_name)
# pillar_groups = __salt__['pillar.get'](pillar_name, {})
if pillar_name and __opts__['__role'] == 'minion':
pillar_groups = __salt__['pillar.get'](pillar_name, {})
log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name)
log.debug('pillar groups is %s', pillar_groups)
log.debug('pillar groups type is %s', type(pillar_groups))

View file

@ -81,6 +81,7 @@ def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
if all([ssl_crt, ssl_key]):
ssl_options = {"certfile": ssl_crt, "keyfile": ssl_key}
io_loop = tornado.ioloop.IOLoop(make_current=False)
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options, io_loop=io_loop)
io_loop.make_current()
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
http_server.listen(port, address=address)
io_loop.start()

View file

@ -56,7 +56,7 @@ def inet_pton(address_family, ip_string):
addr_size = ctypes.c_int(ctypes.sizeof(addr))
if WSAStringToAddressA(
ip_string,
ip_string.encode('ascii'),
address_family,
None,
ctypes.byref(addr),

View file

@ -457,7 +457,7 @@ def _bsd_memdata(osdata):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
grains['mem_total'] = int(mem) // 1024 // 1024
if osdata['kernel'] == 'OpenBSD':
if osdata['kernel'] in ['OpenBSD', 'NetBSD']:
swapctl = salt.utils.path.which('swapctl')
swap_total = __salt__['cmd.run']('{0} -sk'.format(swapctl)).split(' ')[1]
else:
@ -771,13 +771,6 @@ def _virtual(osdata):
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
elif command == 'virt-what':
# if 'virt-what' returns nothing, it's either an undetected platform
# so we default just as virt-what to 'physical', otherwise use the
# platform detected/returned by virt-what
if output:
grains['virtual'] = output.lower()
break
elif command == 'prtdiag':
model = output.lower().split("\n")[0]
if 'vmware' in model:

View file

@ -24,11 +24,8 @@ GRAINS_CACHE = {}
def __virtual__():
try:
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'fx2':
return __virtualname__
except KeyError:
pass
if salt.utils.platform.is_proxy() and 'proxy' in __opts__ and __opts__['proxy'].get('proxytype') == 'fx2':
return __virtualname__
return False

View file

@ -14,10 +14,9 @@ __virtualname__ = 'marathon'
def __virtual__():
if not salt.utils.platform.is_proxy() or 'proxy' not in __opts__:
return False
else:
if salt.utils.platform.is_proxy() and 'proxy' in __opts__ and __opts__['proxy'].get('proxytype') == 'marathon':
return __virtualname__
return False
def kernel():

View file

@ -413,10 +413,10 @@ def host_dns(proxy=None):
'AAAA': []
}
}
dns_a = salt.utils.dns.query(device_host_value, 'A')
dns_a = salt.utils.dns.lookup(device_host_value, 'A')
if dns_a:
host_dns_ret['host_dns']['A'] = dns_a
dns_aaaa = salt.utils.dns.query(device_host_value, 'AAAA')
dns_aaaa = salt.utils.dns.lookup(device_host_value, 'AAAA')
if dns_aaaa:
host_dns_ret['host_dns']['AAAA'] = dns_aaaa
return host_dns_ret

View file

@ -414,10 +414,10 @@ class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.error(
'%s key missing from master configuration. This is '
'a bug, please report it. Falling back to default '
'interval of %d seconds', interval_key, interval
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None

View file

@ -2498,13 +2498,15 @@ class Minion(MinionBase):
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2557,14 +2559,15 @@ class Minion(MinionBase):
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2621,7 +2624,7 @@ class Minion(MinionBase):
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
@ -3092,7 +3095,7 @@ class SyndicManager(MinionBase):
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1

View file

@ -28,12 +28,15 @@ import salt.utils.stringutils
import salt.utils.vt
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import 3rd-party libs
# Import third-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error
HAS_LIBS = False
SIGN_PROMPT_RE = re.compile(r'Enter passphrase: ', re.M)
REPREPRO_SIGN_PROMPT_RE = re.compile(r'Passphrase: ', re.M)
try:
import gnupg # pylint: disable=unused-import
import salt.modules.gpg
@ -61,7 +64,8 @@ def __virtual__():
if HAS_LIBS and not missing_util:
return __virtualname__
else:
return False, 'The debbuild module could not be loaded: requires python-gnupg, gpg, debuild, pbuilder and reprepro utilities to be installed'
return False, ('The debbuild module could not be loaded: requires python-gnupg, gpg, debuild, '
'pbuilder and reprepro utilities to be installed')
else:
return (False, 'The debbuild module could not be loaded: unsupported OS family')
@ -78,7 +82,7 @@ def _check_repo_sign_utils_support(name):
)
def _check_repo_gpg_phrase_utils_support():
def _check_repo_gpg_phrase_utils():
'''
Check for /usr/lib/gnupg2/gpg-preset-passphrase is installed
'''
@ -270,7 +274,7 @@ def _mk_tree():
return basedir
def _get_spec(tree_base, spec, template, saltenv='base'):
def _get_spec(tree_base, spec, saltenv='base'):
'''
Get the spec file (tarball of the debian sub-dir to use)
and place it in build area
@ -294,7 +298,7 @@ def _get_src(tree_base, source, saltenv='base'):
shutil.copy(source, dest)
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
def make_src_pkg(dest_dir, spec, sources, env=None, saltenv='base'):
'''
Create a platform specific source package from the given platform spec/control file and sources
@ -304,7 +308,9 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control.tar.xz https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control.tar.xz
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
@ -315,7 +321,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
spec_pathfile = _get_spec(tree_base, spec, template, saltenv)
spec_pathfile = _get_spec(tree_base, spec, saltenv)
# build salt equivalents from scratch
if isinstance(sources, six.string_types):
@ -350,26 +356,36 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
debname += '+ds'
debname_orig = debname + '.orig.tar.gz'
abspath_debname = os.path.join(tree_base, debname)
retrc = 0
cmd = 'tar -xvzf {0}'.format(salttarball)
__salt__['cmd.run'](cmd, cwd=tree_base)
retrc = __salt__['cmd.retcode'](cmd, cwd=tree_base)
cmd = 'mv {0} {1}'.format(salttar_name, debname)
__salt__['cmd.run'](cmd, cwd=tree_base)
retrc |= __salt__['cmd.retcode'](cmd, cwd=tree_base)
cmd = 'tar -cvzf {0} {1}'.format(os.path.join(tree_base, debname_orig), debname)
__salt__['cmd.run'](cmd, cwd=tree_base)
retrc |= __salt__['cmd.retcode'](cmd, cwd=tree_base)
cmd = 'rm -f {0}'.format(salttarball)
__salt__['cmd.run'](cmd, cwd=tree_base)
retrc |= __salt__['cmd.retcode'](cmd, cwd=tree_base, env=env)
cmd = 'cp {0} {1}'.format(spec_pathfile, abspath_debname)
__salt__['cmd.run'](cmd, cwd=abspath_debname)
retrc |= __salt__['cmd.retcode'](cmd, cwd=abspath_debname)
cmd = 'tar -xvJf {0}'.format(spec_pathfile)
__salt__['cmd.run'](cmd, cwd=abspath_debname)
retrc |= __salt__['cmd.retcode'](cmd, cwd=abspath_debname, env=env)
cmd = 'rm -f {0}'.format(os.path.basename(spec_pathfile))
__salt__['cmd.run'](cmd, cwd=abspath_debname)
retrc |= __salt__['cmd.retcode'](cmd, cwd=abspath_debname)
cmd = 'debuild -S -uc -us -sa'
__salt__['cmd.run'](cmd, cwd=abspath_debname, python_shell=True)
retrc |= __salt__['cmd.retcode'](cmd, cwd=abspath_debname, python_shell=True, env=env)
cmd = 'rm -fR {0}'.format(abspath_debname)
__salt__['cmd.run'](cmd)
retrc |= __salt__['cmd.retcode'](cmd)
if retrc != 0:
raise SaltInvocationError(
'Make source package for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
for dfile in os.listdir(tree_base):
if not dfile.endswith('.build'):
@ -401,12 +417,15 @@ def build(runas,
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg deb-8-x86_64 /var/www/html https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.make_src_pkg deb-8-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for Debian using pbuilder
and place it in /var/www/html/ on the minion
'''
ret = {}
retrc = 0
try:
os.makedirs(dest_dir)
except OSError as exc:
@ -414,36 +433,45 @@ def build(runas,
raise
dsc_dir = tempfile.mkdtemp()
try:
dscs = make_src_pkg(dsc_dir, spec, sources, env, template, saltenv)
dscs = make_src_pkg(dsc_dir, spec, sources, env, saltenv)
except Exception as exc:
shutil.rmtree(dsc_dir)
log.error('Failed to make src package')
return ret
cmd = 'pbuilder --create'
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
retrc = __salt__['cmd.retcode'](cmd, runas=runas, python_shell=True, env=env)
if retrc != 0:
raise SaltInvocationError(
'pbuilder create failed with return error {0}, check logs for further details'.format(retrc))
# use default /var/cache/pbuilder/result
results_dir = '/var/cache/pbuilder/result'
## ensure clean
__salt__['cmd.run']('rm -fR {0}'.format(results_dir))
# ensure clean
retrc |= __salt__['cmd.retcode']('rm -fR {0}'.format(results_dir))
# dscs should only contain salt orig and debian tarballs and dsc file
for dsc in dscs:
afile = os.path.basename(dsc)
adist = os.path.join(dest_dir, afile)
os.path.join(dest_dir, afile)
if dsc.endswith('.dsc'):
dbase = os.path.dirname(dsc)
try:
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, dbase))
retrc |= __salt__['cmd.retcode']('chown {0} -R {1}'.format(runas, dbase))
cmd = 'pbuilder update --override-config'
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas, python_shell=True, env=env)
cmd = 'pbuilder build --debbuildopts "-sa" {0}'.format(dsc)
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas, python_shell=True, env=env)
if retrc != 0:
raise SaltInvocationError(
'pbuilder build or update failed with return error {0}, '
'check logs for further details'.format(retrc)
)
# ignore local deps generated package file
for bfile in os.listdir(results_dir):
@ -487,44 +515,20 @@ def make_repo(repodir,
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
This consists of the last 8 hex digits of the GPG key ID.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data. Leverages gpg-agent and
gpg-preset-passphrase for caching keys, etc.
These pillar values are assumed to be filenames which are present
in ``gnupghome``. The pillar keys shown below have to match exactly.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
@ -571,12 +575,17 @@ def make_repo(repodir,
salt '*' pkgbuild.make_repo /var/www/html
'''
res = {'retcode': 1,
res = {
'retcode': 1,
'stdout': '',
'stderr': 'initialization value'}
'stderr': 'initialization value'
}
SIGN_PROMPT_RE = re.compile(r'Enter passphrase: ', re.M)
REPREPRO_SIGN_PROMPT_RE = re.compile(r'Passphrase: ', re.M)
retrc = 0
if gnupghome and env is None:
env = {}
env['GNUPGHOME'] = gnupghome
repoconf = os.path.join(repodir, 'conf')
if not os.path.isdir(repoconf):
@ -600,7 +609,6 @@ def make_repo(repodir,
# preset passphase and interaction with gpg-agent
gpg_info_file = '{0}/gpg-agent-info-salt'.format(gnupghome)
gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome)
gpg_tty_info_dict = {}
# if using older than gnupg 2.1, then env file exists
older_gnupg = __salt__['file.file_exists'](gpg_info_file)
@ -639,9 +647,13 @@ def make_repo(repodir,
break
if not older_gnupg:
_check_repo_sign_utils_support('gpg2')
cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.path.which('gpg2'))
local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas)
try:
_check_repo_sign_utils_support('gpg2')
cmd = 'gpg2 --with-keygrip --list-secret-keys'
except CommandExecutionError:
# later gpg versions have dispensed with gpg2 - Ubuntu 18.04
cmd = 'gpg --with-keygrip --list-secret-keys'
local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas, env=env)
local_keys2 = iter(local_keys2_keygrip.splitlines())
try:
for line in local_keys2:
@ -672,25 +684,25 @@ def make_repo(repodir,
for gpg_info_line in gpg_raw_info:
gpg_info_line = salt.utils.stringutils.to_unicode(gpg_info_line)
gpg_info = gpg_info_line.split('=')
gpg_info_dict = {gpg_info[0]: gpg_info[1]}
__salt__['environ.setenv'](gpg_info_dict)
env[gpg_info[0]] = gpg_info[1]
break
else:
with salt.utils.files.fopen(gpg_tty_info_file, 'r') as fow:
gpg_raw_info = fow.readlines()
for gpg_tty_info_line in gpg_raw_info:
gpg_info_line = salt.utils.stringutils.to_unicode(gpg_info_line)
gpg_tty_info_line = salt.utils.stringutils.to_unicode(gpg_tty_info_line)
gpg_tty_info = gpg_tty_info_line.split('=')
gpg_tty_info_dict = {gpg_tty_info[0]: gpg_tty_info[1]}
__salt__['environ.setenv'](gpg_tty_info_dict)
env[gpg_tty_info[0]] = gpg_tty_info[1]
break
if use_passphrase:
_check_repo_gpg_phrase_utils_support()
_check_repo_gpg_phrase_utils()
phrase = __salt__['pillar.get']('gpg_passphrase')
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_keygrip_to_use)
__salt__['cmd.run'](cmd, runas=runas)
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(
phrase,
local_keygrip_to_use)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas, env=env)
for debfile in os.listdir(repodir):
abs_file = os.path.join(repodir, debfile)
@ -702,10 +714,12 @@ def make_repo(repodir,
if older_gnupg:
if local_keyid is not None:
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
retrc |= __salt__['cmd.retcode'](cmd, cwd=repodir, use_vt=True, env=env)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(
codename,
abs_file)
retrc |= __salt__['cmd.retcode'](cmd, cwd=repodir, use_vt=True, env=env)
else:
# interval of 0.125 is really too fast on some systems
interval = 0.5
@ -715,15 +729,15 @@ def make_repo(repodir,
error_msg = 'Failed to debsign file {0}'.format(abs_file)
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
try:
stdout, stderr = None, None
proc = salt.utils.vt.Terminal(
cmd,
env=env,
shell=True,
stream_stdout=True,
stream_stderr=True
)
while proc.has_unread_data:
stdout, stderr = proc.recv()
stdout, _ = proc.recv()
if stdout and SIGN_PROMPT_RE.search(stdout):
# have the prompt for inputting the passphrase
proc.sendline(phrase)
@ -732,21 +746,24 @@ def make_repo(repodir,
if times_looped > number_retries:
raise SaltInvocationError(
'Attempting to sign file {0} failed, timed out after {1} seconds'
.format(abs_file, int(times_looped * interval))
'Attempting to sign file {0} failed, timed out after {1} seconds'.format(
abs_file,
int(times_looped * interval))
)
time.sleep(interval)
proc_exitstatus = proc.exitstatus
if proc_exitstatus != 0:
raise SaltInvocationError(
'Signing file {0} failed with proc.status {1}'
.format(abs_file, proc_exitstatus)
'Signing file {0} failed with proc.status {1}'.format(
abs_file,
proc_exitstatus)
)
except salt.utils.vt.TerminalException as err:
trace = traceback.format_exc()
log.error(error_msg, err, trace)
res = {'retcode': 1,
res = {
'retcode': 1,
'stdout': '',
'stderr': trace}
finally:
@ -755,19 +772,21 @@ def make_repo(repodir,
number_retries = timeout / interval
times_looped = 0
error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(
codename,
abs_file)
try:
stdout, stderr = None, None
proc = salt.utils.vt.Terminal(
cmd,
env=env,
shell=True,
cwd=repodir,
env=gpg_tty_info_dict,
stream_stdout=True,
stream_stderr=True
)
while proc.has_unread_data:
stdout, stderr = proc.recv()
stdout, _ = proc.recv()
if stdout and REPREPRO_SIGN_PROMPT_RE.search(stdout):
# have the prompt for inputting the passphrase
proc.sendline(phrase)
@ -776,26 +795,38 @@ def make_repo(repodir,
if times_looped > number_retries:
raise SaltInvocationError(
'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'
.format(abs_file, times_looped)
)
time.sleep(interval)
proc_exitstatus = proc.exitstatus
if proc_exitstatus != 0:
raise SaltInvocationError(
'Reprepro includedsc for codename {0} and file {1} failed with proc.status {2}'.format(codename, abs_file, proc_exitstatus)
)
'Reprepro includedsc for codename {0} and file {1} failed with proc.status {2}'.format(
codename,
abs_file,
proc_exitstatus)
)
except salt.utils.vt.TerminalException as err:
trace = traceback.format_exc()
log.error(error_msg, err, trace)
res = {'retcode': 1,
res = {
'retcode': 1,
'stdout': '',
'stderr': trace}
'stderr': trace
}
finally:
proc.close(terminate=True, kill=True)
if retrc != 0:
raise SaltInvocationError(
'Making a repo encountered errors, return error {0}, check logs for further details'.format(retrc))
if debfile.endswith('.deb'):
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(codename, abs_file)
res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(
codename,
abs_file)
res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True, env=env)
return res

View file

@ -23,6 +23,7 @@ from salt.ext import six
from salt.ext.six.moves import StringIO # pylint: disable=import-error,no-name-in-module
# Import salt libs
import salt.utils.dns
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
@ -228,38 +229,23 @@ def _read_file(path):
def _parse_resolve():
'''
Parse /etc/resolv.conf and return domainname
Parse /etc/resolv.conf
'''
contents = _read_file(_DEB_RESOLV_FILE)
return contents
return salt.utils.dns.parse_resolv(_DEB_RESOLV_FILE)
def _parse_domainname():
'''
Parse /etc/resolv.conf and return domainname
'''
contents = _read_file(_DEB_RESOLV_FILE)
pattern = r'domain\s+(?P<domain_name>\S+)'
prog = re.compile(pattern)
for item in contents:
match = prog.match(item)
if match:
return match.group('domain_name')
return ''
return _parse_resolve().get('domain', '')
def _parse_searchdomain():
'''
Parse /etc/resolv.conf and return searchdomain
'''
contents = _read_file(_DEB_RESOLV_FILE)
pattern = r'search\s+(?P<search_domain>\S+)'
prog = re.compile(pattern)
for item in contents:
match = prog.match(item)
if match:
return match.group('search_domain')
return ''
return _parse_resolve().get('search', '')
def _parse_hostname():
@ -2075,38 +2061,29 @@ def build_network_settings(**settings):
# If the domain changes, then we should write the resolv.conf file.
if new_domain or new_search:
# Look for existing domain line and update if necessary
contents = _parse_resolve()
domain_prog = re.compile(r'domain\s+(?P<domain_name>\S+)')
search_prog = re.compile(r'search\s+(?P<search_domain>\S+)')
resolve = _parse_resolve()
domain_prog = re.compile(r'domain\s+')
search_prog = re.compile(r'search\s+')
new_contents = []
found_domain = False
found_search = False
for item in contents:
domain_match = domain_prog.match(item)
search_match = search_prog.match(item)
if domain_match:
new_contents.append('domain {0}\n' . format(domainname))
found_domain = True
elif search_match:
new_contents.append('search {0}\n' . format(searchdomain))
found_search = True
else:
new_contents.append(item)
for item in _read_file(_DEB_RESOLV_FILE):
if domain_prog.match(item):
item = 'domain {0}'.format(domainname)
elif search_prog.match(item):
item = 'search {0}'.format(searchdomain)
new_contents.append(item)
# A domain line didn't exist so we'll add one in
# with the new domainname
if not found_domain:
new_contents.insert(0, 'domain {0}\n' . format(domainname))
if 'domain' not in resolve:
new_contents.insert(0, 'domain {0}' . format(domainname))
# A search line didn't exist so we'll add one in
# with the new search domain
if not found_search:
if new_contents[0].startswith('domain'):
new_contents.insert(1, 'search {0}\n' . format(searchdomain))
else:
new_contents.insert(0, 'search {0}\n' . format(searchdomain))
if 'search' not in resolve:
new_contents.insert('domain' in resolve, 'search {0}'.format(searchdomain))
new_resolv = ''.join(new_contents)
new_resolv = '\n'.join(new_contents)
# Write /etc/resolv.conf
if not ('test' in settings and settings['test']):

View file

@ -483,20 +483,6 @@ def _clear_context():
pass
def _pull_if_needed(image, client_timeout):
'''
Pull the desired image if not present, and return the image ID or name
'''
image_id = resolve_image_id(image)
if not image_id:
pull(image, client_timeout=client_timeout)
# Avoid another inspect and just use the passed image. docker-py
# will do the right thing and resolve the tag for us if we pass it
# a tagged image.
image_id = image
return image_id
def _get_md5(name, path):
'''
Get the MD5 checksum of a file from a container
@ -3115,8 +3101,8 @@ def create(image,
# Create a CentOS 7 container that will stay running once started
salt myminion docker.create centos:7 name=mycent7 interactive=True tty=True command=bash
'''
image_id = image if not kwargs.pop('inspect', True) \
else _pull_if_needed(image, client_timeout)
if kwargs.pop('inspect', True) and not resolve_image_id(image):
pull(image, client_timeout=client_timeout)
kwargs, unused_kwargs = _get_create_kwargs(
skip_translate=skip_translate,
@ -3138,7 +3124,7 @@ def create(image,
)
time_started = time.time()
response = _client_wrapper('create_container',
image_id,
image,
name=name,
**kwargs)
response['Time_Elapsed'] = time.time() - time_started
@ -3232,8 +3218,8 @@ def run_container(image,
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
'''
image_id = image if not kwargs.pop('inspect', True) \
else _pull_if_needed(image, client_timeout)
if kwargs.pop('inspect', True) and not resolve_image_id(image):
pull(image, client_timeout=client_timeout)
removed_ids = None
if name is not None:
@ -3303,7 +3289,7 @@ def run_container(image,
time_started = time.time()
# Create the container
ret = _client_wrapper('create_container', image_id, name=name, **kwargs)
ret = _client_wrapper('create_container', image, name=name, **kwargs)
if removed_ids:
ret['Replaces'] = removed_ids

View file

@ -4982,7 +4982,7 @@ def get_diff(file1,
)
args = []
for idx, filename in enumerate(files):
for filename in files:
try:
with salt.utils.files.fopen(filename, 'rb') as fp_:
args.append(fp_.readlines())
@ -5011,7 +5011,8 @@ def get_diff(file1,
*salt.utils.data.decode(args)
)
)
return ret
return ret
return ''
def manage_file(name,

View file

@ -213,6 +213,14 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
To pass in jump options that doesn't take arguments, pass in an empty
string.
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms
of ``--protocol``, if ``--proto`` appears in an iptables command after
the appearance of ``-m policy``, it is interpreted as the ``--proto``
option of the policy extension (see the iptables-extensions(8) man
page).
CLI Examples:
.. code-block:: bash
@ -243,7 +251,6 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
salt '*' iptables.build_rule filter INPUT command=I position=3 \\
full=True match=state connstate=RELATED,ESTABLISHED jump=ACCEPT \\
family=ipv6
'''
if 'target' in kwargs:
kwargs['jump'] = kwargs.pop('target')
@ -257,7 +264,7 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
del kwargs[ignore]
rule = []
proto = False
protocol = False
bang_not_pat = re.compile(r'(!|not)\s?')
def maybe_add_negation(arg):
@ -281,12 +288,15 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
rule.append('{0}-o {1}'.format(maybe_add_negation('of'), kwargs['of']))
del kwargs['of']
for proto_arg in ('protocol', 'proto'):
if proto_arg in kwargs:
if not proto:
rule.append('{0}-p {1}'.format(maybe_add_negation(proto_arg), kwargs[proto_arg]))
proto = True
del kwargs[proto_arg]
if 'proto' in kwargs and kwargs.get('match') != 'policy':
kwargs['protocol'] = kwargs['proto']
del kwargs['proto']
# Handle the case 'proto' in kwargs and kwargs.get('match') == 'policy' below
if 'protocol' in kwargs:
if not protocol:
rule.append('{0}-p {1}'.format(maybe_add_negation('protocol'), kwargs['protocol']))
protocol = True
del kwargs['protocol']
if 'match' in kwargs:
match_value = kwargs['match']
@ -297,6 +307,9 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if 'name_' in kwargs and match.strip() in ('pknock', 'quota2', 'recent'):
rule.append('--name {0}'.format(kwargs['name_']))
del kwargs['name_']
if 'proto' in kwargs and kwargs.get('match') == 'policy':
rule.append('{0}--proto {1}'.format(maybe_add_negation('proto'), kwargs['proto']))
del kwargs['proto']
del kwargs['match']
if 'match-set' in kwargs:
@ -330,8 +343,8 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if multiport_arg in kwargs:
if '-m multiport' not in rule:
rule.append('-m multiport')
if not proto:
return 'Error: proto must be specified'
if not protocol:
return 'Error: protocol must be specified'
mp_value = kwargs[multiport_arg]
if isinstance(mp_value, list):
@ -1042,9 +1055,9 @@ def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
def _parser():
'''
This function contains _all_ the options I could find in man 8 iptables,
listed in the first section that I found them in. They will not all be used
by all parts of the module; use them intelligently and appropriately.
This function attempts to list all the options documented in the
iptables(8) and iptables-extensions(8) man pages. They will not all be
used by all parts of the module; use them intelligently and appropriately.
'''
add_arg = None
if sys.version.startswith('2.6'):

View file

@ -1,14 +1,15 @@
# -*- coding: utf-8 -*-
'''
r'''
Manage the Windows registry
-----
Hives
-----
Hives are the main sections of the registry and all begin with the word HKEY.
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
----
Keys
@ -19,10 +20,41 @@ can have a value assigned to them under the (Default)
-----------------
Values or Entries
-----------------
Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs.
:depends: - PyWin32
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. The name is ``(Default)`` with a displayed value
of ``(value not set)``. The actual value is Null.
-------
Example
-------
The following example is an export from the Windows startup portion of the
registry:
.. code-block:: bash
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
In this example these are the values for each:
Hive:
``HKEY_LOCAL_MACHINE``
Key and subkeys:
``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run``
Value:
- There are 3 value names:
- `RTHDVCPL`
- `NvBackend`
- `BTMTrayAgent`
- Each value name has a corresponding value
:depends: - salt.utils.win_reg
'''
# When production windows installer is using Python 3, Python 2 code can be removed
from __future__ import absolute_import, print_function, unicode_literals
@ -42,7 +74,7 @@ __virtualname__ = 'reg'
def __virtual__():
'''
Only works on Windows systems with the PyWin32
Only works on Windows systems with PyWin32
'''
if not salt.utils.platform.is_windows():
return (False, 'reg execution module failed to load: '
@ -56,16 +88,26 @@ def __virtual__():
def key_exists(hive, key, use_32bit_registry=False):
'''
r'''
Check that the key is found in the registry. This refers to keys and not
value/data pairs.
:param str hive: The hive to connect to.
:param str key: The key to check
:param bool use_32bit_registry: Look in the 32bit portion of the registry
Args:
:return: Returns True if found, False if not found
:rtype: bool
hive (str): The hive to connect to
key (str): The key to check
use_32bit_registry (bool): Look in the 32bit portion of the registry
Returns:
bool: True if exists, otherwise False
CLI Example:
.. code-block:: bash
salt '*' reg.key_exists HKLM SOFTWARE\Microsoft
'''
return __utils__['reg.key_exists'](hive=hive,
key=key,
@ -76,13 +118,18 @@ def broadcast_change():
'''
Refresh the windows environment.
Returns (bool): True if successful, otherwise False
.. note::
This will only effect new processes and windows. Services will not see
the change until the system restarts.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt '*' reg.broadcast_change
salt '*' reg.broadcast_change
'''
return salt.utils.win_functions.broadcast_setting_change('Environment')
@ -91,28 +138,33 @@ def list_keys(hive, key=None, use_32bit_registry=False):
'''
Enumerates the subkeys in a registry key or hive.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the keys under the hive will be returned.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64 bit installations. On 32bit machines this is ignored.
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the keys under the hive will be returned.
:return: A list of keys/subkeys under the hive or key.
:rtype: list
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
Returns:
list: A list of keys/subkeys under the hive or key.
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt '*' reg.list_keys HKLM 'SOFTWARE'
salt '*' reg.list_keys HKLM 'SOFTWARE'
'''
return __utils__['reg.list_keys'](hive=hive,
key=key,
@ -123,30 +175,36 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
'''
Enumerates the values in a registry key or hive.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the values under the hive will be returned.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64 bit installations. On 32bit machines this is ignored.
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the values under the hive will be returned.
:param bool include_default: Toggle whether to include the '(Default)' value.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
:return: A list of values under the hive or key.
:rtype: list
include_default (bool):
Toggle whether to include the '(Default)' value.
Returns:
list: A list of values under the hive or key.
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
'''
return __utils__['reg.list_values'](hive=hive,
key=key,
@ -156,40 +214,58 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
def read_value(hive, key, vname=None, use_32bit_registry=False):
r'''
Reads a registry value entry or the default value for a key.
Reads a registry value entry or the default value for a key. To read the
default value, don't pass ``vname``
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str): The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be returned
key (str):
The key (looks like a path) to the value name.
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64bit installations. On 32bit machines this is ignored.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be returned.
:return: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64bit installations.
On 32bit machines this is ignored.
:rtype: dict
Returns:
dict: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False.
If vname is not passed:
bool: Returns False if the key is not found
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
- Returns False if key not found.
If vname is not passed:
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
CLI Example:
.. code-block:: bash
The following will get the value of the ``version`` value name in the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
.. code-block:: bash
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
CLI Example:
The following will get the default value of the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
.. code-block:: bash
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt'
'''
return __utils__['reg.read_value'](hive=hive,
key=key,
@ -205,98 +281,114 @@ def set_value(hive,
use_32bit_registry=False,
volatile=False):
'''
Sets a registry value entry or the default value for a key.
Sets a value in the registry. If ``vname`` is passed, it will be the value
for that value name, otherwise it will be the default value for the
specified key
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be set.
key (str):
The key (looks like a path) to the value name.
:param object vdata: The value data to be set.
What the type of this parameter
should be is determined by the value of the vtype
parameter. The correspondence
is as follows:
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be set.
.. glossary::
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (vname) is passed, this
will be the data for that value name. If not, this will be the
(Default) value for the key.
REG_BINARY
binary data (i.e. str in python version < 3 and bytes in version >=3)
REG_DWORD
int
REG_EXPAND_SZ
str
REG_MULTI_SZ
list of objects of type str
REG_SZ
str
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
:param str vtype: The value type.
The possible values of the vtype parameter are indicated
above in the description of the vdata parameter.
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
:param bool use_32bit_registry: Sets the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
.. note::
When setting REG_BINARY, string data will be converted to
binary.
:param bool volatile: When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or shutdown).
This parameter only has an effect when a key is being created and at no
other time.
.. note::
The type for the (Default) value is always REG_SZ and cannot be
changed.
:return: Returns True if successful, False if not
:rtype: bool
.. note::
This parameter is optional. If ``vdata`` is not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type. The possible values of the vtype parameter are
indicated above in the description of the vdata parameter.
use_32bit_registry (bool):
Sets the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
volatile (bool):
When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or
shutdown). This parameter only has an effect when a key is being
created and at no other time.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
This will set the version value to 2015.5.2 in the SOFTWARE\\Salt key in
the HKEY_LOCAL_MACHINE hive
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2'
.. code-block:: bash
This function is strict about the type of vdata. For instance the
the next example will fail because vtype has a value of REG_SZ and vdata
has a type of int (as opposed to str as expected).
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2'
CLI Example:
.. code-block:: bash
This function is strict about the type of vdata. For instance this
example will fail because vtype has a value of REG_SZ and vdata has a
type of int (as opposed to str as expected).
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_SZ vdata=0
.. code-block:: bash
However, this next example where vdata is properly quoted should succeed.
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'str_data' 1.2
CLI Example:
.. code-block:: bash
In this next example vdata is properly quoted and should succeed.
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_SZ vdata="'0'"
.. code-block:: bash
An example of using vtype REG_BINARY is as follows:
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'str_data' vtype=REG_SZ vdata="'1.2'"
CLI Example:
.. code-block:: bash
This is an example of using vtype REG_BINARY.
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_BINARY vdata='!!binary d2hhdCdzIHRoZSBwb2ludA=='
.. code-block:: bash
An example of using vtype REG_LIST is as follows:
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'bin_data' vtype=REG_BINARY vdata='Salty Data'
CLI Example:
.. code-block:: bash
An example of using vtype REG_MULTI_SZ is as follows:
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_LIST vdata='[a,b,c]'
.. code-block:: bash
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'list_data' vtype=REG_MULTI_SZ vdata='["Salt", "is", "great"]'
'''
return __utils__['reg.set_value'](hive=hive,
key=key,
@ -311,33 +403,38 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
'''
.. versionadded:: 2015.5.4
Delete a registry key to include all subkeys.
Delete a registry key to include all subkeys and value/data pairs.
:param hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param key: The key to remove (looks like a path)
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Deletes the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
key (str):
The key to remove (looks like a path)
:return: A dictionary listing the keys that deleted successfully as well as
those that failed to delete.
:rtype: dict
use_32bit_registry (bool):
Deletes the 32bit portion of the registry on 64bit
installations. On 32bit machines this is ignored.
The following example will remove ``salt`` and all its subkeys from the
``SOFTWARE`` key in ``HKEY_LOCAL_MACHINE``:
Returns:
dict: A dictionary listing the keys that deleted successfully as well as
those that failed to delete.
CLI Example:
.. code-block:: bash
The following example will remove ``delete_me`` and all its subkeys from the
``SOFTWARE`` key in ``HKEY_LOCAL_MACHINE``:
salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
.. code-block:: bash
salt '*' reg.delete_key_recursive HKLM SOFTWARE\\delete_me
'''
return __utils__['reg.delete_key_recursive'](hive=hive,
key=key,
@ -348,30 +445,36 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
'''
Delete a registry value entry or the default value for a key.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be deleted.
key (str):
The key (looks like a path) to the value name.
:param bool use_32bit_registry: Deletes the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be deleted.
:return: Returns True if successful, False if not
:rtype: bool
use_32bit_registry (bool):
Deletes the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
Return:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
'''
return __utils__['reg.delete_value'](hive=hive,
key=key,
@ -385,32 +488,30 @@ def import_file(source, use_32bit_registry=False):
.. versionadded:: 2018.3.0
Usage:
Args:
source (str):
The full path of the ``REG`` file. This can be either a local file
path or a URL type supported by salt (e.g. ``salt://salt_master_path``)
use_32bit_registry (bool):
If the value of this parameter is ``True`` then the ``REG`` file
will be imported into the Windows 32 bit registry. Otherwise the
Windows 64 bit registry will be used.
Returns:
bool: True if successful, otherwise an error is raised
Raises:
ValueError: If the value of ``source`` is an invalid path or otherwise
causes ``cp.cache_file`` to return ``False``
CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg
:param str source: The full path of the ``REG`` file. This
can be either a local file path or a URL type supported by salt
(e.g. ``salt://salt_master_path``).
:param bool use_32bit_registry: If the value of this parameter is ``True``
then the ``REG`` file will be imported into the Windows 32 bit registry.
Otherwise the Windows 64 bit registry will be used.
:return: If the value of ``source`` is an invalid path or otherwise
causes ``cp.cache_file`` to return ``False`` then
the function will not return and
a ``ValueError`` exception will be raised.
If ``reg.exe`` exits with a non-0 exit code, then
a ``CommandExecutionError`` exception will be
raised. On success this function will return
``True``.
:rtype: bool
salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg
'''
cache_path = __salt__['cp.cache_file'](source)

View file

@ -17,8 +17,11 @@ import os
import re
import subprocess
import sys
import time
# Import salt libs
import salt.exceptions
import salt.utils.args
import salt.utils.files
import salt.utils.path
@ -32,6 +35,51 @@ try:
except ImportError:
pass
LIST_DIRS = [
# We don't care about log files
'^/var/log/',
'^/var/local/log/',
# Or about files under temporary locations
'^/var/run/',
'^/var/local/run/',
# Or about files under /tmp
'^/tmp/',
# Or about files under /dev/shm
'^/dev/shm/',
# Or about files under /run
'^/run/',
# Or about files under /drm
'^/drm',
# Or about files under /var/tmp and /var/local/tmp
'^/var/tmp/',
'^/var/local/tmp/',
# Or /dev/zero
'^/dev/zero',
# Or /dev/pts (used by gpm)
'^/dev/pts/',
# Or /usr/lib/locale
'^/usr/lib/locale/',
# Skip files from the user's home directories
# many processes hold temporafy files there
'^/home/',
# Skip automatically generated files
'^.*icon-theme.cache',
# Skip font files
'^/var/cache/fontconfig/',
# Skip Nagios Spool
'^/var/lib/nagios3/spool/',
# Skip nagios spool files
'^/var/lib/nagios3/spool/checkresults/',
# Skip Postgresql files
'^/var/lib/postgresql/',
# Skip VDR lib files
'^/var/lib/vdr/',
# Skip Aio files found in MySQL servers
'^/[aio]',
# ignore files under /SYSV
'^/SYSV'
]
def __virtual__():
'''
@ -55,63 +103,9 @@ def _valid_deleted_file(path):
ret = True
if re.compile(r"\(path inode=[0-9]+\)$").search(path):
ret = True
# We don't care about log files
if path.startswith('/var/log/') or path.startswith('/var/local/log/'):
ret = False
# Or about files under temporary locations
if path.startswith('/var/run/') or path.startswith('/var/local/run/'):
ret = False
# Or about files under /tmp
if path.startswith('/tmp/'):
ret = False
# Or about files under /dev/shm
if path.startswith('/dev/shm/'):
ret = False
# Or about files under /run
if path.startswith('/run/'):
ret = False
# Or about files under /drm
if path.startswith('/drm'):
ret = False
# Or about files under /var/tmp and /var/local/tmp
if path.startswith('/var/tmp/') or path.startswith('/var/local/tmp/'):
ret = False
# Or /dev/zero
if path.startswith('/dev/zero'):
ret = False
# Or /dev/pts (used by gpm)
if path.startswith('/dev/pts/'):
ret = False
# Or /usr/lib/locale
if path.startswith('/usr/lib/locale/'):
ret = False
# Skip files from the user's home directories
# many processes hold temporafy files there
if path.startswith('/home/'):
ret = False
# Skip automatically generated files
if path.endswith('icon-theme.cache'):
ret = False
# Skip font files
if path.startswith('/var/cache/fontconfig/'):
ret = False
# Skip Nagios Spool
if path.startswith('/var/lib/nagios3/spool/'):
ret = False
# Skip nagios spool files
if path.startswith('/var/lib/nagios3/spool/checkresults/'):
ret = False
# Skip Postgresql files
if path.startswith('/var/lib/postgresql/'):
ret = False
# Skip VDR lib files
if path.startswith('/var/lib/vdr/'):
ret = False
# Skip Aio files found in MySQL servers
if path.startswith('/[aio]'):
ret = False
# ignore files under /SYSV
if path.startswith('/SYSV'):
regex = re.compile("|".join(LIST_DIRS))
if regex.match(path):
ret = False
return ret
@ -126,32 +120,36 @@ def _deleted_files():
'''
deleted_files = []
for proc in psutil.process_iter():
for proc in psutil.process_iter(): # pylint: disable=too-many-nested-blocks
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
try:
maps = salt.utils.files.fopen('/proc/{0}/maps'.format(pinfo['pid'])) # pylint: disable=resource-leakage
dirpath = '/proc/' + six.text_type(pinfo['pid']) + '/fd/'
listdir = os.listdir(dirpath)
with salt.utils.files.fopen('/proc/{0}/maps'.format(pinfo['pid'])) as maps: # pylint: disable=resource-leakage
dirpath = '/proc/' + six.text_type(pinfo['pid']) + '/fd/'
listdir = os.listdir(dirpath)
maplines = maps.readlines()
except (OSError, IOError):
return False
yield False
# /proc/PID/maps
maplines = maps.readlines()
maps.close()
mapline = re.compile(r'^[\da-f]+-[\da-f]+ [r-][w-][x-][sp-] '
r'[\da-f]+ [\da-f]{2}:[\da-f]{2} (\d+) *(.+)( \(deleted\))?\n$')
for line in maplines:
line = salt.utils.stringutils.to_unicode(line)
matched = mapline.match(line)
if matched:
path = matched.group(2)
if path:
if _valid_deleted_file(path):
val = (pinfo['name'], pinfo['pid'], path[0:-10])
if val not in deleted_files:
deleted_files.append(val)
if not matched:
continue
path = matched.group(2)
if not path:
continue
valid = _valid_deleted_file(path)
if not valid:
continue
val = (pinfo['name'], pinfo['pid'], path[0:-10])
if val not in deleted_files:
deleted_files.append(val)
yield val
# /proc/PID/fd
try:
@ -168,18 +166,19 @@ def _deleted_files():
filenames.append(os.path.join(root, name))
for filename in filenames:
if _valid_deleted_file(filename):
val = (pinfo['name'], pinfo['pid'], filename)
if val not in deleted_files:
deleted_files.append(val)
valid = _valid_deleted_file(filename)
if not valid:
continue
val = (pinfo['name'], pinfo['pid'], filename)
if val not in deleted_files:
deleted_files.append(val)
yield val
except OSError:
pass
except psutil.NoSuchProcess:
pass
return deleted_files
def _format_output(kernel_restart, packages, verbose, restartable, nonrestartable, restartservicecommands,
restartinitcommands):
@ -213,7 +212,7 @@ def _format_output(kernel_restart, packages, verbose, restartable, nonrestartabl
ret += "Found {0} processes using old versions of upgraded files.\n".format(len(packages))
ret += "These are the packages:\n"
if len(restartable) > 0:
if restartable:
ret += "Of these, {0} seem to contain systemd service definitions or init scripts " \
"which can be used to restart them:\n".format(len(restartable))
for package in restartable:
@ -221,15 +220,15 @@ def _format_output(kernel_restart, packages, verbose, restartable, nonrestartabl
for program in packages[package]['processes']:
ret += program + '\n'
if len(restartservicecommands) > 0:
if restartservicecommands:
ret += "\n\nThese are the systemd services:\n"
ret += '\n'.join(restartservicecommands)
if len(restartinitcommands) > 0:
if restartinitcommands:
ret += "\n\nThese are the initd scripts:\n"
ret += '\n'.join(restartinitcommands)
if len(nonrestartable) > 0:
if nonrestartable:
ret += "\n\nThese processes {0} do not seem to have an associated init script " \
"to restart them:\n".format(len(nonrestartable))
for package in nonrestartable:
@ -315,7 +314,20 @@ def _kernel_versions_nilrt():
return kernel_versions
def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True):
def _check_timeout(start_time, timeout):
'''
Name of the last installed kernel, for Red Hat based systems.
Returns:
List with name of last installed kernel as it is interpreted in output of `uname -a` command.
'''
timeout_milisec = timeout * 60000
if timeout_milisec < (int(round(time.time() * 1000)) - start_time):
raise salt.exceptions.TimeoutError('Timeout expired.')
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
'''
Analyzes files opened by running processes and searches for packages which need to be restarted.
@ -324,6 +336,7 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
blacklist: string or list of file paths to be ignored
excludepid: string or list of process IDs to be ignored
verbose: boolean, enables extensive output
timeout: int, timeout in minutes
Returns:
True if no packages for restart found.
@ -337,7 +350,11 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
salt '*' restartcheck.restartcheck
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
start_time = int(round(time.time() * 1000))
kernel_restart = True
verbose = kwargs.pop('verbose', True)
timeout = kwargs.pop('timeout', 5)
if __grains__.get('os_family') == 'Debian':
cmd_pkg_query = 'dpkg-query --listfiles '
systemd_folder = '/lib/systemd/system/'
@ -358,11 +375,14 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
# Check kernel versions
kernel_current = __salt__['cmd.run']('uname -a')
for kernel in kernel_versions:
_check_timeout(start_time, timeout)
if kernel in kernel_current:
kernel_restart = False
break
packages = {}
running_services = {}
restart_services = []
if ignorelist:
if not isinstance(ignorelist, list):
@ -382,15 +402,19 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
else:
excludepid = []
deleted_files = _deleted_files()
if not isinstance(deleted_files, list):
return {'result': False, 'comment': 'Could not get list of processes. '
'(Do you have root access?)'}
for service in __salt__['service.get_running']():
_check_timeout(start_time, timeout)
service_show = __salt__['service.show'](service)
if 'ExecMainPID' in service_show:
running_services[service] = int(service_show['ExecMainPID'])
owners_cache = {}
for deleted_file in _deleted_files():
if deleted_file is False:
return {'result': False, 'comment': 'Could not get list of processes.'
' (Do you have root access?)'}
for deleted_file in deleted_files:
_check_timeout(start_time, timeout)
name, pid, path = deleted_file[0], deleted_file[1], deleted_file[2]
if path in blacklist or pid in excludepid:
continue
@ -406,6 +430,12 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
if not packagename:
packagename = name
owners_cache[readlink] = packagename
for running_service in running_services:
_check_timeout(start_time, timeout)
if running_service not in restart_services and pid == running_services[running_service]:
if packagename and packagename not in ignorelist:
restart_services.append(running_service)
name = running_service
if packagename and packagename not in ignorelist:
program = '\t' + six.text_type(pid) + ' ' + readlink + ' (file: ' + six.text_type(path) + ')'
if packagename not in packages:
@ -415,14 +445,16 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
if program not in packages[packagename]['processes']:
packages[packagename]['processes'].append(program)
if len(packages) == 0 and not kernel_restart:
if not packages and not kernel_restart:
return 'No packages seem to need to be restarted.'
for package in packages:
_check_timeout(start_time, timeout)
cmd = cmd_pkg_query + package
paths = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
while True:
_check_timeout(start_time, timeout)
line = paths.stdout.readline()
if not line:
break
@ -455,7 +487,8 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
# Alternatively, find init.d script or service that match the process name
for package in packages:
if len(packages[package]['systemdservice']) == 0 and len(packages[package]['initscripts']) == 0:
_check_timeout(start_time, timeout)
if not packages[package]['systemdservice'] and not packages[package]['initscripts']:
service = __salt__['service.available'](packages[package]['process_name'])
if service:
@ -470,14 +503,21 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, verbose=True)
restartservicecommands = []
for package in packages:
if len(packages[package]['initscripts']) > 0:
_check_timeout(start_time, timeout)
if packages[package]['initscripts']:
restartable.append(package)
restartinitcommands.extend(['service ' + s + ' restart' for s in packages[package]['initscripts']])
elif len(packages[package]['systemdservice']) > 0:
elif packages[package]['systemdservice']:
restartable.append(package)
restartservicecommands.extend(['systemctl restart ' + s for s in packages[package]['systemdservice']])
else:
nonrestartable.append(package)
if packages[package]['process_name'] in restart_services:
restart_services.remove(packages[package]['process_name'])
for restart_service in restart_services:
_check_timeout(start_time, timeout)
restartservicecommands.extend(['systemctl restart ' + restart_service])
ret = _format_output(kernel_restart, packages, verbose, restartable, nonrestartable,
restartservicecommands, restartinitcommands)

View file

@ -50,6 +50,10 @@ def __virtual__():
Only work on select distros which still use Red Hat's /usr/bin/service for
management of either sysvinit or a hybrid sysvinit/upstart init system.
'''
# Disable when booted with systemd
if __utils__['systemd.booted'](__context__):
return (False, 'The rh_service execution module failed to load: this system was booted with systemd.')
# Enable on these platforms only.
enable = set((
'XenServer',
@ -99,15 +103,6 @@ def __virtual__():
'RedHat-based distros >= version 7 use systemd, will not '
'load rh_service.py as virtual \'service\''
)
if __grains__['os'] == 'Amazon':
if int(osrelease_major) in (2016, 2017):
return __virtualname__
else:
return (
False,
'Amazon Linux >= version 2 uses systemd. Will not '
'load rh_service.py as virtual \'service\''
)
return __virtualname__
return (False, 'Cannot load rh_service module: OS not in {0}'.format(enable))

View file

@ -87,7 +87,7 @@ def _get_username(member):
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace')
'/', '\\')
def add(name, **kwargs):

View file

@ -429,6 +429,103 @@ class _policy_info(object):
None: 'Not Defined',
'(value not set)': 'Not Defined',
}
self.force_key_protection = {
0: 'User input is not required when new keys are stored and used',
1: 'User is prompted when the key is first used',
2: 'User must enter a password each time they use a key',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.krb_encryption_types = {
0: 'No minimum',
1: 'DES_CBC_CRC',
2: 'DES_CBD_MD5',
4: 'RC4_MHAC_MD5',
8: 'AES128_HMAC_SHA1',
16: 'AES256_HMAC_SHA1',
2147483616: 'Future Encryption Types',
None: 'Not Defined',
'(value not set)': 'Not Defined',
}
self.lm_compat_levels = {
0: 'Send LM & NTLM response',
1: 'Send LM & NTLM - use NTLMv2 session security if negotiated',
2: 'Send NTLM response only',
3: 'Send NTLMv2 response only',
4: 'Send NTLMv2 response only. Refuse LM',
5: 'Send NTLMv2 response only. Refuse LM & NTLM',
None: 'Not Defined',
'(value not set)': 'Not Defined',
}
self.ldap_signing_reqs = {
0: 'None',
1: 'Negotiate signing',
2: 'Require signing',
None: 'Not Defined',
'(value not set)': 'Not Defined',
}
self.ntlm_session_security_levels = {
0: 'No minimum',
524288: 'Require NTLMv2 session security',
536870912: 'Require 128-bit encryption',
None: 'Not Defined',
'(value not set)': 'Not Defined',
}
self.ntlm_audit_settings = {
0: 'Disable',
1: 'Enable auditing for domain accounts',
2: 'Enable auditing for all accounts',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.ntlm_domain_audit_settings = {
0: 'Disable',
1: 'Enable for domain accounts to domain servers',
3: 'Enable for domain accounts',
5: 'Enable for domain servers',
7: 'Enable all',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.incoming_ntlm_settings = {
0: 'Allow all',
1: 'Deny all domain accounts',
2: 'Deny all accounts',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.ntlm_domain_auth_settings = {
0: 'Disable',
1: 'Deny for domain accounts to domain servers',
3: 'Deny for domain accounts',
5: 'Deny for domain servers',
7: 'Deny all',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.outgoing_ntlm_settings = {
0: 'Allow all',
1: 'Audit all',
2: 'Deny all',
None: 'Not Defined',
'(value not set)': 'Not Defined'
}
self.enabled_one_disabled_zero_no_not_defined = {
0: 'Disabled',
1: 'Enabled',
}
self.enabled_one_disabled_zero_no_not_defined_transform = {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.enabled_one_disabled_zero_no_not_defined,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.enabled_one_disabled_zero_no_not_defined,
'value_lookup': True,
},
}
self.policies = {
'Machine': {
'lgpo_section': 'Computer Configuration',
@ -547,12 +644,12 @@ class _policy_info(object):
'Policy': 'Network access: Allow anonymous SID/Name '
'translation',
'lgpo_section': self.password_policy_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'Secedit': {
'Option': 'LSAAnonymousNameLookup',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_transform,
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'RestrictAnonymousSam': {
'Policy': 'Network access: Do not allow anonymous '
@ -618,6 +715,9 @@ class _policy_info(object):
'Value': 'NullSessionPipes',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'RemoteRegistryExactPaths': {
'Policy': 'Network access: Remotely accessible '
@ -631,6 +731,9 @@ class _policy_info(object):
'Value': 'Machine',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'RemoteRegistryPaths': {
'Policy': 'Network access: Remotely accessible '
@ -643,6 +746,9 @@ class _policy_info(object):
'Value': 'Machine',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'RestrictNullSessAccess': {
'Policy': 'Network access: Restrict anonymous access '
@ -669,6 +775,9 @@ class _policy_info(object):
'Value': 'NullSessionShares',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'ForceGuest': {
'Policy': 'Network access: Sharing and security model '
@ -757,32 +866,32 @@ class _policy_info(object):
'PasswordComplexity': {
'Policy': 'Password must meet complexity requirements',
'lgpo_section': self.password_policy_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'Secedit': {
'Option': 'PasswordComplexity',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_transform,
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'ClearTextPasswords': {
'Policy': 'Store passwords using reversible encryption',
'lgpo_section': self.password_policy_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'Secedit': {
'Option': 'ClearTextPassword',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_transform,
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'AdminAccountStatus': {
'Policy': 'Accounts: Administrator account status',
'Settings': self.enabled_one_disabled_zero.keys(),
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Secedit': {
'Option': 'EnableAdminAccount',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_transform,
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'NoConnectedUser': {
'Policy': 'Accounts: Block Microsoft accounts',
@ -810,13 +919,13 @@ class _policy_info(object):
},
'GuestAccountStatus': {
'Policy': 'Accounts: Guest account status',
'Settings': self.enabled_one_disabled_zero.keys(),
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Secedit': {
'Option': 'EnableGuestAccount',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_transform,
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'LimitBlankPasswordUse': {
'Policy': 'Accounts: Limit local account use of blank '
@ -1193,6 +1302,9 @@ class _policy_info(object):
'Value': 'legalnoticetext',
'Type': 'REG_SZ',
},
'Transform': {
'Put': '_string_put_transform'
}
},
'legalnoticecaption': {
'Policy': 'Interactive logon: Message title for users '
@ -1205,6 +1317,9 @@ class _policy_info(object):
'Value': 'legalnoticecaption',
'Type': 'REG_SZ',
},
'Transform': {
'Put': '_string_put_transform'
}
},
'DontDisplayLockedUserId': {
'Policy': 'Interactive logon: Display user information '
@ -2291,7 +2406,7 @@ class _policy_info(object):
},
},
'SeTakeOwnershipPrivilege': {
'Policy': 'Take ownership of files and other objects',
'Policy': 'Take ownership of files or other objects',
'lgpo_section': self.user_rights_assignment_gpedit_path,
'Settings': None,
'LsaRights': {
@ -2330,6 +2445,494 @@ class _policy_info(object):
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'ForceKeyProtection': {
'Policy': 'System Cryptography: Force strong key protection for '
'user keys stored on the computer',
'Settings': self.force_key_protection.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'Software\\Policies\\Microsoft\\Cryptography',
'Value': 'ForceKeyProtection',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.force_key_protection,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.force_key_protection,
'value_lookup': True,
},
},
},
'FIPSAlgorithmPolicy': {
'Policy': 'System Cryptography: Use FIPS compliant algorithms '
'for encryption, hashing, and signing',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\Lsa\\FIPSAlgorithmPolicy',
'Value': 'Enabled',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'MachineAccessRestriction': {
'Policy': 'DCOM: Machine Access Restrictions in Security Descriptor '
'Definition Language (SDDL) syntax',
'Settings': None,
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'Software\\Policies\\Microsoft\\Windows NT\\DCOM',
'Value': 'MachineAccessRestriction',
'Type': 'REG_SZ',
},
'Transform': {
'Put': '_string_put_transform'
}
},
'MachineLaunchRestriction': {
'Policy': 'DCOM: Machine Launch Restrictions in Security Descriptor '
'Definition Language (SDDL) syntax',
'Settings': None,
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'Software\\Policies\\Microsoft\\Windows NT\\DCOM',
'Value': 'MachineLaunchRestriction',
'Type': 'REG_SZ',
},
'Transform': {
'Put': '_string_put_transform'
}
},
'AddPrinterDrivers': {
'Policy': 'Devices: Prevent users from installing printer drivers',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Print\\'
'Providers\\LanMan Print Services\\Servers',
'Value': 'AddPrinterDrivers',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'UseMachineId': {
'Policy': 'Network security: Allow Local System to use computer '
'identity for NTLM',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa',
'Value': 'UseMachineId',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'allownullsessionfallback': {
'Policy': 'Network security: Allow LocalSystem NULL session fallback',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa\\MSV1_0',
'Value': 'allownullsessionfallback',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'AllowOnlineID': {
'Policy': 'Network security: Allow PKU2U authentication requests '
'to this computer to use online identities.',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa\\pku2u',
'Value': 'AllowOnlineID',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'KrbSupportedEncryptionTypes': {
'Policy': 'Network security: Configure encryption types allowed '
'for Kerberos',
'Settings': None,
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\policies'
'\\system\\Kerberos\\Parameters',
'Value': 'SupportedEncryptionTypes',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup_bitwise_add',
'Put': '_dict_lookup_bitwise_add',
'GetArgs': {
'lookup': self.krb_encryption_types,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.krb_encryption_types,
'value_lookup': True,
},
},
},
'NoLMHash': {
'Policy': 'Network security: Do not store LAN Manager hash value '
'on next password change',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa',
'Value': 'NoLMHash',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'ForceLogoffWhenHourExpire': {
'Policy': 'Network security: Force logoff when logon hours expire',
'lgpo_section': self.security_options_gpedit_path,
'Settings': self.enabled_one_disabled_zero_no_not_defined.keys(),
'Secedit': {
'Option': 'ForceLogoffWhenHourExpire',
'Section': 'System Access',
},
'Transform': self.enabled_one_disabled_zero_no_not_defined_transform,
},
'LmCompatibilityLevel': {
'Policy': 'Network security: LAN Manager authentication level',
'Settings': self.lm_compat_levels.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa',
'Value': 'LmCompatibilityLevel',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.lm_compat_levels,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.lm_compat_levels,
'value_lookup': True,
},
},
},
'LDAPClientIntegrity': {
'Policy': 'Network security: LDAP client signing requirements',
'Settings': self.ldap_signing_reqs.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Services\\ldap',
'Value': 'LDAPClientIntegrity',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.ldap_signing_reqs,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ldap_signing_reqs,
'value_lookup': True,
},
},
},
'NTLMMinClientSec': {
'Policy': 'Network security: Minimum session security for NTLM SSP based '
'(including secure RPC) clients',
'Settings': None,
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\Lsa\\MSV1_0',
'Value': 'NTLMMinClientSec',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup_bitwise_add',
'Put': '_dict_lookup_bitwise_add',
'GetArgs': {
'lookup': self.ntlm_session_security_levels,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ntlm_session_security_levels,
'value_lookup': True,
},
},
},
'NTLMMinServerSec': {
'Policy': 'Network security: Minimum session security for NTLM SSP based '
'(including secure RPC) servers',
'Settings': None,
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\Lsa\\MSV1_0',
'Value': 'NTLMMinServerSec',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup_bitwise_add',
'Put': '_dict_lookup_bitwise_add',
'GetArgs': {
'lookup': self.ntlm_session_security_levels,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ntlm_session_security_levels,
'value_lookup': True,
},
},
},
'ClientAllowedNTLMServers': {
'Policy': 'Network security: Restrict NTLM: Add remote server'
' exceptions for NTLM authentication',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\Lsa\\MSV1_0',
'Value': 'ClientAllowedNTLMServers',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'DCAllowedNTLMServers': {
'Policy': 'Network security: Restrict NTLM: Add server exceptions'
' in this domain',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Services\\Netlogon\\Parameters',
'Value': 'DCAllowedNTLMServers',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'AuditReceivingNTLMTraffic': {
'Policy': 'Network security: Restrict NTLM: Audit Incoming NTLM Traffic',
'Settings': self.ntlm_audit_settings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\LSA\\MSV1_0',
'Value': 'AuditReceivingNTLMTraffic',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.ntlm_audit_settings,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ntlm_audit_settings,
'value_lookup': True,
},
},
},
'AuditNTLMInDomain': {
'Policy': 'Network security: Restrict NTLM: Audit NTLM '
'authentication in this domain',
'Settings': self.ntlm_domain_audit_settings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Services\\Netlogon\\Parameters',
'Value': 'AuditNTLMInDomain',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.ntlm_domain_audit_settings,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ntlm_domain_audit_settings,
'value_lookup': True,
},
},
},
'RestrictReceivingNTLMTraffic': {
'Policy': 'Network security: Restrict NTLM: Incoming'
' NTLM traffic',
'Settings': self.incoming_ntlm_settings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\LSA\\MSV1_0',
'Value': 'RestrictReceivingNTLMTraffic',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.incoming_ntlm_settings,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.incoming_ntlm_settings,
'value_lookup': True,
},
},
},
'RestrictNTLMInDomain': {
'Policy': 'Network security: Restrict NTLM: NTLM '
'authentication in this domain',
'Settings': self.ntlm_domain_auth_settings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Services\\Netlogon\\Parameters',
'Value': 'RestrictNTLMInDomain',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.ntlm_domain_auth_settings,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.ntlm_domain_auth_settings,
'value_lookup': True,
},
},
},
'RestrictSendingNTLMTraffic': {
'Policy': 'Network security: Restrict NTLM: Outgoing NTLM'
' traffic to remote servers',
'Settings': self.outgoing_ntlm_settings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SYSTEM\\CurrentControlSet\\Control\\Lsa\\MSV1_0',
'Value': 'RestrictSendingNTLMTraffic',
'Type': 'REG_DWORD',
},
'Transform': {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.outgoing_ntlm_settings,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.outgoing_ntlm_settings,
'value_lookup': True,
},
},
},
'ShutdownWithoutLogon': {
'Policy': 'Shutdown: Allow system to be shut down '
'without having to log on',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\policies\\system',
'Value': 'ShutdownWithoutLogon',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'ClearPageFileAtShutdown': {
'Policy': 'Shutdown: Clear virtual memory pagefile',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\'
'SESSION MANAGER\\MEMORY MANAGEMENT',
'Value': 'ClearPageFileAtShutdown',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'ObCaseInsensitive': {
'Policy': 'System objects: Require case insensitivity for '
'non-Windows subsystems',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\'
'SESSION MANAGER\\Kernel',
'Value': 'ObCaseInsensitive',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'ProtectionMode': {
'Policy': 'System objects: Strengthen default permissions of '
'internal system objects (e.g. Symbolic Links)',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\'
'SESSION MANAGER',
'Value': 'ProtectionMode',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
'OptionalSubsystems': {
'Policy': 'System settings: Optional subsystems',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'System\\CurrentControlSet\\Control\\'
'SESSION MANAGER\\SubSystems',
'Value': 'optional',
'Type': 'REG_MULTI_SZ'
},
'Transform': {
'Put': '_multi_string_put_transform'
}
},
'AuthenticodeEnabled': {
'Policy': 'System settings: Use Certificate Rules on Windows'
' Executables for Software Restriction Policies',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
'Path': 'SOFTWARE\\Policies\\Microsoft\\Windows\\safer\\codeidentifiers',
'Value': 'AuthenticodeEnabled',
'Type': 'REG_DWORD',
},
'Transform': self.enabled_one_disabled_zero_transform,
},
}
},
'User': {
@ -2667,6 +3270,76 @@ class _policy_info(object):
return v
return 'Invalid Value'
@classmethod
def _dict_lookup_bitwise_add(cls, item, **kwargs):
    '''
    Map between a bitmask integer and the value names it encodes.

    kwarg ``lookup`` must be a dict whose keys are integers (bit values).
    kwarg ``value_lookup``: when True, ``item`` is expected to be a list of
    value names and the sum of the keys whose values appear in that list is
    returned; when False, ``item`` is expected to be an integer and the
    values for every key that "bitwise and"s cleanly with it are returned.
    kwarg ``test_zero``: when False (the default), the 0 key is skipped
    during the integer-to-names direction.
    '''
    if str(item).lower() == 'not defined':
        return None
    use_value_names = kwargs.get('value_lookup', False)
    include_zero = kwargs.get('test_zero', False)
    if 'lookup' not in kwargs:
        return 'Invalid Value'
    if use_value_names:
        # names -> summed bitmask
        if not isinstance(item, list):
            return 'Invalid Value'
        total = 0
        for bit, label in six.iteritems(kwargs['lookup']):
            if six.text_type(label).lower() in [name.lower() for name in item]:
                total = total + bit
        return total
    # bitmask -> list of names
    if not isinstance(item, six.integer_types):
        return 'Invalid Value'
    names = []
    for bit, label in six.iteritems(kwargs['lookup']):
        if bit == 0 and not include_zero:
            continue
        if isinstance(bit, int) and item & bit == bit:
            names.append(label)
    return names
@classmethod
def _multi_string_put_transform(cls, item, **kwargs):
    '''
    Prepare a REG_MULTI_SZ value for writing, mapping "Not Defined" to None.

    A list passes through untouched; a comma-separated string is split into
    a list; any other type is rejected with 'Invalid Value'.
    '''
    if isinstance(item, list):
        return item
    if not isinstance(item, six.string_types):
        return 'Invalid Value'
    return None if item.lower() == 'not defined' else item.split(',')
@classmethod
def _string_put_transform(cls, item, **kwargs):
    '''
    Transform for a REG_SZ value to properly handle "Not Defined".

    Returns None for the literal string "Not Defined" (case-insensitive)
    and the string itself otherwise.
    '''
    if isinstance(item, six.string_types):
        if item.lower() == 'not defined':
            return None
        else:
            return item
    # NOTE(review): non-string input falls through and implicitly returns
    # None, whereas the sibling _multi_string_put_transform returns
    # 'Invalid Value' for unrecognized types -- confirm this asymmetry
    # is intentional.
def __virtual__():
'''
@ -5482,10 +6155,15 @@ def set_(computer_policy=None, user_policy=None,
_regedits[regedit]['value'],
_regedits[regedit]['policy']['Registry']['Type'])
else:
_ret = __salt__['reg.delete_value'](
_ret = __salt__['reg.read_value'](
_regedits[regedit]['policy']['Registry']['Hive'],
_regedits[regedit]['policy']['Registry']['Path'],
_regedits[regedit]['policy']['Registry']['Value'])
if _ret['success'] and _ret['vdata'] != '(value not set)':
_ret = __salt__['reg.delete_value'](
_regedits[regedit]['policy']['Registry']['Hive'],
_regedits[regedit]['policy']['Registry']['Path'],
_regedits[regedit]['policy']['Registry']['Value'])
if not _ret:
msg = ('Error while attempting to set policy {0} via the registry.'
' Some changes may not be applied as expected')

View file

@ -128,6 +128,6 @@ def start():
raise SystemExit(1)
try:
tornado.ioloop.IOLoop.instance().start()
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
raise SystemExit(0)

View file

@ -202,19 +202,18 @@ import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.concurrent import Future
from zmq.eventloop import ioloop
from salt.ext import six
# pylint: enable=import-error
# instantiate the zmq IOLoop (specialized poller)
ioloop.install()
import salt.utils
salt.utils.zeromq.install_zmq()
# salt imports
import salt.ext.six as six
import salt.netapi
import salt.utils.args
import salt.utils.event
import salt.utils.json
import salt.utils.yaml
import salt.utils.minions
from salt.utils.event import tagify
import salt.client
import salt.runner
@ -383,7 +382,7 @@ class EventListener(object):
for (tag, matcher), futures in six.iteritems(self.tag_map):
try:
is_matched = matcher(mtag, tag)
except Exception as e:
except Exception:
log.error('Failed to run a matcher.', exc_info=True)
is_matched = False
@ -438,6 +437,9 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
'runner_async': None, # empty, since we use the same client as `runner`
}
if not hasattr(self, 'ckminions'):
self.ckminions = salt.utils.minions.CkMinions(self.application.opts)
@property
def token(self):
'''
@ -939,7 +941,12 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
'''
Dispatch local client commands
'''
chunk_ret = {}
# Generate jid before triggering a job to subscribe all returns from minions
chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts)
# Subscribe returns from minions before firing a job
minions = self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob')).get('minions', list())
future_minion_map = self.subscribe_minion_returns(chunk['jid'], minions)
f_call = self._format_call_run_job_async(chunk)
# fire a job off
@ -948,26 +955,29 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
# if the job didn't publish, lets not wait around for nothing
# TODO: set header??
if 'jid' not in pub_data:
for future in future_minion_map:
try:
future.set_result(None)
except Exception:
pass
raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.')
# seed minions_remaining with the pub_data
minions_remaining = pub_data['minions']
# wait syndic a while to avoid missing published events
if self.application.opts['order_masters']:
yield tornado.gen.sleep(self.application.opts['syndic_wait'])
# To ensure job_not_running and all_return are terminated by each other, communicate using a future
is_finished = Future()
job_not_running_future = self.job_not_running(pub_data['jid'],
chunk['tgt'],
f_call['kwargs']['tgt_type'],
is_finished,
minions_remaining=list(minions_remaining),
)
is_finished)
minion_returns_future = self.sanitize_minion_returns(future_minion_map, pub_data['minions'], is_finished)
all_return_future = self.all_returns(pub_data['jid'],
is_finished,
minions_remaining=list(minions_remaining),
)
yield job_not_running_future
raise tornado.gen.Return((yield all_return_future))
raise tornado.gen.Return((yield minion_returns_future))
def subscribe_minion_returns(self, jid, minions):
# Subscribe each minion event
@ -981,39 +991,30 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
return future_minion_map
@tornado.gen.coroutine
def all_returns(self,
jid,
is_finished,
minions_remaining=None,
):
def sanitize_minion_returns(self, future_minion_map, minions, is_finished):
'''
Return a future which will complete once all returns are completed
(according to minions_remaining), or one of the passed in "is_finished" completes
(according to minions), or one of the passed in "finish_chunk_ret_future" completes
'''
if minions_remaining is None:
minions_remaining = []
if minions is None:
minions = []
# Remove redundant minions
redundant_minion_futures = [future for future in future_minion_map.keys() if future_minion_map[future] not in minions]
for redundant_minion_future in redundant_minion_futures:
try:
redundant_minion_future.set_result(None)
except Exception:
pass
del future_minion_map[redundant_minion_future]
chunk_ret = {}
minion_events = {}
syndic_min_wait = 0
if self.application.opts['order_masters']:
syndic_min_wait = self.application.opts['syndic_wait']
for minion in minions_remaining:
tag = tagify([jid, 'ret', minion], 'job')
minion_event = self.application.event_listener.get_event(self,
tag=tag,
matcher=EventListener.exact_matcher,
timeout=self.application.opts['timeout'] + syndic_min_wait)
minion_events[minion_event] = minion
while True:
f = yield Any(list(minion_events.keys()) + [is_finished])
f = yield Any(list(future_minion_map.keys()) + [is_finished])
try:
# When finished entire routine, cleanup other futures and return result
if f is is_finished:
for event in minion_events:
for event in future_minion_map.keys():
if not event.done():
event.set_result(None)
raise tornado.gen.Return(chunk_ret)
@ -1024,31 +1025,22 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
# clear finished event future
try:
minions_remaining.remove(minion_events[f])
del minion_events[f]
minions.remove(future_minion_map[f])
del future_minion_map[f]
except ValueError:
pass
if len(minions_remaining) == 0:
if not minions:
if not is_finished.done():
is_finished.set_result(True)
raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def job_not_running(self,
jid,
tgt,
tgt_type,
is_finished,
minions_remaining=None,
):
def job_not_running(self, jid, tgt, tgt_type, is_finished):
'''
Return a future which will complete once jid (passed in) is no longer
running on tgt
'''
if minions_remaining is None:
minions_remaining = []
ping_pub_data = yield self.saltclients['local'](tgt,
'saltutil.find_job',
[jid],
@ -1081,13 +1073,11 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job')
minion_running = False
continue
# Minions can return, we want to see if the job is running...
if event['data'].get('return', {}) == {}:
continue
minion_running = True
id_ = event['data']['id']
if id_ not in minions_remaining:
minions_remaining.append(event['data']['id'])
@tornado.gen.coroutine
def _disbatch_local_async(self, chunk):

View file

@ -59,6 +59,22 @@ optional. The following ssl options are simply for illustration purposes:
alternative.pgjsonb.ssl_cert: '/etc/pki/mysql/certs/localhost.crt'
alternative.pgjsonb.ssl_key: '/etc/pki/mysql/certs/localhost.key'
Should you wish the returner data to be cleaned out every so often, set
``keep_jobs`` to the number of hours for the jobs to live in the tables.
Setting it to ``0`` or leaving it unset will cause the data to stay in the tables.
Should you wish to archive jobs in a different table for later processing,
set ``archive_jobs`` to True. Salt will create 3 archive tables
- ``jids_archive``
- ``salt_returns_archive``
- ``salt_events_archive``
and move the contents of ``jids``, ``salt_returns``, and ``salt_events`` that are
more than ``keep_jobs`` hours old to these tables.
.. versionadded:: Fluorine
Use the following Pg database schema:
.. code-block:: sql
@ -417,3 +433,126 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
def _purge_jobs(timestamp):
    '''
    Purge records from the returner tables.

    Deletes rows from ``jids``, ``salt_returns`` and ``salt_events`` whose
    ``alter_time`` is older than ``timestamp``. Each DELETE is committed
    individually; on a database error the statement is rolled back, the
    error is written to stderr and the exception re-raised.

    :param timestamp: Purge rows with ``alter_time`` older than this
    :return: True on success
    '''
    # The three deletes are identical except for the SQL text, so run
    # them through one loop instead of three copy-pasted try blocks.
    statements = (
        'delete from jids where jid in '
        '(select distinct jid from salt_returns where alter_time < %s)',
        'delete from salt_returns where alter_time < %s',
        'delete from salt_events where alter_time < %s',
    )
    with _get_serv() as cursor:
        for sql in statements:
            try:
                cursor.execute(sql, (timestamp,))
                cursor.execute('COMMIT')
            except psycopg2.DatabaseError as err:
                sys.stderr.write(six.text_type(err.args))
                cursor.execute("ROLLBACK")
                raise err
    return True
def _archive_jobs(timestamp):
    '''
    Copy rows to a set of backup tables, then purge them.

    For each of ``jids``, ``salt_returns`` and ``salt_events`` a matching
    ``<table>_archive`` table is created if missing, rows older than
    ``timestamp`` are copied into it, and finally :func:`_purge_jobs`
    removes them from the live tables. Database errors are rolled back,
    logged to stderr and re-raised.

    :param timestamp: Archive rows with ``alter_time`` older than this
    :return: result of ``_purge_jobs(timestamp)``
    '''
    source_tables = ['jids', 'salt_returns', 'salt_events']
    with _get_serv() as cursor:
        # Ensure every archive table exists before copying anything.
        target_tables = {}
        for table_name in source_tables:
            archive_name = table_name + '_archive'
            try:
                cursor.execute(
                    'create table IF NOT exists {0} (LIKE {1})'.format(
                        archive_name, table_name))
                cursor.execute('COMMIT')
            except psycopg2.DatabaseError as err:
                sys.stderr.write(six.text_type(err.args))
                cursor.execute("ROLLBACK")
                raise err
            target_tables[table_name] = archive_name

        # jids has no alter_time column, so it is filtered via the jids
        # still referenced by old salt_returns rows.
        copy_statements = (
            'insert into {0} select * from jids where jid in '
            '(select distinct jid from salt_returns where alter_time < %s)'.format(
                target_tables['jids']),
            'insert into {0} select * from salt_returns '
            'where alter_time < %s'.format(target_tables['salt_returns']),
            'insert into {0} select * from salt_events '
            'where alter_time < %s'.format(target_tables['salt_events']),
        )
        for sql in copy_statements:
            try:
                cursor.execute(sql, (timestamp,))
                cursor.execute('COMMIT')
            except psycopg2.DatabaseError as err:
                sys.stderr.write(six.text_type(err.args))
                cursor.execute("ROLLBACK")
                raise err
            except Exception as e:
                # Original code only logged unexpected errors for the jids
                # copy; log them consistently for all three before
                # re-raising.
                log.error(e)
                raise
    return _purge_jobs(timestamp)
def clean_old_jobs():
    '''
    Called in the master's event loop every loop_interval. Archives and/or
    deletes the events and job details from the database.

    Does nothing unless ``keep_jobs`` is configured to a value greater
    than zero; with ``archive_jobs`` set, rows are archived instead of
    purged.

    :return: None
    '''
    keep_jobs = __opts__.get('keep_jobs', False)
    if not keep_jobs or int(keep_jobs) <= 0:
        return
    try:
        # Let the database compute the cutoff timestamp so it agrees
        # with the server clock used for alter_time.
        with _get_serv() as cur:
            cur.execute(
                "select (NOW() - interval '{0}' hour) as stamp;".format(
                    __opts__['keep_jobs']))
            stamp = cur.fetchall()[0][0]
        if __opts__.get('archive_jobs', False):
            _archive_jobs(stamp)
        else:
            _purge_jobs(stamp)
    except Exception as e:
        log.error(e)

View file

@ -12,9 +12,18 @@ config, these are the defaults:
redis.host: 'salt'
redis.port: 6379
.. versionadded:: 2018.3.1
Alternatively a UNIX socket can be specified by `unix_socket_path`:
.. code-block:: yaml
redis.db: '0'
redis.unix_socket_path: /var/run/redis/redis.sock
Cluster Mode Example:
.. code-block::yaml
.. code-block:: yaml
redis.db: '0'
redis.cluster_mode: true
@ -66,7 +75,7 @@ cluster.startup_nodes:
A list of host, port dictionaries pointing to cluster members. At least one is required
but multiple nodes are better
.. code-block::yaml
.. code-block:: yaml
cache.redis.cluster.startup_nodes
- host: redis-member-1
@ -110,6 +119,8 @@ try:
except ImportError:
HAS_REDIS_CLUSTER = False
REDIS_POOL = None
# Define the module's virtual name
__virtualname__ = 'redis'
@ -124,7 +135,7 @@ def __virtual__():
if not HAS_REDIS:
return False, 'Could not import redis returner; ' \
'redis python client is not installed.'
if not HAS_REDIS_CLUSTER and _get_options()['cluster_mode']:
if not HAS_REDIS_CLUSTER and _get_options().get('cluster_mode', False):
return (False, "Please install the redis-py-cluster package.")
return __virtualname__
@ -135,16 +146,18 @@ def _get_options(ret=None):
'''
attrs = {'host': 'host',
'port': 'port',
'unix_socket_path': 'unix_socket_path',
'db': 'db',
'cluster_mode': 'cluster_mode',
'startup_nodes': 'cluster.startup_nodes',
'skip_full_coverage_check': 'cluster.skip_full_coverage_check',
}
}
if salt.utils.platform.is_proxy():
return {
'host': __opts__.get('redis.host', 'salt'),
'port': __opts__.get('redis.port', 6379),
'unix_socket_path': __opts__.get('redis.unix_socket_path', None),
'db': __opts__.get('redis.db', '0'),
'cluster_mode': __opts__.get('redis.cluster_mode', False),
'startup_nodes': __opts__.get('redis.cluster.startup_nodes', {}),
@ -159,31 +172,24 @@ def _get_options(ret=None):
return _options
CONN_POOL = None
def _get_conn_pool(_options):
global CONN_POOL
if CONN_POOL is None:
CONN_POOL = redis.ConnectionPool(host=_options.get('host'),
port=_options.get('port'),
db=_options.get('db'))
return CONN_POOL
def _get_serv(ret=None):
'''
Return a redis server object
'''
_options = _get_options(ret)
if _options.get('cluster_mode'):
return StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),
skip_full_coverage_check=_options.get('skip_full_coverage_check'),
decode_responses=True)
global REDIS_POOL
if REDIS_POOL:
return REDIS_POOL
elif _options.get('cluster_mode'):
REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),
skip_full_coverage_check=_options.get('skip_full_coverage_check'),
decode_responses=True)
else:
pool = _get_conn_pool(_options)
return redis.StrictRedis(connection_pool=pool)
REDIS_POOL = redis.StrictRedis(host=_options.get('host'),
port=_options.get('port'),
unix_socket_path=_options.get('unix_socket_path', None),
db=_options.get('db'))
return REDIS_POOL
def _get_ttl():

View file

@ -3012,7 +3012,7 @@ class BaseHighState(object):
'top_file_merging_strategy set to \'same\', but no '
'default_top configuration option was set'
)
self.opts['environment'] = self.opts['default_top']
self.opts['saltenv'] = self.opts['default_top']
if self.opts['saltenv']:
contents = self.client.cache_file(

View file

@ -649,21 +649,6 @@ def extracted(name,
# Remove pub kwargs as they're irrelevant here.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '
'do the same thing, \'keep\' was ignored.'
)
keep_source = bool(kwargs.pop('keep_source'))
kwargs.pop('keep')
elif 'keep_source' in kwargs:
keep_source = bool(kwargs.pop('keep_source'))
elif 'keep' in kwargs:
keep_source = bool(kwargs.pop('keep'))
else:
# Neither was passed, default is True
keep_source = True
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '

View file

@ -1747,7 +1747,7 @@ def running(name,
# container does not already exist)
try:
temp_container = __salt__['docker.create'](
image_id,
image,
name=name if not exists else None,
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,
@ -2227,7 +2227,7 @@ def run(name,
try:
if 'networks' in kwargs and kwargs['networks'] is not None:
kwargs['networks'] = _parse_networks(kwargs['networks'])
image_id = _resolve_image(ret, image, client_timeout)
_resolve_image(ret, image, client_timeout)
except CommandExecutionError as exc:
ret['result'] = False
if exc.info is not None:
@ -2284,7 +2284,7 @@ def run(name,
try:
ret['changes'] = __salt__['docker.run_container'](
image_id,
image,
name=name,
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,

View file

@ -21,6 +21,34 @@ def __virtual__():
return False
def convert_duration(duration):
    '''
    Convert a duration string into XXhYYmZZs format.

    duration
        Duration to convert. Must be expressed in hours (``24h``),
        days (``7d``) or weeks (``1w``).

    Returns: duration_string
        String representation of duration in XXhYYmZZs format

    Raises:
        ValueError: if the suffix is not one of ``h``/``d``/``w`` or the
            numeric part is not an integer.
    '''
    # Durations must be specified in hours, days or weeks.
    # BUGFIX: the original did ``int(duration.split('h'))`` which passes a
    # list to int() and raised TypeError for every hour-suffixed value; it
    # also left ``hours`` unbound (NameError) for unknown suffixes.
    if duration.endswith('h'):
        hours = int(duration[:-1])
    elif duration.endswith('d'):
        hours = int(duration[:-1]) * 24
    elif duration.endswith('w'):
        hours = int(duration[:-1]) * 24 * 7
    else:
        raise ValueError(
            'Unsupported duration {0!r}: must end in h, d or w'.format(
                duration))
    return str(hours) + 'h0m0s'
def present(name, database, duration="7d",
replication=1, default=False,
**client_args):
@ -60,6 +88,41 @@ def present(name, database, duration="7d",
ret['result'] = False
return ret
else:
current_policy = __salt__['influxdb.get_retention_policy'](database=database, name=name)
update_policy = False
if current_policy['duration'] != convert_duration(duration):
update_policy = True
ret['changes']['duration'] = "Retention changed from {0} to {1}.".format(current_policy['duration'], duration)
if current_policy['replicaN'] != replication:
update_policy = True
ret['changes']['replication'] = "Replication changed from {0} to {1}.".format(current_policy['replicaN'], replication)
if current_policy['default'] != default:
update_policy = True
ret['changes']['default'] = "Default changed from {0} to {1}.".format(current_policy['default'], default)
if update_policy:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ' {0} is present and set to be changed'\
.format(name)
return ret
else:
if __salt__['influxdb.alter_retention_policy'](
database, name,
duration, replication, default, **client_args
):
ret['comment'] = 'retention policy {0} has been changed'\
.format(name)
return ret
else:
ret['comment'] = 'Failed to update retention policy {0}'\
.format(name)
ret['result'] = False
return ret
return ret

View file

@ -17,7 +17,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -32,7 +32,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -48,7 +48,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -65,7 +65,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '! 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -81,7 +81,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: 'not 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -94,7 +94,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -109,7 +109,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- dports:
- 80
- 443
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -122,7 +122,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -136,7 +136,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -148,7 +148,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -161,7 +161,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -174,7 +174,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -183,6 +183,55 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- chain: INPUT
- policy: ACCEPT
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms of
``--protocol``, if ``--proto`` appears in an iptables command after the
appearance of ``-m policy``, it is interpreted as the ``--proto`` option of
the policy extension (see the iptables-extensions(8) man page).
Example rules for IPSec policy:
.. code-block:: yaml
accept_esp_in:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 10.20.0.0/24
- destination: 10.10.0.0/24
- in-interface: eth0
- match: policy
- dir: in
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_in:
iptables.append:
- use:
- iptables: accept_esp_in
- chain: FORWARD
accept_esp_out:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: ACCEPT
- source: 10.10.0.0/24
- destination: 10.20.0.0/24
- out-interface: eth0
- match: policy
- dir: out
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_out:
iptables.append:
- use:
- iptables: accept_esp_out
- chain: FORWARD
.. note::
Various functions of the ``iptables`` module use the ``--check`` option. If

View file

@ -212,6 +212,13 @@ def managed(name,
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing one
of the :ref:`salt.modules.state.sls` or :ref:`salt.modules.state.apply`
functions (see an example below).
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
@ -261,7 +268,7 @@ def managed(name,
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config debug=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
@ -324,11 +331,11 @@ def managed(name,
# the user can override the flags the equivalent CLI args
# which have higher precedence
test = __opts__.get('test', test)
debug = __opts__.get('debug', debug)
commit = __opts__.get('commit', commit)
replace = __opts__.get('replace', replace) # this might be a bit risky
skip_verify = __opts__.get('skip_verify', skip_verify)
test = __salt__['config.merge']('test', test)
debug = __salt__['config.merge']('debug', debug)
commit = __salt__['config.merge']('commit', commit)
replace = __salt__['config.merge']('replace', replace) # this might be a bit risky
skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
config_update_ret = _update_config(template_name,
template_source=template_source,

View file

@ -761,6 +761,14 @@ def installed(name,
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
@ -811,12 +819,7 @@ def installed(name,
no_cache_dir=no_cache_dir
)
# Check the retcode for success, but don't fail if using pip1 and the package is
# already present. Pip1 returns a retcode of 1 (instead of 0 for pip2) if you run
# "pip install" without any arguments. See issue #21845.
if pip_install_call and \
(pip_install_call.get('retcode', 1) == 0 or pip_install_call.get('stdout', '').startswith(
'You must give at least one requirement to install')):
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
@ -824,6 +827,8 @@ def installed(name,
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',

View file

@ -523,7 +523,7 @@ def _find_install_targets(name=None,
if any((pkgs, sources)):
if pkgs:
desired = _repack_pkgs(pkgs)
desired = _repack_pkgs(pkgs, normalize=normalize)
elif sources:
desired = __salt__['pkg_resource.pack_sources'](
sources,

View file

@ -12,11 +12,12 @@ Hives
-----
This is the top level of the registry. They all begin with HKEY.
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER(HKCU)
- HKEY_LOCAL MACHINE (HKLM)
- HKEY_USER (HKU)
- HKEY_CURRENT_CONFIG
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER(HKCU)
- HKEY_LOCAL MACHINE (HKLM)
- HKEY_USER (HKU)
- HKEY_CURRENT_CONFIG
----
Keys
@ -30,30 +31,38 @@ Values or Entries
-----------------
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. It is usually "(Default)"="(value not set)". The
actual value for the name and the date is Null. The registry editor will display
"(Default)" and "(value not set)".
have a default name/data pair. The name is ``(Default)`` with a displayed value
of ``(value not set)``. The actual value is Null.
-------
Example
-------
The following example is taken from the windows startup portion of the registry:
```
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
```
.. code-block:: bash
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
In this example these are the values for each:
Hive: `HKEY_LOCAL_MACHINE`
Hive:
``HKEY_LOCAL_MACHINE``
Key and subkeys: `SOFTWARE\Microsoft\Windows\CurrentVersion\Run`
Key and subkeys:
``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run``
Value:
- There are 3 value names: `RTHDVCPL`, `NvBackend`, and `BTMTrayAgent`
- There are 3 value names:
- `RTHDVCPL`
- `NvBackend`
- `BTMTrayAgent`
- Each value name has a corresponding value
:depends: - salt.utils.win_reg
'''
from __future__ import absolute_import, print_function, unicode_literals
@ -70,19 +79,19 @@ def __virtual__():
'''
if 'reg.read_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.read_value')
'missing util function: reg.read_value')
if 'reg.set_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.set_value')
'missing util function: reg.set_value')
if 'reg.delete_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_value')
'missing util function: reg.delete_value')
if 'reg.delete_key_recursive' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_key_recursive')
'missing util function: reg.delete_key_recursive')
return 'reg'
@ -102,76 +111,145 @@ def present(name,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False):
'''
r'''
Ensure a registry key or value is present.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
Args:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
Valid hive values include:
:param str vdata: The value you'd like to set. If a value name (vname) is
passed, this will be the data for that value name. If not, this will be the
(Default) value for the key.
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
associated item/value pairs.
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
:param str vtype: The value type for the data you wish to store in the
registry. Valid values are:
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_SZ (Default)
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
.. code-block:: yaml
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
.. code-block:: yaml
The following example will set the value for the ``version`` entry under the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The
value will be reflected in ``Wow6432Node``:
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
.. code-block:: yaml
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
.. code-block:: yaml
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
'''
ret = {'name': name,
'result': True,
@ -226,39 +304,42 @@ def absent(name, vname=None, use_32bit_registry=False):
'''
Ensure a registry value is removed. To remove a key use key_absent.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
Returns:
dict: A dictionary showing the results of the registry operation.
CLI Example:
.. code-block:: yaml
.. code-block:: yaml
'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
reg.absent
- vname: version
'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
reg.absent
- vname: version
In the above example the value named ``version`` will be removed from
the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not
passed, the (Default) value would be deleted.
In the above example the value named ``version`` will be removed from
the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was
not passed, the ``(Default)`` value would be deleted.
'''
ret = {'name': name,
'result': True,
@ -304,39 +385,43 @@ def key_absent(name, use_32bit_registry=False):
r'''
.. versionadded:: 2015.5.4
Ensure a registry key is removed. This will remove a key and all value
entries it contains. It will fail if the key contains subkeys.
Ensure a registry key is removed. This will remove the key, subkeys, and all
value entries.
:param str name: A string representing the full path to the key to be
removed to include the hive and the keypath. The hive can be any of the
following:
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
name (str):
A string representing the full path to the key to be removed to
include the hive and the keypath. The hive can be any of the
following:
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
under the ``HKEY_CURRENT_USER`` hive.
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
.. code-block:: yaml
CLI Example:
'HKEY_CURRENT_USER\SOFTWARE\Salt':
reg.key_absent:
- force: True
The following example will delete the ``SOFTWARE\DeleteMe`` key in the
``HKEY_CURRENT_USER`` hive including all its subkeys and value pairs.
In the above example the path is interpreted as follows:
.. code-block:: yaml
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
remove_key_demo:
reg.key_absent:
- name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\DeleteMe`` is the key
'''
ret = {'name': name,
'result': True,
@ -352,10 +437,10 @@ def key_absent(name, use_32bit_registry=False):
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)
}}}
ret['changes'] = {
'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)}}}
# Check for test option
if __opts__['test']:

View file

@ -116,7 +116,7 @@ def state(name,
sls=None,
top=None,
saltenv=None,
test=False,
test=None,
pillar=None,
pillarenv=None,
expect_minions=True,
@ -169,7 +169,10 @@ def state(name,
containing a single sls file, or a list of sls files
test
Pass ``test=true`` through to the state function
Pass ``test=true`` or ``test=false`` through to the state function. This
can be used to override a test mode set in the minion's config file. If
left as the default of None and the 'test' mode is supplied on the
command line, that value is passed instead.
pillar
Pass the ``pillar`` kwarg through to the state function
@ -283,8 +286,8 @@ def state(name,
state_ret['result'] = False
return state_ret
if test or __opts__.get('test'):
cmd_kw['kwarg']['test'] = True
if test is not None or __opts__.get('test'):
cmd_kw['kwarg']['test'] = test if test is not None else __opts__.get('test')
if pillar:
cmd_kw['kwarg']['pillar'] = pillar

View file

@ -130,11 +130,11 @@ class IPCServer(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -196,10 +196,10 @@ class IPCServer(object):
log.trace('IPCServer: Handling connection '
'to address: %s', address)
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
)
self.io_loop.spawn_callback(self.handle_stream, stream)
except Exception as exc:
log.error('IPC streaming error: %s', exc)
@ -329,10 +329,10 @@ class IPCClient(object):
break
if self.stream is None:
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
)
try:
log.trace('IPCClient: Connecting to socket: %s', self.socket_path)
@ -510,11 +510,11 @@ class IPCMessagePublisher(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -545,17 +545,14 @@ class IPCMessagePublisher(object):
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: %s', address)
try:
kwargs = {}
if self.opts['ipc_write_buffer'] > 0:
kwargs['max_write_buffer_size'] = self.opts['ipc_write_buffer']
log.trace('Setting IPC connection write buffer: %s', (self.opts['ipc_write_buffer']))
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
**kwargs
)
self.streams.add(stream)

View file

@ -15,7 +15,6 @@ import os
import weakref
import time
import traceback
import errno
# Import Salt Libs
import salt.crypt
@ -33,6 +32,7 @@ import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
@ -571,6 +571,11 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
@ -757,15 +762,23 @@ if USE_LOAD_BALANCER:
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
t = threading.Thread(target=self.socket_queue_thread)
t.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
client_socket, address = self.socket_queue.get(True, None)
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
@ -779,10 +792,9 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None, io_loop=None):
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(
resolver=resolver, io_loop=io_loop)
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
@ -798,7 +810,6 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
return stream.connect(addr)
@ -860,8 +871,8 @@ class SaltMessageClient(object):
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._tcp_client = TCPClientKeepAlive(
opts, io_loop=self.io_loop, resolver=resolver)
with salt.utils.async.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
@ -950,18 +961,17 @@ class SaltMessageClient(object):
if self._closing:
break
try:
if (self.source_ip or self.source_port) and tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'),
source_ip=self.source_ip,
source_port=self.source_port)
else:
if self.source_ip or self.source_port:
kwargs = {}
if self.source_ip or self.source_port:
if tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {'source_ip': self.source_ip,
'source_port': self.source_port}
else:
log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
with salt.utils.async.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
@ -1167,7 +1177,8 @@ class PubServer(tornado.tcpserver.TCPServer, object):
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()

View file

@ -19,6 +19,7 @@ import hashlib
import itertools
import logging
import random
import re
import shlex
import socket
import ssl
@ -35,8 +36,7 @@ from salt.utils.odict import OrderedDict
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import map, zip # pylint: disable=redefined-builtin
from salt.ext.six.moves import zip # pylint: disable=redefined-builtin
# Integrations
try:
@ -44,6 +44,11 @@ try:
HAS_DNSPYTHON = True
except ImportError:
HAS_DNSPYTHON = False
try:
import tldextract
HAS_TLDEXTRACT = True
except ImportError:
HAS_TLDEXTRACT = False
HAS_DIG = salt.utils.path.which('dig') is not None
HAS_DRILL = salt.utils.path.which('drill') is not None
HAS_HOST = salt.utils.path.which('host') is not None
@ -60,7 +65,7 @@ class RFC(object):
Simple holding class for all RFC/IANA registered lists & standards
'''
# https://tools.ietf.org/html/rfc6844#section-3
COO_TAGS = (
CAA_TAGS = (
'issue',
'issuewild',
'iodef'
@ -88,10 +93,8 @@ class RFC(object):
))
TLSA_SELECT = OrderedDict((
(0, 'pkixta'),
(1, 'pkixee'),
(2, 'daneta'),
(3, 'daneee'),
(0, 'cert'),
(1, 'spki'),
))
TLSA_MATCHING = OrderedDict((
@ -129,12 +132,23 @@ def _to_port(port):
def _tree(domain, tld=False):
'''
Split out a domain in its parents
Leverages tldextract to take the TLDs from publicsuffix.org
or makes a valiant approximation of that
:param domain: dc2.ams2.example.com
:param tld: Include TLD in list
:return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com']
'''
if '.' not in domain:
raise ValueError('Provide a decent domain')
domain = domain.rstrip('.')
assert '.' in domain, 'Provide a decent domain'
if not tld:
if HAS_TLDEXTRACT:
tld = tldextract.extract(domain).suffix
else:
tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group()
log.info('Without tldextract, dns.util resolves the TLD of {0} to {1}'.format(domain, tld))
res = [domain]
while True:
@ -142,12 +156,10 @@ def _tree(domain, tld=False):
if idx < 0:
break
domain = domain[idx + 1:]
if domain == tld:
break
res.append(domain)
# properly validating the tld is impractical
if not tld:
res = res[:-1]
return res
@ -166,6 +178,17 @@ def _weighted_order(recs):
return res
def _cast(rec_data, rec_cast):
if isinstance(rec_cast, dict):
rec_data = type(rec_cast.keys()[0])(rec_data)
res = rec_cast[rec_data]
return res
elif isinstance(rec_cast, (list, tuple)):
return RFC.validate(rec_data, rec_cast)
else:
return rec_cast(rec_data)
def _data2rec(schema, rec_data):
'''
schema = OrderedDict({
@ -180,11 +203,20 @@ def _data2rec(schema, rec_data):
'''
try:
rec_fields = rec_data.split(' ')
assert len(rec_fields) == len(schema)
return dict((
(field_name, rec_cast(rec_field))
for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)
))
# spaces in digest fields are allowed
assert len(rec_fields) >= len(schema)
if len(rec_fields) > len(schema):
cutoff = len(schema) - 1
rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]
if len(schema) == 1:
res = _cast(rec_fields[0], next(iter(schema.values())))
else:
res = dict((
(field_name, _cast(rec_field, rec_cast))
for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)
))
return res
except (AssertionError, AttributeError, TypeError, ValueError) as e:
raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format(
rec_data,
@ -203,9 +235,14 @@ def _data2rec_group(schema, recs_data, group_key):
for rdata in recs_data:
rdata = _data2rec(schema, rdata)
assert rdata and group_key in rdata
idx = rdata.pop(group_key)
if idx not in res:
res[idx] = []
if len(rdata) == 1:
rdata = next(iter(rdata.values()))
res[idx].append(rdata)
return res
except (AssertionError, ValueError) as e:
@ -220,6 +257,14 @@ def _rec2data(*rdata):
return ' '.join(rdata)
def _data_clean(data):
data = data.strip(string.whitespace)
if data.startswith(('"', '\'')) and data.endswith(('"', '\'')):
return data[1:-1]
else:
return data
def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None):
'''
Use dig to lookup addresses
@ -229,7 +274,7 @@ def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None):
:param servers: [] of servers to use
:return: [] of records or False if error
'''
cmd = 'dig +search +fail +noall +answer +noclass +nottl -t {0} '.format(rdtype)
cmd = 'dig +search +fail +noall +answer +noclass +nosplit +nottl -t {0} '.format(rdtype)
if servers:
cmd += ''.join(['@{0} '.format(srv) for srv in servers])
if timeout is not None:
@ -241,7 +286,7 @@ def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None):
if secure:
cmd += '+dnssec +adflag '
cmd = __salt__['cmd.run_all'](cmd + six.text_type(name), python_shell=False, output_loglevel='quiet')
cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet')
if 'ignoring invalid type' in cmd['stderr']:
raise ValueError('Invalid DNS type {}'.format(rdtype))
@ -263,7 +308,7 @@ def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None):
elif rtype == 'RRSIG':
validated = True
continue
res.append(rdata.strip(string.whitespace + '"'))
res.append(_data_clean(rdata))
if res and secure and not validated:
return False
@ -316,7 +361,7 @@ def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
elif l_type != rdtype:
raise ValueError('Invalid DNS type {}'.format(rdtype))
res.append(l_rec.strip(string.whitespace + '"'))
res.append(_data_clean(l_rec))
except StopIteration:
pass
@ -344,7 +389,7 @@ def _lookup_gai(name, rdtype, timeout=None):
raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype))
if timeout:
log.warn('Ignoring timeout on gai resolver; fix resolv.conf to do that')
log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that')
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)]
@ -369,7 +414,7 @@ def _lookup_host(name, rdtype, timeout=None, server=None):
if timeout:
cmd += '-W {0} '.format(int(timeout))
cmd = __salt__['cmd.run_all'](cmd + name, python_shell=False, output_loglevel='quiet')
cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet')
if 'invalid type' in cmd['stderr']:
raise ValueError('Invalid DNS type {}'.format(rdtype))
@ -388,7 +433,7 @@ def _lookup_host(name, rdtype, timeout=None, server=None):
if line.startswith(prefix):
line = line[len(prefix) + 1:]
break
res.append(line.strip(string.whitespace + '"'))
res.append(_data_clean(line))
return res
@ -412,7 +457,7 @@ def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None):
resolver.ednsflags += dns.flags.DO
try:
res = [six.text_type(rr.to_text().strip(string.whitespace + '"'))
res = [_data_clean(rr.to_text())
for rr in resolver.query(name, rdtype, raise_on_no_answer=False)]
return res
except dns.rdatatype.UnknownRdatatype:
@ -481,7 +526,7 @@ def _lookup_nslookup(name, rdtype, timeout=None, server=None):
else:
line = line.split(' ')
res.append(line[-1].strip(string.whitespace + '"'))
res.append(_data_clean(line[-1]))
line = next(lookup_res)
except StopIteration:
@ -504,13 +549,14 @@ def lookup(
secure=None
):
'''
Lookup DNS record data
Lookup DNS records and return their data
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param walk: Find records in parents if they don't exist
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the final domain in the walk
:param secure: return only DNSSEC secured responses
:return: [] of record data
@ -574,13 +620,16 @@ def lookup(
name = [name]
else:
idx = 0
if rdtype == 'SRV': # The only rr I know that has 2 name components
if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components
idx = name.find('.') + 1
idx = name.find('.', idx) + 1
domain = name[idx:]
name = name[0:idx]
rname = name[0:idx]
name = _tree(domain, walk_tld)
if walk == 'name':
name = [rname + domain for domain in name]
name = [name + domain for domain in _tree(domain, walk_tld)]
if timeout:
timeout /= len(name)
@ -594,6 +643,8 @@ def lookup(
if res:
return res
return res
def query(
name,
@ -606,14 +657,16 @@ def query(
secure=None
):
'''
Query DNS for information
Query DNS for information.
Where `lookup()` returns record data, `query()` tries to interpret the data and return its results
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param secure: return only DNSSEC secured response
:param walk: Find records in parents if they don't exist
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the top-level domain in the walk
:return: [] of records
'''
@ -630,32 +683,58 @@ def query(
if rdtype == 'PTR' and not name.endswith('arpa'):
name = ptr_name(name)
qres = lookup(name, rdtype, **qargs)
if rdtype == 'SPF' and not qres:
if rdtype == 'SPF':
# 'SPF' has become a regular 'TXT' again
qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')]
if not qres:
qres = lookup(name, rdtype, **qargs)
else:
qres = lookup(name, rdtype, **qargs)
rec_map = {
'A': a_rec,
'AAAA': aaaa_rec,
'CAA': caa_rec,
'MX': mx_rec,
'SOA': soa_rec,
'SPF': spf_rec,
'SRV': srv_rec,
'A': a_rec,
'AAAA': aaaa_rec,
'CAA': caa_rec,
'MX': mx_rec,
'SOA': soa_rec,
'SPF': spf_rec,
'SRV': srv_rec,
'SSHFP': sshfp_rec,
'TLSA': tlsa_rec,
}
if rdtype not in rec_map:
if not qres or rdtype not in rec_map:
return qres
caster = rec_map[rdtype]
if rdtype in ('MX', 'SRV'):
# Grouped returns
res = caster(qres)
elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'):
res = [rec_map[rdtype](res) for res in qres]
elif rdtype in ('SOA', 'SPF'):
res = rec_map[rdtype](qres[0])
else:
# List of results
res = list(map(caster, qres))
res = rec_map[rdtype](qres)
return res
def host(name, ip4=True, ip6=True, **kwargs):
    '''
    Return a dict of addresses for ``name``.

    :param name: host name to resolve
    :param ip4: perform an IPv4 ('A') lookup
    :param ip6: perform an IPv6 ('AAAA') lookup
    :param kwargs: passed through to lookup()
    :return: dict with 'ip4' and/or 'ip6' keys; empty or disabled lookups
        are omitted
    '''
    res = {}
    # (enabled flag, result key, record type) keeps both lookups symmetric;
    # IPv6 is queried first, matching the historical behaviour.
    for enabled, key, rdtype in ((ip6, 'ip6', 'AAAA'), (ip4, 'ip4', 'A')):
        if not enabled:
            continue
        addrs = lookup(name, rdtype, **kwargs)
        if addrs:
            res[key] = addrs
    return res
@ -692,8 +771,8 @@ def caa_rec(rdatas):
'''
rschema = OrderedDict((
('flags', lambda flag: ['critical'] if int(flag) > 0 else []),
('tag', lambda tag: RFC.validate(tag, RFC.COO_TAGS)),
('value', lambda val: six.text_type(val).strip('"'))
('tag', RFC.CAA_TAGS),
('value', lambda val: val.strip('\',"'))
))
res = _data2rec_group(rschema, rdatas, 'tag')
@ -845,7 +924,7 @@ def srv_name(svc, proto='tcp', domain=None):
:return:
'''
proto = RFC.validate(proto, RFC.SRV_PROTO)
if svc.isdigit():
if isinstance(svc, int) or svc.isdigit():
svc = _to_port(svc)
if domain:
@ -887,6 +966,21 @@ def sshfp_data(key_t, hash_t, pub):
return _rec2data(key_t, hash_t, ssh_fp)
def sshfp_rec(rdata):
    '''
    Validate and parse DNS record data for SSHFP record(s)

    :param rdata: DNS record data
    :return: dict w/fields
    '''
    # NOTE: the original docstring said "TLSA record(s)" — copy/paste error;
    # this schema parses SSHFP (algorithm, fp type, fingerprint) fields.
    rschema = OrderedDict((
        ('algorithm', RFC.SSHFP_ALGO),
        ('fp_hash', RFC.SSHFP_HASH),
        # resolvers are inconsistent about fingerprint case; normalize to lower
        ('fingerprint', lambda val: val.lower())
    ))
    return _data2rec(rschema, rdata)
def tlsa_data(pub, usage, selector, matching):
'''
Generate a TLSA rec
@ -913,6 +1007,22 @@ def tlsa_data(pub, usage, selector, matching):
return _rec2data(usage, selector, matching, cert_fp)
def tlsa_rec(rdata):
    '''
    Validate and parse DNS record data for TLSA record(s)

    :param rdata: DNS record data
    :return: dict w/fields
    '''
    # Field order follows the TLSA wire format: usage, selector,
    # matching type, then the certificate association data.
    field_casts = (
        ('usage', RFC.TLSA_USAGE),
        ('selector', RFC.TLSA_SELECT),
        ('matching', RFC.TLSA_MATCHING),
        ('pub', str),
    )
    return _data2rec(OrderedDict(field_casts), rdata)
def service(
svc,
proto='tcp',

View file

@ -135,6 +135,12 @@ def vb_get_manager():
'''
global _virtualboxManager
if _virtualboxManager is None and HAS_LIBS:
try:
from importlib import reload
except ImportError:
# If we get here, we are in py2 and reload is a built-in.
pass
# Reloading the API extends sys.paths for subprocesses of multiprocessing, since they seem to share contexts
reload(vboxapi)
_virtualboxManager = vboxapi.VirtualBoxManager(None, None)
@ -149,7 +155,13 @@ def vb_get_box():
@rtype: IVirtualBox
'''
vb_get_manager()
vbox = _virtualboxManager.vbox
try:
# This works in older versions of the SDK, but does not seem to work anymore.
vbox = _virtualboxManager.vbox
except AttributeError:
vbox = _virtualboxManager.getVirtualBox()
return vbox

View file

@ -6,21 +6,23 @@ Manage the Windows registry
Hives
-----
Hives are the main sections of the registry and all begin with the word HKEY.
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
----
Keys
----
Keys are the folders in the registry. Keys can have many nested subkeys. Keys
can have a value assigned to them under the (Default)
can have a value assigned to them under the (Default) value name
-----------------
Values or Entries
-----------------
Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs.
(Default) value corresponds to the Key itself, the rest are their own name/value
pairs.
:depends: - PyWin32
'''
@ -91,7 +93,8 @@ def _to_unicode(vdata):
class Registry(object): # pylint: disable=R0903
'''
Delay usage until this module is used
This was put in a class to delay usage until this module is actually used
This class contains all the lookup dicts for working with the registry
'''
def __init__(self):
self.hkeys = {
@ -157,14 +160,26 @@ class Registry(object): # pylint: disable=R0903
def key_exists(hive, key, use_32bit_registry=False):
'''
Check that the key is found in the registry
Check that the key is found in the registry. This refers to keys and not
value/data pairs.
:param str hive: The hive to connect to.
:param str key: The key to check
:param bool use_32bit_registry: Look in the 32bit portion of the registry
Args:
:return: Returns True if found, False if not found
:rtype: bool
hive (str): The hive to connect to
key (str): The key to check
use_32bit_registry (bool): Look in the 32bit portion of the registry
Returns:
bool: True if exists, otherwise False
Usage:
.. code-block:: python
import salt.utils.win_reg
winreg.key_exists(hive='HKLM', key='SOFTWARE\\Microsoft')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
@ -188,13 +203,19 @@ def broadcast_change():
'''
Refresh the windows environment.
Returns (bool): True if successful, otherwise False
.. note::
This will only affect new processes and windows. Services will not see
the change until the system restarts.
CLI Example:
Returns:
bool: True if successful, otherwise False
.. code-block:: bash
Usage:
salt '*' reg.broadcast_change
.. code-block:: python
import salt.utils.win_reg
winreg.broadcast_change()
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
_, res = win32gui.SendMessageTimeout(
@ -207,28 +228,34 @@ def list_keys(hive, key=None, use_32bit_registry=False):
'''
Enumerates the subkeys in a registry key or hive.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the keys under the hive will be returned.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64 bit installations. On 32bit machines this is ignored.
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the keys under the hive will be returned.
:return: A list of keys/subkeys under the hive or key.
:rtype: list
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
CLI Example:
Returns:
list: A list of keys/subkeys under the hive or key.
.. code-block:: bash
Usage:
salt '*' reg.list_keys HKLM 'SOFTWARE'
.. code-block:: python
import salt.utils.win_reg
winreg.list_keys(hive='HKLM', key='SOFTWARE\\Microsoft')
'''
local_hive = _to_unicode(hive)
@ -265,30 +292,37 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
'''
Enumerates the values in a registry key or hive.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the values under the hive will be returned.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64 bit installations. On 32bit machines this is ignored.
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the values under the hive will be returned.
:param bool include_default: Toggle whether to include the '(Default)' value.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
:return: A list of values under the hive or key.
:rtype: list
include_default (bool):
Toggle whether to include the '(Default)' value.
CLI Example:
Returns:
list: A list of values under the hive or key.
.. code-block:: bash
Usage:
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
.. code-block:: python
import salt.utils.win_reg
winreg.list_values(hive='HKLM', key='SYSTEM\\CurrentControlSet\\Services\\Tcpip')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
@ -335,40 +369,60 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
def read_value(hive, key, vname=None, use_32bit_registry=False):
r'''
Reads a registry value entry or the default value for a key.
Reads a registry value entry or the default value for a key. To read the
default value, don't pass ``vname``
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str): The name of the hive. Can be one of the following:
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be returned
key (str):
The key (looks like a path) to the value name.
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64bit installations. On 32bit machines this is ignored.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be returned.
:return: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64bit installations.
On 32bit machines this is ignored.
:rtype: dict
Returns:
dict: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False.
If vname is not passed:
bool: Returns False if the key is not found
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
- Returns False if key not found.
If vname is not passed:
CLI Example:
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
.. code-block:: bash
Usage:
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
The following will get the value of the ``version`` value name in the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
.. code-block:: python
import salt.utils.win_reg
winreg.read_value(hive='HKLM', key='SOFTWARE\\Salt', vname='version')
Usage:
The following will get the default value of the
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` key
.. code-block:: python
import salt.utils.win_reg
winreg.read_value(hive='HKLM', key='SOFTWARE\\Salt')
'''
# If no name is passed, the default value of the key will be returned
# The value name is Default
@ -438,98 +492,125 @@ def set_value(hive,
use_32bit_registry=False,
volatile=False):
'''
Sets a registry value entry or the default value for a key.
Sets a value in the registry. If ``vname`` is passed, it will be the value
for that value name, otherwise it will be the default value for the
specified key
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be set.
key (str):
The key (looks like a path) to the value name.
:param object vdata: The value data to be set.
What the type of this parameter
should be is determined by the value of the vtype
parameter. The correspondence
is as follows:
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be set.
.. glossary::
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (vname) is passed, this
will be the data for that value name. If not, this will be the
(Default) value for the key.
REG_BINARY
binary data (i.e. str in python version < 3 and bytes in version >=3)
REG_DWORD
int
REG_EXPAND_SZ
str
REG_MULTI_SZ
list of objects of type str
REG_SZ
str
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
:param str vtype: The value type.
The possible values of the vtype parameter are indicated
above in the description of the vdata parameter.
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
:param bool use_32bit_registry: Sets the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
.. note::
When setting REG_BINARY, string data will be converted to
binary. You can pass base64 encoded using the ``binascii``
built-in module. Use ``binascii.b2a_base64('your data')``
:param bool volatile: When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or shutdown).
This parameter only has an effect when a key is being created and at no
other time.
.. note::
The type for the (Default) value is always REG_SZ and cannot be
changed.
:return: Returns True if successful, False if not
:rtype: bool
.. note::
This parameter is optional. If not passed, the Key will be
created with no associated item/value pairs.
CLI Example:
vtype (str):
The value type. The possible values of the vtype parameter are
indicated above in the description of the vdata parameter.
.. code-block:: bash
use_32bit_registry (bool):
Sets the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2'
volatile (bool):
When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or
shutdown). This parameter only has an effect when a key is being
created and at no other time.
This function is strict about the type of vdata. For instance the
the next example will fail because vtype has a value of REG_SZ and vdata
has a type of int (as opposed to str as expected).
Returns:
bool: True if successful, otherwise False
CLI Example:
Usage:
.. code-block:: bash
This will set the version value to 2015.5.2 in the SOFTWARE\\Salt key in
the HKEY_LOCAL_MACHINE hive
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_SZ vdata=0
.. code-block:: python
However, this next example where vdata is properly quoted should succeed.
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='version', vdata='2015.5.2')
CLI Example:
Usage:
.. code-block:: bash
This function is strict about the type of vdata. For instance this
example will fail because vtype has a value of REG_SZ and vdata has a
type of int (as opposed to str as expected).
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_SZ vdata="'0'"
.. code-block:: python
An example of using vtype REG_BINARY is as follows:
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='str_data', vdata=1.2)
CLI Example:
Usage:
.. code-block:: bash
In this next example vdata is properly quoted and should succeed.
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_BINARY vdata='!!binary d2hhdCdzIHRoZSBwb2ludA=='
.. code-block:: python
An example of using vtype REG_LIST is as follows:
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='str_data', vdata='1.2')
CLI Example:
Usage:
.. code-block:: bash
This is an example of using vtype REG_BINARY. Both ``set_value``
commands will set the same value ``Salty Test``
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_LIST vdata='[a,b,c]'
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='bin_data', vdata='Salty Test', vtype='REG_BINARY')
import binascii
bin_data = binascii.b2a_base64('Salty Test')
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='bin_data_encoded', vdata=bin_data, vtype='REG_BINARY')
Usage:
An example using vtype REG_MULTI_SZ is as follows:
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='list_data', vdata=['Salt', 'is', 'great'], vtype='REG_MULTI_SZ')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
@ -571,20 +652,29 @@ def cast_vdata(vdata=None, vtype='REG_SZ'):
Args:
vdata (str, list, bin): The data to cast
vdata (str, int, list, bytes): The data to cast
vtype (str):
The type of data to be written to the registry. Must be one of the
following:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ
Returns:
The vdata cast to the appropriate type. Will be unicode string, binary,
list of unicode strings, or int
Usage:
.. code-block:: python
import salt.utils.win_reg
winreg.cast_vdata(vdata='This is the string', vtype='REG_SZ')
'''
# Check data type and cast to expected type
# int will automatically become long on 64bit numbers
@ -614,33 +704,39 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
'''
.. versionadded:: 2015.5.4
Delete a registry key to include all subkeys.
Delete a registry key to include all subkeys and value/data pairs.
:param hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param key: The key to remove (looks like a path)
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param bool use_32bit_registry: Deletes the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
key (str):
The key to remove (looks like a path)
:return: A dictionary listing the keys that deleted successfully as well as
those that failed to delete.
:rtype: dict
use_32bit_registry (bool):
Deletes the 32bit portion of the registry on 64bit
installations. On 32bit machines this is ignored.
The following example will remove ``salt`` and all its subkeys from the
``SOFTWARE`` key in ``HKEY_LOCAL_MACHINE``:
Returns:
dict: A dictionary listing the keys that deleted successfully as well as
those that failed to delete.
CLI Example:
Usage:
.. code-block:: bash
The following example will remove ``salt`` and all its subkeys from the
``SOFTWARE`` key in ``HKEY_LOCAL_MACHINE``:
salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
.. code-block:: python
import salt.utils.win_reg
winreg.delete_key_recursive(hive='HKLM', key='SOFTWARE\\DeleteMe')
'''
local_hive = _to_unicode(hive)
@ -718,31 +814,37 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
'''
Delete a registry value entry or the default value for a key.
:param str hive: The name of the hive. Can be one of the following
Args:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
hive (str):
The name of the hive. Can be one of the following
:param str key: The key (looks like a path) to the value name.
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be deleted.
key (str):
The key (looks like a path) to the value name.
:param bool use_32bit_registry: Deletes the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be deleted.
:return: Returns True if successful, None if the value didn't exist, and
False if unsuccessful
:rtype: bool
use_32bit_registry (bool):
Deletes the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
CLI Example:
Return:
bool: True if successful, otherwise False
.. code-block:: bash
Usage:
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
.. code-block:: python
import salt.utils.win_reg
winreg.delete_value(hive='HKLM', key='SOFTWARE\\SaltTest', vname='version')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)

View file

@ -55,13 +55,17 @@ import salt.log.setup
from salt.utils.odict import OrderedDict
# Define the pytest plugins we rely on
pytest_plugins = ['pytest_catchlog', 'tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
pytest_plugins = ['tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
# Define where not to collect tests from
collect_ignore = ['setup.py']
log = logging.getLogger('salt.testsuite')
# Reset logging root handlers
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
def pytest_tempdir_basename():
'''
@ -197,25 +201,6 @@ def pytest_configure(config):
called after command line options have been parsed
and all plugins and initial conftest files been loaded.
'''
# Configure the console logger based on the catch_log settings.
# Most importantly, shutdown Salt's null, store and temporary logging queue handlers
catch_log = config.pluginmanager.getplugin('_catch_log')
cli_logging_handler = catch_log.log_cli_handler
# Add the pytest_catchlog CLI log handler to the logging root
logging.root.addHandler(cli_logging_handler)
cli_level = cli_logging_handler.level
cli_level = config._catchlog_log_cli_level
cli_format = cli_logging_handler.formatter._fmt
cli_date_format = cli_logging_handler.formatter.datefmt
# Setup the console logger which shuts down the null and the temporary queue handlers
salt.log.setup_console_logger(
log_level=salt.log.setup.LOG_VALUES_TO_LEVELS.get(cli_level, 'error'),
log_format=cli_format,
date_format=cli_date_format
)
# Disable the store logging queue handler
salt.log.setup.setup_extended_logging({'extension_modules': ''})
config.addinivalue_line('norecursedirs', os.path.join(CODE_DIR, 'templates'))
config.addinivalue_line(
'markers',

View file

@ -21,6 +21,7 @@ import logging
# Import salt libs
import salt.utils.event
import salt.utils.async
# Import 3rd-party libs
from tornado import gen
@ -69,11 +70,11 @@ class PyTestEngine(object):
self.sock.bind(('localhost', port))
# become a server socket
self.sock.listen(5)
netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
def handle_connection(self, connection, address):
log.warning('Accepted connection from %s. Role: %s', address, self.opts['__role'])

View file

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils.platform
@skipIf(not salt.utils.platform.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
    '''
    Test the autoruns module
    '''
    def test_win_autoruns_list(self):
        '''
        test win_autoruns.list module
        '''
        ret = self.run_function('autoruns.list')
        # Assert the type first: a traceback string returned on failure
        # would otherwise be str()-ified and could mask the real error.
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(ret, dict)
        self.assertIn('HKLM', str(ret))

View file

@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils.platform
@skipIf(not salt.utils.platform.is_windows(), 'Tests for only Windows')
class FirewallTest(ModuleCase):
    '''
    Validate windows firewall module
    '''
    def _pre_firewall_status(self, pre_run):
        '''
        Restore each firewall profile to the enabled/disabled state it had
        before the test ran (captured in ``pre_run``).
        '''
        post_run = self.run_function('firewall.get_config')
        network = ['Domain', 'Public', 'Private']
        # compare the status of the firewall before and after test
        # and re-enable or disable depending on status before test run
        for net in network:
            if post_run[net] != pre_run[net]:
                if pre_run[net]:
                    self.assertTrue(self.run_function('firewall.enable', profile=net))
                else:
                    self.assertTrue(self.run_function('firewall.disable', profile=net))

    @destructiveTest
    def test_firewall_get_config(self):
        '''
        test firewall.get_config
        '''
        # capture the current state so it can be restored at the end
        pre_run = self.run_function('firewall.get_config')
        # ensure all networks are enabled then test status
        self.assertTrue(self.run_function('firewall.enable', profile='allprofiles'))
        ret = self.run_function('firewall.get_config')
        network = ['Domain', 'Public', 'Private']
        for net in network:
            self.assertTrue(ret[net])
        self._pre_firewall_status(pre_run)

    @destructiveTest
    def test_firewall_disable(self):
        '''
        test firewall.disable
        '''
        pre_run = self.run_function('firewall.get_config')
        network = 'Private'
        ret = self.run_function('firewall.get_config')[network]
        # the profile must be enabled first for disable to be meaningful
        if not ret:
            self.assertTrue(self.run_function('firewall.enable', profile=network))
        self.assertTrue(self.run_function('firewall.disable', profile=network))
        ret = self.run_function('firewall.get_config')[network]
        self.assertFalse(ret)
        self._pre_firewall_status(pre_run)

    @destructiveTest
    def test_firewall_enable(self):
        '''
        test firewall.enable
        '''
        pre_run = self.run_function('firewall.get_config')
        network = 'Private'
        ret = self.run_function('firewall.get_config')[network]
        # the profile must be disabled first for enable to be meaningful
        if ret:
            self.assertTrue(self.run_function('firewall.disable', profile=network))
        self.assertTrue(self.run_function('firewall.enable', profile=network))
        ret = self.run_function('firewall.get_config')[network]
        self.assertTrue(ret)
        self._pre_firewall_status(pre_run)

    def test_firewall_get_rule(self):
        '''
        test firewall.get_rule
        '''
        # built-in rule that ships with Windows, so a read-only lookup
        # should always find it — presumably present on all supported
        # versions; TODO confirm on older releases
        rule = 'Remote Event Log Management (NP-In)'
        ret = self.run_function('firewall.get_rule', [rule])
        checks = ['Private', 'LocalPort', 'RemotePort']
        for check in checks:
            self.assertIn(check, ret[rule])

    @destructiveTest
    def test_firewall_add_delete_rule(self):
        '''
        test firewall.add_rule and delete_rule
        '''
        rule = 'test rule'
        port = '8080'
        # test adding firewall rule
        # NOTE(review): add_rule's return value is never asserted —
        # consider checking it
        add_rule = self.run_function('firewall.add_rule', [rule, port])
        ret = self.run_function('firewall.get_rule', [rule])
        self.assertIn(rule, ret[rule])
        self.assertIn(port, ret[rule])
        # test deleting firewall rule
        self.assertTrue(self.run_function('firewall.delete_rule', [rule, port]))
        ret = self.run_function('firewall.get_rule', [rule])
        self.assertNotIn(rule, ret)
        self.assertNotIn(port, ret)
        # when the rule is gone, netsh reports this exact message
        self.assertIn('No rules match the specified criteria.', ret)

View file

@ -2,7 +2,6 @@
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import grp
import random
import string
@ -18,6 +17,9 @@ import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
if not salt.utils.platform.is_windows():
import grp
@skip_if_not_root
@destructiveTest
@ -126,6 +128,7 @@ class GroupModuleTest(ModuleCase):
self.assertFalse(self.run_function('group.add', [self._group], gid=self._gid))
@destructiveTest
@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows')
def test_add_system_group(self):
'''
Test the add group function with system=True
@ -144,6 +147,7 @@ class GroupModuleTest(ModuleCase):
[self._group]))
@destructiveTest
@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows')
def test_add_system_group_gid(self):
'''
Test the add group function with system=True and a specific gid

View file

@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt Libs
import salt.utils.path
import salt.utils.platform
URL = 'repo.saltstack.com'


class NetworkTest(ModuleCase):
    '''
    Validate network module
    '''
    def test_network_ping(self):
        '''
        network.ping
        '''
        ret = self.run_function('network.ping', [URL])
        exp_out = ['ping', URL, 'ttl', 'time']
        for out in exp_out:
            self.assertIn(out, ret.lower())

    @skipIf(salt.utils.platform.is_darwin(), 'not supported on macosx')
    def test_network_netstat(self):
        '''
        network.netstat
        '''
        ret = self.run_function('network.netstat')
        exp_out = ['proto', 'local-address']
        for val in ret:
            for out in exp_out:
                self.assertIn(out, val)

    def test_network_traceroute(self):
        '''
        network.traceroute
        '''
        if not salt.utils.path.which('traceroute') and not salt.utils.platform.is_windows():
            self.skipTest('traceroute not installed')
        ret = self.run_function('network.traceroute', [URL])
        exp_out = ['hostname', 'ip']
        for out in exp_out:
            # Check the actual return data; the previous assertion
            # (``assertIn(out, exp_out)``) tested membership in the
            # expectation list itself and could never fail.
            self.assertIn(out, str(ret))

    @skipIf(not salt.utils.platform.is_windows(), 'windows only test')
    def test_network_nslookup(self):
        '''
        network.nslookup
        '''
        ret = self.run_function('network.nslookup', [URL])
        exp_out = ['Server', 'Address']
        for out in exp_out:
            # Same fix as above: assert against the return value,
            # not the expectation list.
            self.assertIn(out, str(ret))

View file

@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils.platform
@skipIf(not salt.utils.platform.is_windows(), 'Tests for only Windows')
class NTPTest(ModuleCase):
    '''
    Validate windows ntp module
    '''
    @destructiveTest
    def test_ntp_set_servers(self):
        '''
        test ntp get and set servers
        '''
        # Configure a known server, then read the configuration back
        # and confirm it is the first entry returned.
        server = 'pool.ntp.org'
        result = self.run_function('ntp.set_servers', [server])
        self.assertTrue(result)
        configured = self.run_function('ntp.get_servers')
        self.assertEqual(server, configured[0])

View file

@ -929,13 +929,13 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll packages were successfully installed',
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll packages were successfully installed',
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}

View file

@ -359,3 +359,65 @@ class SystemModuleTest(ModuleCase):
if self.run_function('grains.get', ['os_family']) == 'NILinuxRT':
self.assertTrue(self.run_function('system._has_settable_hwclock'))
self.assertTrue(self._hwclock_has_compare())
@skipIf(not salt.utils.platform.is_windows(), 'These tests can only be run on windows')
class WinSystemModuleTest(ModuleCase):
    '''
    Validate the date/time functions in the win_system module
    '''
    def test_get_computer_name(self):
        '''
        Test getting the computer name
        '''
        ret = self.run_function('system.get_computer_name')
        self.assertTrue(isinstance(ret, str))
        # compare against the hostname the OS itself reports
        import socket
        name = socket.gethostname()
        self.assertEqual(name, ret)

    @destructiveTest
    def test_set_computer_desc(self):
        '''
        Test setting the computer description
        '''
        desc = 'test description'
        # set_computer_desc returns a dict keyed by 'Computer Description'
        # (see the assertEqual below); presumably falsy on failure —
        # TODO confirm against win_system module
        set_desc = self.run_function('system.set_computer_desc', [desc])
        self.assertTrue(set_desc)
        get_desc = self.run_function('system.get_computer_desc')
        self.assertEqual(set_desc['Computer Description'], get_desc)

    def test_get_system_time(self):
        '''
        Test getting the system time
        '''
        ret = self.run_function('system.get_system_time')
        now = datetime.datetime.now()
        # NOTE(review): only hours:minutes are compared; this can still
        # fail if the minute rolls over between the two calls
        self.assertEqual(now.strftime("%I:%M"), ret.rsplit(':', 1)[0])

    @destructiveTest
    def test_set_system_time(self):
        '''
        Test setting the system time
        '''
        test_time = '10:55'
        # NOTE(review): set_time's return value is never asserted, and the
        # original time is not restored afterwards
        set_time = self.run_function('system.set_system_time', [test_time + ' AM'])
        get_time = self.run_function('system.get_system_time').rsplit(':', 1)[0]
        self.assertEqual(get_time, test_time)

    def test_get_system_date(self):
        '''
        Test getting system date
        '''
        ret = self.run_function('system.get_system_date')
        date = datetime.datetime.now().date().strftime("%m/%d/%Y")
        self.assertEqual(date, ret)

    @destructiveTest
    def test_set_system_date(self):
        '''
        Test setting system date
        '''
        # destructive by design: the system date is not restored afterwards
        self.assertTrue(self.run_function('system.set_system_date', ['3/25/2018']))

View file

@ -261,6 +261,10 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
'tgt': '*',
'fun': 'test.ping',
}
self.application.opts['order_masters'] = ['']
self.application.opts['syndic_wait'] = 5
response = self.fetch('/',
method='POST',
body=salt.utils.json.dumps(low),
@ -270,9 +274,6 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
request_timeout=30,
)
response_obj = salt.utils.json.loads(response.body)
self.application.opts['order_masters'] = []
self.application.opts['syndic_wait'] = 5
self.assertEqual(response_obj['return'], [{'localhost': True, 'minion': True, 'sub_minion': True}])
# runner tests
@ -290,7 +291,7 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
)
response_obj = salt.utils.json.loads(response.body)
self.assertEqual(len(response_obj['return']), 1)
self.assertEqual(set(response_obj['return'][0]), set(['localhost', 'minion', 'sub_minion']))
self.assertEqual(sorted(response_obj['return'][0]), sorted(['localhost', 'minion', 'sub_minion']))
# runner_async tests
def test_simple_local_runner_async_post(self):

View file

@ -16,7 +16,7 @@ from tests.support.unit import skipIf
from tests.support.case import ModuleCase
from tests.support.docker import with_network, random_name
from tests.support.paths import FILES, TMP
from tests.support.helpers import destructiveTest
from tests.support.helpers import destructiveTest, with_tempdir
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt Libs
@ -33,24 +33,6 @@ log = logging.getLogger(__name__)
IPV6_ENABLED = bool(salt.utils.network.ip_addrs6(include_loopback=True))
def with_temp_dir(func):
    '''
    Decorator that hands the wrapped test a freshly created temporary
    directory (as its first positional argument after ``self``) and
    removes that directory again once the test has finished.
    '''
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        work_dir = tempfile.mkdtemp(dir=TMP)
        try:
            return func(self, work_dir, *args, **kwargs)
        finally:
            try:
                salt.utils.files.rm_rf(work_dir)
            except OSError as exc:
                # Already removed is fine; anything else is a real error.
                if exc.errno != errno.ENOENT:
                    raise
    return inner
def container_name(func):
'''
Generate a randomized name for a container and clean it up afterward
@ -126,7 +108,7 @@ class DockerContainerTestCase(ModuleCase, SaltReturnAssertsMixin):
log.debug('ret = %s', ret)
return ret
@with_temp_dir
@with_tempdir()
@container_name
def test_running_with_no_predefined_volume(self, name, bind_dir_host):
'''
@ -568,6 +550,21 @@ class DockerContainerTestCase(ModuleCase, SaltReturnAssertsMixin):
'Forcibly removed container \'{0}\''.format(name)
)
@container_name
def test_running_image_name(self, name):
'''
Ensure that we create the container using the image name instead of ID
'''
ret = self.run_state(
'docker_container.running',
name=name,
image=self.image,
shutdown_timeout=1,
)
self.assertSaltTrueReturn(ret)
ret = self.run_function('docker.inspect_container', [name])
self.assertEqual(ret['Config']['Image'], self.image)
@container_name
def test_env_with_running_container(self, name):
'''

View file

@ -592,7 +592,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
self.assertEqual(
ret[key]['comment'],
('Python package carbon < 1.3 was already installed\n'
'All packages were successfully installed'))
'All specified packages are already installed'))
break
else:
raise Exception('Expected state did not run')

View file

@ -121,7 +121,7 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
data = '\n'.join(data)
self.assertIn('minion', data)
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
arg_str = '-c {0} -t {1} {2}'.format(self.get_config_dir(), timeout, arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, timeout=25,

View file

@ -24,6 +24,7 @@ import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import threading
@ -60,6 +61,31 @@ import salt.utils.files
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
def no_symlinks():
'''
Check if git is installed and has symlinks enabled in the configuration.
'''
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ''
try:
output = subprocess.check_output('git config --get core.symlinks', shell=True)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == 'true':
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
'''
Mark a test case as a destructive test for example adding or removing users

View file

@ -39,6 +39,7 @@ import salt.utils.stringutils
import salt.utils.yaml
import salt.version
import salt.exceptions
import salt.utils.process
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt._compat import ElementTree as etree
@ -638,6 +639,29 @@ class SaltReturnAssertsMixin(object):
self.assertNotEqual(saltret, comparison)
def _fetch_events(q):
    '''
    Collect events from the minion event bus and push them onto ``q``.

    Runs forever; intended as a subprocess target. Registers an atexit
    hook that drains (and task_done()s) the queue so the feeding process
    can shut down cleanly.
    '''
    def _clean_queue():
        print('Cleaning queue!')
        while not q.empty():
            queue_item = q.get()
            queue_item.task_done()
    atexit.register(_clean_queue)
    a_config = AdaptedConfigurationTestCaseMixin()
    event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
    while True:
        try:
            events = event.get_event(full=False)
        except Exception:
            # This is broad but we'll see all kinds of issues right now
            # if we drop the proc out from under the socket while we're
            # reading. Skip the put: previously a failed read re-queued
            # the prior batch (or raised NameError on the first pass).
            continue
        q.put(events)
class SaltMinionEventAssertsMixin(object):
'''
Asserts to verify that a given event was seen
@ -646,36 +670,15 @@ class SaltMinionEventAssertsMixin(object):
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = multiprocessing.Process(target=cls._fetch, args=(cls.q,))
cls.fetch_proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=_fetch_events, args=(cls.q,)
)
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
@staticmethod
def _fetch(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixin()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
pass
q.put(events)
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')

View file

@ -177,7 +177,7 @@ class SaltCoverageTestingParser(SaltTestingParser):
# Update environ so that any subprocess started on tests are also
# included in the report
coverage_options['data_suffix'] = True
os.environ['COVERAGE_PROCESS_START'] = '1'
os.environ['COVERAGE_PROCESS_START'] = ''
os.environ['COVERAGE_OPTIONS'] = salt.utils.json.dumps(coverage_options)
# Setup coverage

View file

@ -862,15 +862,6 @@ SwapTotal: 4789244 kB'''
with patch.object(salt.utils.dns, 'parse_resolv', MagicMock(return_value=resolv_mock)):
assert core.dns() == ret
def _run_dns_test(self, resolv_mock, ret):
    '''
    Patch out Windows detection, the ipv6 opt, and resolv.conf parsing,
    then assert that core.dns() produces the expected structure.
    '''
    patched_windows = patch.object(salt.utils, 'is_windows',
                                   MagicMock(return_value=False))
    patched_opts = patch.dict(core.__opts__, {'ipv6': False})
    patched_resolv = patch.object(salt.utils.dns, 'parse_resolv',
                                  MagicMock(return_value=resolv_mock))
    with patched_windows, patched_opts, patched_resolv:
        self.assertEqual(core.dns(), ret)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
@patch.object(salt.utils, 'is_windows', MagicMock(return_value=False))
@patch('salt.utils.network.ip_addrs', MagicMock(return_value=['1.2.3.4', '5.6.7.8']))
@ -892,3 +883,21 @@ SwapTotal: 4789244 kB'''
self.assertIn('fqdns', fqdns)
self.assertEqual(len(fqdns['fqdns']), len(ret['fqdns']))
self.assertEqual(set(fqdns['fqdns']), set(ret['fqdns']))
def test_core_virtual(self):
    '''
    test virtual grain with cmd virt-what
    '''
    virt = 'kvm'
    # Fake a successful `virt-what` run reporting kvm.
    cmd_result = {'pid': 78,
                  'retcode': 0,
                  'stderr': '',
                  'stdout': virt}
    with patch.object(salt.utils, 'is_windows', MagicMock(return_value=False)), \
            patch.object(salt.utils, 'which', MagicMock(return_value=True)), \
            patch.dict(core.__salt__,
                       {'cmd.run_all': MagicMock(return_value=cmd_result)}):
        grains = core._virtual({'kernel': 'test', })
        self.assertEqual(grains['virtual'], virt)

View file

@ -773,6 +773,107 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin):
saltenv='base')
self.assertEqual(ret, 'This is a templated file.')
def test_get_diff(self):
    '''
    Exercise file.get_diff across the four text/binary pairings using a
    fake filehandle, verifying both the unified-diff output and the
    "Replace ..." placeholder messages for binary content.
    '''
    text1 = textwrap.dedent('''\
        foo
        bar
        baz
        спам
        ''')
    text2 = textwrap.dedent('''\
        foo
        bar
        baz
        яйца
        ''')
    # The below two variables are 8 bytes of data pulled from /dev/urandom
    binary1 = b'\xd4\xb2\xa6W\xc6\x8e\xf5\x0f'
    binary2 = b',\x13\x04\xa5\xb0\x12\xdf%'

    # pylint: disable=no-self-argument
    class MockFopen(object):
        '''
        Provides a fake filehandle object that has just enough to run
        readlines() as file.get_diff does. Any significant changes to
        file.get_diff may require this class to be modified.
        '''
        def __init__(mockself, path, *args, **kwargs):  # pylint: disable=unused-argument
            mockself.path = path

        def readlines(mockself):  # pylint: disable=unused-argument
            # dispatch on the fake "path" to the matching fixture bytes
            return {
                'text1': text1.encode('utf8'),
                'text2': text2.encode('utf8'),
                'binary1': binary1,
                'binary2': binary2,
            }[mockself.path].splitlines(True)

        def __enter__(mockself):
            return mockself

        def __exit__(mockself, *args):  # pylint: disable=unused-argument
            pass
    # pylint: enable=no-self-argument

    # fopen hands back a MockFopen; cache_file is an identity pass-through
    fopen = MagicMock(side_effect=lambda x, *args, **kwargs: MockFopen(x))
    cache_file = MagicMock(side_effect=lambda x, *args, **kwargs: x)

    # Mocks for __utils__['files.is_text']
    # NOTE: each side_effect list supplies exactly two answers — one per
    # file checked — so each mock supports a single get_diff call pair
    mock_text_text = MagicMock(side_effect=[True, True])
    mock_bin_bin = MagicMock(side_effect=[False, False])
    mock_text_bin = MagicMock(side_effect=[True, False])
    mock_bin_text = MagicMock(side_effect=[False, True])

    with patch.dict(filemod.__salt__, {'cp.cache_file': cache_file}), \
            patch.object(salt.utils.files, 'fopen', fopen):

        # Test diffing two text files
        with patch.dict(filemod.__utils__, {'files.is_text': mock_text_text}):

            # Identical files
            ret = filemod.get_diff('text1', 'text1')
            self.assertEqual(ret, '')

            # Non-identical files
            ret = filemod.get_diff('text1', 'text2')
            self.assertEqual(
                ret,
                textwrap.dedent('''\
                    --- text1
                    +++ text2
                    @@ -1,4 +1,4 @@
                     foo
                     bar
                     baz
                    -спам
                    +яйца
                    ''')
            )

        # Test diffing two binary files
        with patch.dict(filemod.__utils__, {'files.is_text': mock_bin_bin}):

            # Identical files
            ret = filemod.get_diff('binary1', 'binary1')
            self.assertEqual(ret, '')

            # Non-identical files
            ret = filemod.get_diff('binary1', 'binary2')
            self.assertEqual(ret, 'Replace binary file')

        # Test diffing a text file with a binary file
        with patch.dict(filemod.__utils__, {'files.is_text': mock_text_bin}):

            ret = filemod.get_diff('text1', 'binary1')
            self.assertEqual(ret, 'Replace text file with binary file')

        # Test diffing a binary file with a text file
        with patch.dict(filemod.__utils__, {'files.is_text': mock_bin_text}):

            ret = filemod.get_diff('binary1', 'text1')
            self.assertEqual(ret, 'Replace binary file with text file')
@skipIf(pytest is None, 'PyTest required for this set of tests')
class FilemodLineTests(TestCase, LoaderModuleMockMixin):

View file

@ -19,11 +19,10 @@
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import errno
import subprocess
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import no_symlinks
from tests.support.mock import (
MagicMock,
patch,
@ -35,31 +34,6 @@ from tests.support.mock import (
from salt.modules.inspectlib.collector import Inspector
# Cache for no_symlinks(): None = not yet probed, True/False = probed result.
HAS_SYMLINKS = None


def no_symlinks():
    '''
    Check if git is installed and has symlinks enabled in the configuration.

    Returns True when symlink support is NOT available (the value is shaped
    for direct use as a ``skipIf`` condition). The probe result is cached in
    the module-global HAS_SYMLINKS so the git subprocess runs at most once.
    '''
    global HAS_SYMLINKS
    if HAS_SYMLINKS is not None:
        return not HAS_SYMLINKS
    output = ''
    try:
        output = subprocess.check_output('git config --get core.symlinks', shell=True)
    except OSError as exc:
        # Only swallow "command not found"; re-raise anything else.
        if exc.errno != errno.ENOENT:
            raise
    except subprocess.CalledProcessError:
        # git returned non-zero status
        pass
    HAS_SYMLINKS = False
    # check_output() returns bytes on Python 3; without decoding, the
    # comparison against the (unicode) literal 'true' could never match
    # and symlink support was always reported as missing.
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    if output.strip() == 'true':
        HAS_SYMLINKS = True
    return not HAS_SYMLINKS
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(no_symlinks(), "Git missing 'core.symlinks=true' config")
class InspectorCollectorTestCase(TestCase):

View file

@ -60,38 +60,38 @@ class IptablesTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual(iptables.build_rule(**{'if': 'not eth0'}),
'! -i eth0')
self.assertEqual(iptables.build_rule(**{'proto': 'tcp', 'syn': '!'}),
self.assertEqual(iptables.build_rule(**{'protocol': 'tcp', 'syn': '!'}),
'-p tcp ! --syn')
self.assertEqual(iptables.build_rule(dports=[80, 443], proto='tcp'),
self.assertEqual(iptables.build_rule(dports=[80, 443], protocol='tcp'),
'-p tcp -m multiport --dports 80,443')
self.assertEqual(iptables.build_rule(dports='80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(dports='80,443', protocol='tcp'),
'-p tcp -m multiport --dports 80,443')
# Should it really behave this way?
self.assertEqual(iptables.build_rule(dports=['!80', 443],
proto='tcp'),
protocol='tcp'),
'-p tcp -m multiport ! --dports 80,443')
self.assertEqual(iptables.build_rule(dports='!80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(dports='!80,443', protocol='tcp'),
'-p tcp -m multiport ! --dports 80,443')
self.assertEqual(iptables.build_rule(sports=[80, 443], proto='tcp'),
self.assertEqual(iptables.build_rule(sports=[80, 443], protocol='tcp'),
'-p tcp -m multiport --sports 80,443')
self.assertEqual(iptables.build_rule(sports='80,443', proto='tcp'),
self.assertEqual(iptables.build_rule(sports='80,443', protocol='tcp'),
'-p tcp -m multiport --sports 80,443')
self.assertEqual(iptables.build_rule('filter', 'INPUT', command='I',
position='3', full=True,
dports='proto', jump='ACCEPT'),
'Error: proto must be specified')
dports='protocol', jump='ACCEPT'),
'Error: protocol must be specified')
self.assertEqual(iptables.build_rule('filter', 'INPUT', command='I',
position='3', full=True,
sports='proto', jump='ACCEPT'),
'Error: proto must be specified')
sports='protocol', jump='ACCEPT'),
'Error: protocol must be specified')
self.assertEqual(iptables.build_rule('', 'INPUT', command='I',
position='3', full='True',

View file

@ -0,0 +1,255 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`David Homolka <david.homolka@ultimum.io>`
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.restartcheck as restartcheck
# import salt.utils.files
# from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RestartcheckTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.restartcheck
    '''
    def setup_loader_modules(self):
        return {restartcheck: {}}

    def _assert_ignored_path(self, path):
        '''
        Assert that ``path`` is rejected by restartcheck._valid_deleted_file
        in all three forms it can take in lsof output: plain, with a
        trailing ' (deleted)' marker, and with a trailing ' (path inode=N)'
        marker.
        '''
        self.assertFalse(restartcheck._valid_deleted_file(path))
        self.assertFalse(restartcheck._valid_deleted_file(path + ' (deleted)'))
        self.assertFalse(restartcheck._valid_deleted_file(path + ' (path inode=1)'))

    def test_kernel_versions_debian(self):
        '''
        Test kernel version debian
        '''
        mock = MagicMock(return_value=' Installed: 4.9.82-1+deb9u3')
        with patch.dict(restartcheck.__grains__, {'os': 'Debian'}):
            with patch.dict(restartcheck.__salt__, {'cmd.run': mock}):
                self.assertListEqual(restartcheck._kernel_versions_debian(),
                                     ['4.9.82-1+deb9u3'])

    def test_kernel_versions_ubuntu(self):
        '''
        Test kernel version ubuntu
        '''
        mock = MagicMock(return_value=' Installed: 4.10.0-42.46')
        with patch.dict(restartcheck.__grains__, {'os': 'Ubuntu'}):
            with patch.dict(restartcheck.__salt__, {'cmd.run': mock}):
                self.assertListEqual(restartcheck._kernel_versions_debian(),
                                     ['4.10.0-42.46', '4.10.0-42-generic #46', '4.10.0-42-lowlatency #46'])

    def test_kernel_versions_redhat(self):
        '''
        Test kernel version redhat, parsed from an rpm query line.
        '''
        mock = MagicMock(return_value='kernel-3.10.0-862.el7.x86_64 Thu Apr 5 00:40:00 2018')
        with patch.dict(restartcheck.__salt__, {'cmd.run': mock}):
            self.assertListEqual(restartcheck._kernel_versions_redhat(),
                                 ['3.10.0-862.el7.x86_64'])

    def test_valid_deleted_file_deleted(self):
        '''
        Test (deleted) file
        '''
        self.assertTrue(restartcheck._valid_deleted_file('/usr/lib/test (deleted)'))

    def test_valid_deleted_file_psth_inode(self):
        '''
        Test (path inode=1) file
        (the method name keeps the historical 'psth' typo so the public
        test name does not change)
        '''
        self.assertTrue(restartcheck._valid_deleted_file('/usr/lib/test (path inode=1)'))

    def test_valid_deleted_file_var_log(self):
        '''
        Test /var/log/
        '''
        self._assert_ignored_path('/var/log/test')

    def test_valid_deleted_file_var_local_log(self):
        '''
        Test /var/local/log/
        '''
        self._assert_ignored_path('/var/local/log/test')

    def test_valid_deleted_file_var_run(self):
        '''
        Test /var/run/
        '''
        self._assert_ignored_path('/var/run/test')

    def test_valid_deleted_file_var_local_run(self):
        '''
        Test /var/local/run/
        '''
        self._assert_ignored_path('/var/local/run/test')

    def test_valid_deleted_file_tmp(self):
        '''
        Test /tmp/
        '''
        self._assert_ignored_path('/tmp/test')

    def test_valid_deleted_file_dev_shm(self):
        '''
        Test /dev/shm/
        '''
        self._assert_ignored_path('/dev/shm/test')

    def test_valid_deleted_file_run(self):
        '''
        Test /run/
        '''
        self._assert_ignored_path('/run/test')

    def test_valid_deleted_file_drm(self):
        '''
        Test /drm/
        '''
        self._assert_ignored_path('/drm/test')

    def test_valid_deleted_file_var_tmp(self):
        '''
        Test /var/tmp/
        '''
        self._assert_ignored_path('/var/tmp/test')

    def test_valid_deleted_file_var_local_tmp(self):
        '''
        Test /var/local/tmp/
        '''
        self._assert_ignored_path('/var/local/tmp/test')

    def test_valid_deleted_file_dev_zero(self):
        '''
        Test /dev/zero/
        '''
        self._assert_ignored_path('/dev/zero/test')

    def test_valid_deleted_file_dev_pts(self):
        '''
        Test /dev/pts/
        '''
        self._assert_ignored_path('/dev/pts/test')

    def test_valid_deleted_file_usr_lib_locale(self):
        '''
        Test /usr/lib/locale/
        '''
        self._assert_ignored_path('/usr/lib/locale/test')

    def test_valid_deleted_file_home(self):
        '''
        Test /home/
        '''
        self._assert_ignored_path('/home/test')

    def test_valid_deleted_file_icon_theme_cache(self):
        '''
        Test /test.icon-theme.cache
        '''
        self._assert_ignored_path('/dev/test.icon-theme.cache')

    def test_valid_deleted_file_var_cache_fontconfig(self):
        '''
        Test /var/cache/fontconfig/
        '''
        self._assert_ignored_path('/var/cache/fontconfig/test')

    def test_valid_deleted_file_var_lib_nagios3_spool(self):
        '''
        Test /var/lib/nagios3/spool/
        '''
        self._assert_ignored_path('/var/lib/nagios3/spool/test')

    def test_valid_deleted_file_var_lib_nagios3_spool_checkresults(self):
        '''
        Test /var/lib/nagios3/spool/checkresults/
        '''
        self._assert_ignored_path('/var/lib/nagios3/spool/checkresults/test')

    def test_valid_deleted_file_var_lib_postgresql(self):
        '''
        Test /var/lib/postgresql/
        '''
        self._assert_ignored_path('/var/lib/postgresql/test')

    def test_valid_deleted_file_var_lib_vdr(self):
        '''
        Test /var/lib/vdr/
        '''
        self._assert_ignored_path('/var/lib/vdr/test')

    def test_valid_deleted_file_aio(self):
        '''
        Test /[aio]/
        '''
        for path in ('/opt/test', '/apt/test', '/ipt/test', '/aio/test'):
            self._assert_ignored_path(path)

    def test_valid_deleted_file_sysv(self):
        '''
        Test /SYSV/
        '''
        self._assert_ignored_path('/SYSV/test')

View file

@ -94,18 +94,16 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
comment_line = '# this is a comment\n'
# Write out the authorized key to a temporary file
if salt.utils.platform.is_windows():
temp_file = tempfile.NamedTemporaryFile(delete=False)
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
# Add comment
temp_file.write(comment_line)
# Add empty line for #41335
temp_file.write(empty_line)
temp_file.write('{0} {1} {2} {3}'.format(options, enc, key, email))
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
temp_file.close()
with salt.utils.files.fopen(temp_file.name, 'w') as _fh:
# Add comment
_fh.write(comment_line)
# Add empty line for #41335
_fh.write(empty_line)
_fh.write('{0} {1} {2} {3}'.format(options, enc, key, email))
with patch.dict(ssh.__salt__, {'user.info': MagicMock(return_value={})}):
with patch('salt.modules.ssh._get_config_file', MagicMock(return_value=temp_file.name)):
ssh._replace_auth_key('foo', key, config=temp_file.name)

View file

@ -520,6 +520,21 @@ class MysqlPillarTestCase(TestCase):
)
def test_301_process_results_with_lists(self):
'''
Validates the following results:
{'a': [
{'c': [
{'e': 1},
{'g': 2}
]
},
{'h': [
{'j': 3, 'k': 4}
]
}
]}
'''
return_data = mysql.MySQLExtPillar()
return_data.as_list = False
return_data.with_lists = [1, 3]
@ -529,22 +544,49 @@ class MysqlPillarTestCase(TestCase):
['a', 'b', 'c', 'f', 'g', 2],
['a', 'z', 'h', 'y', 'j', 3],
['a', 'z', 'h', 'y', 'k', 4]])
self.assertEqual(
{'a': [
{'c': [
{'e': 1},
{'g': 2}
]
},
{'h': [
{'j': 3, 'k': 4}
]
}
]},
return_data.result
)
assert 'a' in return_data.result
for x in return_data.result['a']:
if 'c' in x:
assert list(x.keys()) == ['c'], x.keys()
for y in x['c']:
if 'e' in y:
assert list(y.keys()) == ['e']
assert y['e'] == 1
elif 'g' in y:
assert list(y.keys()) == ['g']
assert y['g'] == 2
else:
raise ValueError("Unexpected value {0}".format(y))
elif 'h' in x:
assert len(x['h']) == 1
for y in x['h']:
if 'j' in y:
assert len(y.keys()) == 2
assert y['j'] == 3
elif 'h' in y:
assert len(y.keys()) == 2
assert y['k'] == 4
else:
raise ValueError("Unexpected value {0}".format(y))
else:
raise ValueError("Unexpected value {0}".format(x))
def test_302_process_results_with_lists_consecutive(self):
'''
Validates the following results:
{'a': [
[[
{'e': 1},
{'g': 2}
]
],
[[
{'j': 3, 'k': 4}
]
]
]}
'''
return_data = mysql.MySQLExtPillar()
return_data.as_list = False
return_data.with_lists = [1, 2, 3]
@ -554,17 +596,31 @@ class MysqlPillarTestCase(TestCase):
['a', 'b', 'c', 'f', 'g', 2],
['a', 'z', 'h', 'y', 'j', 3],
['a', 'z', 'h', 'y', 'k', 4]])
self.assertEqual(
{'a': [
[[
{'e': 1},
{'g': 2}
]
],
[[
{'j': 3, 'k': 4}
]
]
]},
return_data.result
)
assert 'a' in return_data.result
for x in return_data.result['a']:
assert len(x) == 1
if len(x[0][0]) == 1:
for y in x[0]:
if 'e' in y:
assert list(y.keys()) == ['e']
assert y['e'] == 1
elif 'g' in y:
assert list(y.keys()) == ['g']
assert y['g'] == 2
else:
raise ValueError("Unexpected value {0}".format(y))
elif len(x[0][0]) == 2:
for y in x[0]:
if 'j' in y:
assert len(y.keys()) == 2
assert y['j'] == 3
elif 'k' in y:
assert len(y.keys()) == 2
assert y['k'] == 4
else:
raise ValueError(
"Unexpected value {0}".format(len(x[0][0]))
)
else:
raise ValueError("Unexpected value {0}".format(x))

View file

@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Tests for the pgjsonb.clean_old_jobs function.
    '''
    def setup_loader_modules(self):
        # Default opts select the purge path (archive_jobs: 0).
        return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}

    def test_clean_old_jobs_purge(self):
        '''
        With the default archive_jobs: 0, clean_old_jobs takes the purge
        path and returns None; the DB connection (_get_serv) is mocked out.
        '''
        connect_mock = MagicMock()
        with patch.object(pgjsonb, '_get_serv', connect_mock):
            with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
                self.assertEqual(pgjsonb.clean_old_jobs(), None)

    def test_clean_old_jobs_archive(self):
        '''
        With archive_jobs: 1, clean_old_jobs takes the archive path and
        returns None; the DB connection (_get_serv) is mocked out.
        '''
        connect_mock = MagicMock()
        with patch.object(pgjsonb, '_get_serv', connect_mock):
            with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
                with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
                    self.assertEqual(pgjsonb.clean_old_jobs(), None)

View file

@ -207,7 +207,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'successfully installed',
'packages are already installed',
{'test': ret}
)
@ -241,7 +241,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'were successfully installed',
'packages are already installed',
{'test': ret}
)
@ -264,7 +264,7 @@ class PipStateTest(TestCase, SaltReturnAssertsMixin, LoaderModuleMockMixin):
)
self.assertSaltTrueReturn({'test': ret})
self.assertInSaltComment(
'were successfully installed',
'packages are already installed',
{'test': ret}
)

View file

@ -250,3 +250,49 @@ class TestBadCryptodomePubKey(TestCase):
'''
key = salt.crypt.get_rsa_pub_key(self.key_path)
assert key.can_encrypt()
class TestM2CryptoRegression47124(TestCase):
    '''
    Regression tests (issue #47124): with the M2Crypto backend,
    sign_message and verify_signature must accept the message as either
    text or bytes.
    '''

    # Signature of the message 'meh' produced with PRIVKEY_DATA (the sign
    # tests below assert equality against it).
    # NOTE(review): this is a text literal, not b'...'; presumably
    # salt.crypt normalizes str/bytes internally -- confirm before
    # comparing it against raw byte output elsewhere.
    SIGNATURE = (
        'w\xac\xfe18o\xeb\xfb\x14+\x9e\xd1\xb7\x7fe}\xec\xd6\xe1P\x9e\xab'
        '\xb5\x07\xe0\xc1\xfd\xda#\x04Z\x8d\x7f\x0b\x1f}:~\xb2s\x860u\x02N'
        '\xd4q"\xb7\x86*\x8f\x1f\xd0\x9d\x11\x92\xc5~\xa68\xac>\x12H\xc2%y,'
        '\xe6\xceU\x1e\xa3?\x0c,\xf0u\xbb\xd0[g_\xdd\x8b\xb0\x95:Y\x18\xa5*'
        '\x99\xfd\xf3K\x92\x92 ({\xd1\xff\xd9F\xc8\xd6K\x86e\xf9\xa8\xad\xb0z'
        '\xe3\x9dD\xf5k\x8b_<\xe7\xe7\xec\xf3"\'\xd5\xd2M\xb4\xce\x1a\xe3$'
        '\x9c\x81\xad\xf9\x11\xf6\xf5>)\xc7\xdd\x03&\xf7\x86@ks\xa6\x05\xc2'
        '\xd0\xbd\x1a7\xfc\xde\xe6\xb0\xad!\x12#\xc86Y\xea\xc5\xe3\xe2\xb3'
        '\xc9\xaf\xfa\x0c\xf2?\xbf\x93w\x18\x9e\x0b\xa2a\x10:M\x05\x89\xe2W.Q'
        '\xe8;yGT\xb1\xf2\xc6A\xd2\xc4\xbeN\xb3\xcfS\xaf\x03f\xe2\xb4)\xe7\xf6'
        '\xdbs\xd0Z}8\xa4\xd2\x1fW*\xe6\x1c"\x8b\xd0\x18w\xb9\x7f\x9e\x96\xa3'
        '\xd9v\xf7\x833\x8e\x01'
    )

    @skipIf(not HAS_M2, "Skip when m2crypto is not installed")
    def test_m2crypto_verify_bytes(self):
        # NOTE(review): despite the name, this passes a *unicode* message
        # while the _unicode twin below passes bytes -- the names look
        # swapped. Both input kinds are covered either way; confirm intent
        # before renaming.
        message = salt.utils.stringutils.to_unicode('meh')
        with patch('salt.utils.files.fopen', mock_open(read_data=PUBKEY_DATA)):
            salt.crypt.verify_signature('/keydir/keyname.pub', message, self.SIGNATURE)

    @skipIf(not HAS_M2, "Skip when m2crypto is not installed")
    def test_m2crypto_verify_unicode(self):
        # Passes a bytes message (see the swapped-name note above on the
        # _bytes twin).
        message = salt.utils.stringutils.to_bytes('meh')
        with patch('salt.utils.files.fopen', mock_open(read_data=PUBKEY_DATA)):
            salt.crypt.verify_signature('/keydir/keyname.pub', message, self.SIGNATURE)

    @skipIf(not HAS_M2, "Skip when m2crypto is not installed")
    def test_m2crypto_sign_bytes(self):
        # Passes a unicode message (same apparent name/helper swap as the
        # verify tests).
        message = salt.utils.stringutils.to_unicode('meh')
        key = M2Crypto.RSA.load_key_string(six.b(PRIVKEY_DATA))
        with patch('salt.crypt.get_rsa_key', return_value=key):
            signature = salt.crypt.sign_message('/keydir/keyname.pem', message, passphrase='password')
        self.assertEqual(signature, self.SIGNATURE)

    @skipIf(not HAS_M2, "Skip when m2crypto is not installed")
    def test_m2crypto_sign_unicode(self):
        # Passes a bytes message (same apparent name/helper swap as the
        # verify tests).
        message = salt.utils.stringutils.to_bytes('meh')
        key = M2Crypto.RSA.load_key_string(six.b(PRIVKEY_DATA))
        with patch('salt.crypt.get_rsa_key', return_value=key):
            signature = salt.crypt.sign_message('/keydir/keyname.pem', message, passphrase='password')
        self.assertEqual(signature, self.SIGNATURE)

View file

@ -11,6 +11,7 @@ import os
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.helpers import skip_if_not_root
# Import salt libs
import salt.minion
@ -24,7 +25,7 @@ __opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MinionTestCase(TestCase):
class MinionTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
def test_invalid_master_address(self):
with patch.dict(__opts__, {'ipv6': False, 'master': float('127.0'), 'master_port': '4555', 'retry_dns': False}):
self.assertRaises(SaltSystemExit, salt.minion.resolve_dns, __opts__)
@ -263,7 +264,7 @@ class MinionTestCase(TestCase):
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts = self.get_config('minion', from_scratch=True)
mock_opts['beacons_before_connect'] = True
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
try:
@ -287,7 +288,7 @@ class MinionTestCase(TestCase):
patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
mock_opts = self.get_config('minion', from_scratch=True)
mock_opts['scheduler_before_connect'] = True
minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
try:

View file

@ -6,6 +6,7 @@ from __future__ import absolute_import, print_function, unicode_literals
# Python
import socket
import textwrap
from salt.ext.six.moves import zip # pylint: disable=redefined-builtin
# Salt
@ -20,11 +21,6 @@ from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Debug
import pprint
ppr = pprint.PrettyPrinter(indent=2).pprint
class DNShelpersCase(TestCase):
'''
Tests for the parser helpers
@ -40,13 +36,17 @@ class DNShelpersCase(TestCase):
test_map = (
'ex1.nl',
'o.1.example.eu',
'a1a.b2b.c3c.example.com'
'a1a.b2b.c3c.example.com',
'c3c.example.co.uk',
'c3c.example.mil.ng',
)
res_map = (
['ex1.nl'],
['o.1.example.eu', '1.example.eu', 'example.eu'],
['a1a.b2b.c3c.example.com', 'b2b.c3c.example.com', 'c3c.example.com', 'example.com']
['a1a.b2b.c3c.example.com', 'b2b.c3c.example.com', 'c3c.example.com', 'example.com'],
['c3c.example.co.uk', 'example.co.uk'],
['c3c.example.mil.ng', 'example.mil.ng']
)
for domain, result in zip(test_map, res_map):
@ -69,7 +69,9 @@ class DNShelpersCase(TestCase):
]
# What are the odds of this tripping over a build
# 1/(8!^4) builds?
self.assertNotEqual(
_weighted_order(list(recs[-1])),
_weighted_order(list(recs[-1])),
_weighted_order(list(recs[-1]))
)
@ -101,7 +103,7 @@ class DNShelpersCase(TestCase):
]
results = [
{'address': ipaddress.IPv4Address(right[0])},
ipaddress.IPv4Address(right[0]),
{'preference': 10, 'name': 'mbox.example.com'},
{'prio': 10, 'weight': 20, 'port': 30, 'name': 'example.com'}
]
@ -111,7 +113,7 @@ class DNShelpersCase(TestCase):
wrong = [
'not-an-ip',
'10 20 30 toomany.example.com',
'hundred 20 30 interror.example.com',
'10 toolittle.example.com',
]
@ -136,11 +138,11 @@ class DNShelpersCase(TestCase):
))
results = [
OrderedDict([(10, [{'srvr': 'mbox.example.com'}])]),
OrderedDict([(10, ['mbox.example.com'])]),
OrderedDict([
(10, [{'srvr': 'mbox1.example.com'}]),
(20, [{'srvr': 'mbox2.example.com'}, {'srvr': 'mbox3.example.com'}]),
(30, [{'srvr': 'mbox4.example.com'}, {'srvr': 'mbox5.example.com'}, {'srvr': 'mbox6.example.com'}])]
(10, ['mbox1.example.com']),
(20, ['mbox2.example.com', 'mbox3.example.com']),
(30, ['mbox4.example.com', 'mbox5.example.com', 'mbox6.example.com'])]
),
]
@ -159,9 +161,9 @@ class DNSlookupsCase(TestCase):
only nslookup is bad enough to be an exception to that
a lookup function
- raises ValueError when an incorrect DNS type is given
- returns False upon error
- returns [*record-data] upon succes/no records
- raises ValueError when an incorrect DNS type is given
- returns False upon error
- returns [*record-data] upon succes/no records
'''
CMD_RET = {
@ -182,6 +184,9 @@ class DNSlookupsCase(TestCase):
'2a00:a00:b01:c02:d03:e04:f05:222',
'2a00:a00:b01:c02:d03:e04:f05:333'] # multi-match
],
'CAA': [
['0 issue "exampleca.com"', '0 iodef "mailto:sslabuse@example.com"'],
],
'CNAME': [
['web.example.com.']
],
@ -189,6 +194,14 @@ class DNSlookupsCase(TestCase):
['10 mx1.example.com.'],
['10 mx1.example.com.', '20 mx2.example.eu.', '30 mx3.example.nl.']
],
'SSHFP': [
[
'1 1 0aabda8af5418108e8a5d3f90f207226b2c89fbe',
'1 2 500ca871d8e255e01f1261a2370c4e5406b8712f19916d3ab9f86344a67e5597',
'3 1 a3b605ce6f044617c6077c46a7cd5d17a767f0d5',
'4 2 0360d0a5a2fa550f972259e7374533add7ac8e5f303322a5b8e208bbc859ab1b'
]
],
'TXT': [
['v=spf1 a include:_spf4.example.com include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all']
]
@ -217,7 +230,6 @@ class DNSlookupsCase(TestCase):
cmd_mock = MagicMock(
return_value=test_res
)
return patch.dict(salt.utils.dns.__salt__, {'cmd.run_all': cmd_mock}, clear=True)
def _test_cmd_lookup(self, lookup_cb, wrong_type, wrong, right, empty=None, secure=None):
@ -248,12 +260,19 @@ class DNSlookupsCase(TestCase):
for rec_t, tests in right.items():
with self._mock_cmd_ret([dict([('stdout', dres)]) for dres in tests]):
for test_res in self.RESULTS[rec_t]:
if rec_t in ('A', 'AAAA', 'CNAME'):
if rec_t in ('A', 'AAAA', 'CNAME', 'SSHFP'):
rec = 'mocksrvr.example.com'
else:
rec = 'example.com'
lookup_res = lookup_cb(rec, rec_t)
if rec_t == 'SSHFP':
# Some resolvers 'split' the output and/or capitalize differently.
# So we need to workaround that here as well
lookup_res = [res[:4] + res[4:].replace(' ', '').lower() for res in lookup_res]
self.assertEqual(
lookup_cb(rec, rec_t), test_res,
lookup_res, test_res,
# msg='Error parsing {0} returns'.format(rec_t)
)
@ -284,14 +303,13 @@ class DNSlookupsCase(TestCase):
{'retcode': 9, 'stderr': ';; connection timed out; no servers could be reached'},
]
# example returns for dig
# example returns for dig +search +fail +noall +answer +noclass +nosplit +nottl -t {rtype} {name}
rights = {
'A': [
'mocksrvr.example.com.\tA\t10.1.1.1',
'web.example.com.\t\tA\t10.1.1.1\n'
'web.example.com.\t\tA\t10.2.2.2\n'
'web.example.com.\t\tA\t10.3.3.3'
],
'AAAA': [
'mocksrvr.example.com.\tA\t2a00:a00:b01:c02:d03:e04:f05:111',
@ -300,6 +318,10 @@ class DNSlookupsCase(TestCase):
'web.example.com.\t\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:222\n'
'web.example.com.\t\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:333'
],
'CAA': [
'example.com.\t\tCAA\t0 issue "exampleca.com"\n'
'example.com.\t\tCAA\t0 iodef "mailto:sslabuse@example.com"'
],
'CNAME': [
'mocksrvr.example.com.\tCNAME\tweb.example.com.'
],
@ -307,6 +329,12 @@ class DNSlookupsCase(TestCase):
'example.com.\t\tMX\t10 mx1.example.com.',
'example.com.\t\tMX\t10 mx1.example.com.\nexample.com.\t\tMX\t20 mx2.example.eu.\nexample.com.\t\tMX\t30 mx3.example.nl.'
],
'SSHFP': [
'mocksrvr.example.com.\tSSHFP\t1 1 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\n'
'mocksrvr.example.com.\tSSHFP\t1 2 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344A67E5597\n'
'mocksrvr.example.com.\tSSHFP\t3 1 A3B605CE6F044617C6077C46A7CD5D17A767F0D5\n'
'mocksrvr.example.com.\tSSHFP\t4 2 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BBC859AB1B'
],
'TXT': [
'example.com.\tTXT\t"v=spf1 a include:_spf4.example.com include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
]
@ -320,7 +348,6 @@ class DNSlookupsCase(TestCase):
'web.example.com.\t\tA\t10.2.2.2\n'
'web.example.com.\t\tA\t10.3.3.3\n'
'web.example.com.\tRRSIG\tA 8 3 7200 20170420000000 20170330000000 1629 example.com. Hv4p37EF55LKBxUNYpnhWiEYqfmMct0z0WgDJyG5reqYfl+z4HX/kaoi Wr2iCYuYeB4Le7BgnMSb77UGHPWE7lCQ8z5gkgJ9rCDrooJzSTVdnHfw 1JQ7txRSp8Rj2GLf/L3Ytuo6nNZTV7bWUkfhOs61DAcOPHYZiX8rVhIh UAE='
]
}
@ -328,23 +355,24 @@ class DNSlookupsCase(TestCase):
def test_drill(self):
# all Drill returns look like this
RES_TMPL = ''';; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 58233
;; flags: qr rd ra ; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;; mocksrvr.example.com. IN A
RES_TMPL = textwrap.dedent('''\
;; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 58233
;; flags: qr rd ra ; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;; mocksrvr.example.com.\tIN\tA
;; ANSWER SECTION:
{}
;; ANSWER SECTION:
{}
;; AUTHORITY SECTION:
;; AUTHORITY SECTION:
;; ADDITIONAL SECTION:
;; ADDITIONAL SECTION:
;; Query time: 37 msec
;; SERVER: 10.100.150.129
;; WHEN: Tue Apr 4 19:03:51 2017
;; MSG SIZE rcvd: 50
'''
;; Query time: 37 msec
;; SERVER: 10.100.150.129
;; WHEN: Tue Apr 4 19:03:51 2017
;; MSG SIZE rcvd: 50
''')
# Not even a different retcode!?
wrong_type = {'stdout': RES_TMPL.format('mocksrvr.example.com.\t4404\tIN\tA\t10.1.1.1\n')}
@ -353,6 +381,7 @@ class DNSlookupsCase(TestCase):
{'retcode': 1, 'stderr': 'Error: error sending query: No (valid) nameservers defined in the resolver'}
]
# example returns for drill {rtype} {name}
rights = {
'A': [
'mocksrvr.example.com.\t4404\tIN\tA\t10.1.1.1\n',
@ -367,6 +396,10 @@ class DNSlookupsCase(TestCase):
'web.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:222\n'
'web.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:333'
],
'CAA': [
'example.com.\t1144\tIN\tCAA\t0 issue "exampleca.com"\n'
'example.com.\t1144\tIN\tCAA\t0 iodef "mailto:sslabuse@example.com"'
],
'CNAME': [
'mocksrvr.example.com.\t4404\tIN\tCNAME\tweb.example.com.'
],
@ -376,6 +409,12 @@ class DNSlookupsCase(TestCase):
'example.com.\t4404\tIN\tMX\t20 mx2.example.eu.\n'
'example.com.\t4404\tIN\tMX\t30 mx3.example.nl.'
],
'SSHFP': [
'mocksrvr.example.com.\t3339\tIN\tSSHFP\t1 1 0aabda8af5418108e8a5d3f90f207226b2c89fbe\n'
'mocksrvr.example.com.\t3339\tIN\tSSHFP\t1 2 500ca871d8e255e01f1261a2370c4e5406b8712f19916d3ab9f86344a67e5597\n'
'mocksrvr.example.com.\t3339\tIN\tSSHFP\t3 1 a3b605ce6f044617c6077c46a7cd5d17a767f0d5\n'
'mocksrvr.example.com.\t3339\tIN\tSSHFP\t4 2 0360d0a5a2fa550f972259e7374533add7ac8e5f303322a5b8e208bbc859ab1b'
],
'TXT': [
'example.com.\t4404\tIN\tTXT\t"v=spf1 a include:_spf4.example.com include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
]
@ -407,7 +446,7 @@ class DNSlookupsCase(TestCase):
# wrong
with patch.object(socket, 'getaddrinfo', MagicMock(side_effect=socket.gaierror)):
for rec_t in ('A', 'AAAA'):
self.assertEqual(_lookup_gai('mockq', rec_t), False)
self.assertEqual(False, _lookup_gai('mockq', rec_t))
# example returns from getaddrinfo
right = {
@ -442,14 +481,13 @@ class DNSlookupsCase(TestCase):
empty = {'stdout': 'www.example.com has no MX record'}
# example returns for dig
# example returns for host -t {rdtype} {name}
rights = {
'A': [
'mocksrvr.example.com has address 10.1.1.1',
'web.example.com has address 10.1.1.1\n'
'web.example.com has address 10.2.2.2\n'
'web.example.com has address 10.3.3.3'
'web.example.com has address 10.1.1.1\n'
'web.example.com has address 10.2.2.2\n'
'web.example.com has address 10.3.3.3'
],
'AAAA': [
'mocksrvr.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:111',
@ -458,6 +496,10 @@ class DNSlookupsCase(TestCase):
'web.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:222\n'
'web.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:333'
],
'CAA': [
'example.com has CAA record 0 issue "exampleca.com"\n'
'example.com has CAA record 0 iodef "mailto:sslabuse@example.com"'
],
'CNAME': [
'mocksrvr.example.com is an alias for web.example.com.'
],
@ -467,6 +509,12 @@ class DNSlookupsCase(TestCase):
'example.com mail is handled by 20 mx2.example.eu.\n'
'example.com mail is handled by 30 mx3.example.nl.'
],
'SSHFP': [
'mocksrvr.example.com has SSHFP record 1 1 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\n'
'mocksrvr.example.com has SSHFP record 1 2 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344 A67E5597\n'
'mocksrvr.example.com has SSHFP record 3 1 A3B605CE6F044617C6077C46A7CD5D17A767F0D5\n'
'mocksrvr.example.com has SSHFP record 4 2 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BB C859AB1B'
],
'TXT': [
'example.com descriptive text "v=spf1 a include:_spf4.example.com include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
]
@ -474,13 +522,17 @@ class DNSlookupsCase(TestCase):
self._test_cmd_lookup(_lookup_host, wrong_type=wrong_type, wrong=wrongs, right=rights, empty=empty)
def test_dnspython(self):
pass
def test_nslookup(self):
# all nslookup returns look like this
RES_TMPL = 'Server:\t\t10.11.12.13\nAddress:\t10.11.12.13#53\n\nNon-authoritative answer:\n{}\n\nAuthoritative answers can be found from:'
RES_TMPL = textwrap.dedent('''\
Server:\t\t10.11.12.13
Address:\t10.11.12.13#53
Non-authoritative answer:
{}
Authoritative answers can be found from:
''')
wrong_type = {'stdout': 'unknown query type: WRONG' +
RES_TMPL.format('Name:\tmocksrvr.example.com\nAddress: 10.1.1.1')}
@ -490,8 +542,9 @@ class DNSlookupsCase(TestCase):
]
empty = {'stdout': RES_TMPL.format(
"*** Can't find www.google.com: No answer\n\nAuthoritative answers can be found from:")}
"*** Can't find www.google.com: No answer")}
# Example returns of nslookup -query={rdype} {name}
rights = {
'A': [
'Name:\tmocksrvr.example.com\nAddress: 10.1.1.1',
@ -506,6 +559,10 @@ class DNSlookupsCase(TestCase):
'web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:222\n'
'web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:333'
],
'CAA': [
'example.com\trdata_257 = 0 issue "exampleca.com"\n'
'example.com\trdata_257 = 0 iodef "mailto:sslabuse@example.com"'
],
'CNAME': [
'mocksrvr.example.com\tcanonical name = web.example.com.'
],
@ -515,7 +572,13 @@ class DNSlookupsCase(TestCase):
'example.com\tmail exchanger = 20 mx2.example.eu.\n'
'example.com\tmail exchanger = 30 mx3.example.nl.'
],
'TXT': [
'SSHFP': [
'mocksrvr.example.com\trdata_44 = 1 1 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\n'
'mocksrvr.example.com\trdata_44 = 1 2 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344 A67E5597\n'
'mocksrvr.example.com\trdata_44 = 3 1 A3B605CE6F044617C6077C46A7CD5D17A767F0D5\n'
'mocksrvr.example.com\trdata_44 = 4 2 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BB C859AB1B'
],
'TXT': [
'example.com\ttext = "v=spf1 a include:_spf4.example.com include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
]
}

View file

@ -4,22 +4,27 @@ integration.grains.test_core
integration.loader.test_ext_grains
integration.loader.test_ext_modules
integration.modules.test_aliases
integration.modules.test_autoruns
integration.modules.test_beacons
integration.modules.test_config
integration.modules.test_cp
integration.modules.test_data
integration.modules.test_disk
integration.modules.test_firewall
integration.modules.test_git
integration.modules.test_grains
integration.modules.test_groupadd
integration.modules.test_hosts
integration.modules.test_mine
integration.modules.test_network
integration.modules.test_ntp
integration.modules.test_pillar
integration.modules.test_pkg
integration.modules.test_publish
integration.modules.test_state
integration.modules.test_status
integration.modules.test_sysmod
integration.modules.test_system
integration.modules.test_test
integration.modules.test_useradd
integration.reactor.test_reactor