Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)

Commit e586370c67: Merge branch '2018.3' into 51069-ri-and-rdoc-removed
43 changed files with 678 additions and 270 deletions

Pipfile (11 lines changed)
@@ -20,17 +20,18 @@ boto = ">=2.32.1"
boto3 = ">=1.2.1"
moto = ">=0.3.6"
SaltPyLint = ">=v2017.3.6"
pytest = ">=3.5.0"
pytest = ">=4.0.1"
pytest-cov = "*"
pytest-salt = "==2018.12.8"
pytest-timeout = ">=1.3.3"
pytest-tempdir = ">=2018.8.11"
pytest-helpers-namespace = ">=2017.11.11"

[packages.futures]
# Required by Tornado to handle threads stuff.
version = ">=2.0"
markers = "python_version < '3.0'"

[dev-packages.pytest-salt]
git = "git://github.com/saltstack/pytest-salt.git"
ref = "master"

[dev-packages.httpretty]
# httpretty Needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
@@ -2,7 +2,7 @@
What is SaltStack?
==================

SaltStack makes software for complex systems management at scale.
SaltStack makes software for complex systems management at scale.
SaltStack is the company that created and maintains the Salt Open
project and develops and sells SaltStack Enterprise software, services
and support. Easy enough to get running in minutes, scalable enough to
@@ -87,6 +87,13 @@ the context into the included file is required:
.. code-block:: jinja

    {% from 'lib.sls' import test with context %}

Includes must use full paths, like so:

.. code-block:: jinja
    :caption: spam/eggs.jinja

    {% include 'spam/foobar.jinja' %}

Including Context During Include/Import
---------------------------------------
@@ -2,8 +2,6 @@

mock>=2.0.0
SaltPyLint>=v2017.3.6
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
testinfra>=1.7.0,!=1.17.0

# httpretty Needs to be here for now even though it's a dependency of boto.
@@ -1,4 +1,7 @@
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir
# PyTest
pytest >= 4.0.1
pytest-cov
pytest-salt == 2018.12.8
pytest-timeout >= 1.3.3
pytest-tempdir >= 2018.8.11
pytest-helpers-namespace >= 2017.11.11
@@ -323,7 +323,14 @@ def groups(username, **kwargs):

    '''
    group_list = []
    bind = auth(username, kwargs.get('password', None))

    # If bind credentials are configured, use them instead of user's
    if _config('binddn', mandatory=False) and _config('bindpw', mandatory=False):
        bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
    else:
        bind = _bind(username, kwargs.get('password', ''),
                     anonymous=_config('auth_by_group_membership_only', mandatory=False)
                     and _config('anonymous', mandatory=False))

    if bind:
        log.debug('ldap bind to determine group membership succeeded!')
@@ -265,7 +265,7 @@ def avail_locations(conn=None, call=None):  # pylint: disable=unused-argument
    webconn = get_conn(WebSiteManagementClient)

    ret = {}
    regions = webconn.global_model.get_subscription_geo_regions()
    regions = webconn.list_geo_regions()
    if hasattr(regions, 'value'):
        regions = regions.value
    for location in regions:  # pylint: disable=no-member

@@ -533,7 +533,7 @@ def list_nodes_select(conn=None, call=None):  # pylint: disable=unused-argument
    )


def show_instance(name, resource_group=None, call=None):  # pylint: disable=unused-argument
def show_instance(name, kwargs=None, call=None):  # pylint: disable=unused-argument
    '''
    Show the details from the provider concerning an instance
    '''

@@ -547,6 +547,12 @@ def show_instance(name, resource_group=None, call=None):  # pylint: disable=unus
    compconn = get_conn()

    data = None
    resource_group = None

    # check if there is a resource_group specified
    if kwargs:
        resource_group = kwargs.get('resource_group', None)

    if resource_group is None:
        for group in list_resource_groups():
            try:

@@ -555,8 +561,13 @@ def show_instance(name, resource_group=None, call=None):  # pylint: disable=unus
                resource_group = group
            except CloudError:
                continue
    else:
        try:
            instance = compconn.virtual_machines.get(resource_group, name)
            data = object_to_dict(instance)
        except CloudError:
            pass

    # Find under which cloud service the name is listed, if any
    if data is None:
        return {}

@@ -568,7 +579,7 @@ def show_instance(name, resource_group=None, call=None):  # pylint: disable=unus
        data['network_profile']['network_interfaces'] = []

    for iface in data['network_profile']['network_interfaces']:
        iface_name = iface.id.split('/')[-1]
        iface_name = iface['id'].split('/')[-1]
        iface_data = show_interface(kwargs={
            'resource_group': resource_group,
            'iface_name': iface_name,
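The new show_instance() above resolves the resource group by probing every group when the caller does not pass one. Below is a minimal runnable sketch of that lookup pattern; the two helpers and the toy data are hypothetical stand-ins for the Azure SDK calls used in the diff (compconn.virtual_machines.get and list_resource_groups), not Salt APIs.

    # Sketch of the "probe each resource group until one knows the VM" pattern.
    VMS = {'group-a': {'web01': {'size': 'Standard_B1s'}}}   # toy data

    def list_groups():
        return list(VMS)

    def get_vm(group, name):
        return VMS[group][name]          # raises KeyError, standing in for CloudError

    def find_instance(name, resource_group=None):
        if resource_group is not None:
            return resource_group, get_vm(resource_group, name)
        for group in list_groups():      # no group given: probe each one in turn
            try:
                return group, get_vm(group, name)
            except KeyError:
                continue
        return None, None                # mirrors the `return {}` in the diff

    print(find_instance('web01'))        # ('group-a', {'size': 'Standard_B1s'})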
@@ -1,4 +1,9 @@
#!/bin/sh -

# WARNING: Changes to this file in the salt repo will be overwritten!
# Please submit pull requests against the salt-bootstrap repo:
# https://github.com/saltstack/salt-bootstrap

#======================================================================================================================
# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120
#======================================================================================================================

@@ -18,7 +23,7 @@
#======================================================================================================================
set -o nounset  # Treat unset variables as an error

__ScriptVersion="2018.08.15"
__ScriptVersion="2019.01.08"
__ScriptName="bootstrap-salt.sh"

__ScriptFullName="$0"
@@ -585,14 +590,14 @@ elif [ "$ITYPE" = "stable" ]; then
    if [ "$#" -eq 0 ];then
        STABLE_REV="latest"
    else
        if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$')" != "" ]; then
        if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2)$')" != "" ]; then
            STABLE_REV="$1"
            shift
        elif [ "$(echo "$1" | grep -E '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then
            STABLE_REV="archive/$1"
            shift
        else
            echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, latest, \$MAJOR.\$MINOR.\$PATCH)"
            echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, latest, \$MAJOR.\$MINOR.\$PATCH)"
            exit 1
        fi
    fi
@@ -1269,6 +1274,7 @@ __ubuntu_derivatives_translation() {
    linuxmint_13_ubuntu_base="12.04"
    linuxmint_17_ubuntu_base="14.04"
    linuxmint_18_ubuntu_base="16.04"
    linuxmint_19_ubuntu_base="18.04"
    linaro_12_ubuntu_base="12.04"
    elementary_os_02_ubuntu_base="12.04"
    neon_16_ubuntu_base="16.04"

@@ -1632,7 +1638,8 @@ __check_end_of_life_versions() {

        amazon*linux*ami)
            # Amazon Linux versions lower than 2012.0X no longer supported
            if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ]; then
            # Except for Amazon Linux 2, which reset the major version counter
            if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then
                echoerror "End of life distributions are not supported."
                echoerror "Please consider upgrading to the next stable. See:"
                echoerror " https://aws.amazon.com/amazon-linux-ami/"
@@ -1797,24 +1804,32 @@ __function_defined() {
# process is finished so the script doesn't exit on a locked proc.
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
    echodebug "Checking if apt process is currently running."

    # Timeout set at 15 minutes
    WAIT_TIMEOUT=900

    while ps -C apt,apt-get,aptitude,dpkg >/dev/null; do
        sleep 1
        WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
    # Run our passed in apt command
    "${@}"
    APT_RETURN=$?

        # If timeout reaches 0, abort.
        if [ "$WAIT_TIMEOUT" -eq 0 ]; then
            echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
            echoerror "Bootstrap script cannot proceed. Aborting."
            return 1
        fi
    # If our exit code from apt is 100, then we're waiting on a lock
    while [ $APT_RETURN -eq 100 ]; do
        echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..."
        sleep 1
        WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))

        # If timeout reaches 0, abort.
        if [ "$WAIT_TIMEOUT" -eq 0 ]; then
            echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
            echoerror "Bootstrap script cannot proceed. Aborting."
            return 1
        else
            # Try running apt again until our return code != 100
            "${@}"
            APT_RETURN=$?
        fi
    done

    echodebug "No apt processes are currently running."
    return $APT_RETURN
}

#--- FUNCTION -------------------------------------------------------------------------------------------------------
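As a side note, the control flow of the rewritten __wait_for_apt (run the wrapped command once, then keep retrying while it exits with apt's lock code 100 until a 900 second budget runs out) can be sketched in Python for readability. This is only an illustration of the pattern, not part of bootstrap-salt.sh:

    import subprocess
    import time

    def wait_for_apt(*cmd, timeout=900):
        # First attempt; exit code 100 means another apt/dpkg process holds the lock.
        ret = subprocess.call(cmd)
        while ret == 100:
            if timeout <= 0:
                print('apt/dpkg is taking too long, aborting')
                return 1
            time.sleep(1)
            timeout -= 1
            ret = subprocess.call(cmd)  # retry until the lock is released or we give up
        return ret

    # Example: wait_for_apt('apt-get', 'update')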
@@ -1823,8 +1838,7 @@ __wait_for_apt(){
# PARAMETERS: packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
    __wait_for_apt
    apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
    __wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
} # ---------- end of function __apt_get_install_noinput ----------
@ -1833,8 +1847,7 @@ __apt_get_install_noinput() {
|
|||
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
__apt_get_upgrade_noinput() {
|
||||
__wait_for_apt
|
||||
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
|
||||
__wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
|
||||
} # ---------- end of function __apt_get_upgrade_noinput ----------
|
||||
|
||||
|
||||
|
@ -1844,11 +1857,10 @@ __apt_get_upgrade_noinput() {
|
|||
# PARAMETERS: url
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
__apt_key_fetch() {
|
||||
__wait_for_apt
|
||||
url=$1
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
apt-key adv ${_GPG_ARGS} --fetch-keys "$url"; return $?
|
||||
__wait_for_apt apt-key adv ${_GPG_ARGS} --fetch-keys "$url"; return $?
|
||||
} # ---------- end of function __apt_key_fetch ----------
|
||||
|
||||
|
||||
|
@ -2633,8 +2645,7 @@ __install_saltstack_ubuntu_repository() {
|
|||
|
||||
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
}
|
||||
|
||||
install_ubuntu_deps() {
|
||||
|
@ -2646,8 +2657,7 @@ install_ubuntu_deps() {
|
|||
|
||||
__enable_universe_repository || return 1
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
fi
|
||||
|
||||
__PACKAGES=''
|
||||
|
@ -2703,8 +2713,7 @@ install_ubuntu_stable_deps() {
|
|||
# No user interaction, libc6 restart services for example
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
|
||||
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
|
||||
if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then
|
||||
|
@ -2724,8 +2733,7 @@ install_ubuntu_stable_deps() {
|
|||
}
|
||||
|
||||
install_ubuntu_git_deps() {
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
|
||||
if ! __check_command_exists git; then
|
||||
__apt_get_install_noinput git-core || return 1
|
||||
|
@ -3032,8 +3040,7 @@ __install_saltstack_debian_repository() {
|
|||
|
||||
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
}
|
||||
|
||||
install_debian_deps() {
|
||||
|
@ -3044,8 +3051,7 @@ install_debian_deps() {
|
|||
# No user interaction, libc6 restart services for example
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
|
||||
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
|
||||
# Try to update GPG keys first if allowed
|
||||
|
@ -3164,8 +3170,7 @@ install_debian_8_git_deps() {
|
|||
/etc/apt/sources.list.d/backports.list
|
||||
fi
|
||||
|
||||
__wait_for_apt
|
||||
apt-get update || return 1
|
||||
__wait_for_apt apt-get update || return 1
|
||||
|
||||
# python-tornado package should be installed from backports repo
|
||||
__PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports"
|
||||
|
@ -3415,36 +3420,33 @@ install_debian_check_services() {
|
|||
#
|
||||
|
||||
install_fedora_deps() {
|
||||
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
||||
dnf -y update || return 1
|
||||
fi
|
||||
|
||||
__PACKAGES="${__PACKAGES:=}"
|
||||
if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
|
||||
# Packages are named python3-<whatever>
|
||||
PY_PKG_VER=3
|
||||
__PACKAGES="python3-m2crypto python3-PyYAML"
|
||||
__PACKAGES="${__PACKAGES} python3-m2crypto python3-PyYAML"
|
||||
else
|
||||
PY_PKG_VER=2
|
||||
__PACKAGES="m2crypto"
|
||||
__PACKAGES="${__PACKAGES} m2crypto"
|
||||
if [ "$DISTRO_MAJOR_VERSION" -ge 28 ]; then
|
||||
__PACKAGES="${__PACKAGES} python2-pyyaml"
|
||||
else
|
||||
__PACKAGES="${__PACKAGES} PyYAML"
|
||||
fi
|
||||
fi
|
||||
|
||||
__PACKAGES="${__PACKAGES} procps-ng dnf-utils libyaml python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
|
||||
__PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
dnf install -y ${__PACKAGES} || return 1
|
||||
|
||||
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
||||
dnf -y update || return 1
|
||||
fi
|
||||
|
||||
if [ "${_EXTRA_PACKAGES}" != "" ]; then
|
||||
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
|
||||
# shellcheck disable=SC2086
|
||||
dnf install -y ${_EXTRA_PACKAGES} || return 1
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
dnf install -y ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -3494,36 +3496,38 @@ install_fedora_git_deps() {
|
|||
PY_PKG_VER=2
|
||||
fi
|
||||
|
||||
__PACKAGES="${__PACKAGES:=}"
|
||||
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
|
||||
dnf install -y ca-certificates || return 1
|
||||
__PACKAGES="${__PACKAGES} ca-certificates"
|
||||
fi
|
||||
if ! __check_command_exists git; then
|
||||
__PACKAGES="${__PACKAGES} git"
|
||||
fi
|
||||
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr"
|
||||
fi
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd"
|
||||
|
||||
# Fedora 28+ ships with tornado 5.0+ which is broken for salt on py3
|
||||
# https://github.com/saltstack/salt-bootstrap/issues/1220
|
||||
if [ "${PY_PKG_VER}" -lt 3 ] || [ "$DISTRO_MAJOR_VERSION" -lt 28 ]; then
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado"
|
||||
fi
|
||||
|
||||
install_fedora_deps || return 1
|
||||
|
||||
if ! __check_command_exists git; then
|
||||
dnf install -y git || return 1
|
||||
fi
|
||||
|
||||
__git_clone_and_checkout || return 1
|
||||
|
||||
__PACKAGES="python${PY_PKG_VER}-systemd"
|
||||
# Fedora 28+ needs tornado <5.0 from pip
|
||||
# https://github.com/saltstack/salt-bootstrap/issues/1220
|
||||
if [ "${PY_PKG_VER}" -eq 3 ] && [ "$DISTRO_MAJOR_VERSION" -ge 28 ]; then
|
||||
__check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3"
|
||||
grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS='
|
||||
' read -r dep; do
|
||||
"${_PY_EXE}" -m pip install "${dep}" || return 1
|
||||
done
|
||||
else
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado"
|
||||
fi
|
||||
|
||||
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
|
||||
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
dnf install -y ${__PACKAGES} || return 1
|
||||
|
||||
# Let's trigger config_salt()
|
||||
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
|
||||
_TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
|
||||
|
@ -4681,6 +4685,138 @@ install_amazon_linux_ami_git_deps() {
|
|||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_git_deps() {
|
||||
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
|
||||
yum -y install ca-certificates || return 1
|
||||
fi
|
||||
|
||||
PIP_EXE='pip'
|
||||
if __check_command_exists python2.7; then
|
||||
if ! __check_command_exists pip2.7; then
|
||||
__yum_install_noinput python2-pip
|
||||
fi
|
||||
PIP_EXE='/bin/pip'
|
||||
_PY_EXE='python2.7'
|
||||
fi
|
||||
|
||||
install_amazon_linux_ami_2_deps || return 1
|
||||
|
||||
if ! __check_command_exists git; then
|
||||
__yum_install_noinput git || return 1
|
||||
fi
|
||||
|
||||
__git_clone_and_checkout || return 1
|
||||
|
||||
__PACKAGES=""
|
||||
__PIP_PACKAGES=""
|
||||
|
||||
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
|
||||
__check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud"
|
||||
__PACKAGES="${__PACKAGES} python27-pip"
|
||||
__PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION"
|
||||
fi
|
||||
|
||||
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
|
||||
# We're on the develop branch, install whichever tornado is on the requirements file
|
||||
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")"
|
||||
if [ "${__REQUIRED_TORNADO}" != "" ]; then
|
||||
__PACKAGES="${__PACKAGES} ${pkg_append}-tornado"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "${__PACKAGES}" != "" ]; then
|
||||
# shellcheck disable=SC2086
|
||||
__yum_install_noinput ${__PACKAGES} || return 1
|
||||
fi
|
||||
|
||||
if [ "${__PIP_PACKAGES}" != "" ]; then
|
||||
# shellcheck disable=SC2086
|
||||
${PIP_EXE} install ${__PIP_PACKAGES} || return 1
|
||||
fi
|
||||
|
||||
# Let's trigger config_salt()
|
||||
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
|
||||
_TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
|
||||
CONFIG_SALT_FUNC="config_salt"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_deps() {
|
||||
# Shim to figure out if we're using old (rhel) or new (aws) rpms.
|
||||
_USEAWS=$BS_FALSE
|
||||
pkg_append="python"
|
||||
|
||||
if [ "$ITYPE" = "stable" ]; then
|
||||
repo_rev="$STABLE_REV"
|
||||
else
|
||||
repo_rev="latest"
|
||||
fi
|
||||
|
||||
if echo $repo_rev | grep -E -q '^archive'; then
|
||||
year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4)
|
||||
else
|
||||
year=$(echo "$repo_rev" | cut -c1-4)
|
||||
fi
|
||||
|
||||
if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \
|
||||
[ "$year" -gt 2016 ]; then
|
||||
_USEAWS=$BS_TRUE
|
||||
pkg_append="python"
|
||||
fi
|
||||
|
||||
# We need to install yum-utils before doing anything else when installing on
|
||||
# Amazon Linux ECS-optimized images. See issue #974.
|
||||
__yum_install_noinput yum-utils
|
||||
|
||||
# Do upgrade early
|
||||
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
||||
yum -y update || return 1
|
||||
fi
|
||||
|
||||
if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then
|
||||
__REPO_FILENAME="saltstack-repo.repo"
|
||||
|
||||
base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/7/\$basearch/$repo_rev/"
|
||||
base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/2/\$basearch/latest/"
|
||||
gpg_key="${base_url}SALTSTACK-GPG-KEY.pub
|
||||
${base_url}base/RPM-GPG-KEY-CentOS-7"
|
||||
repo_name="SaltStack repo for Amazon Linux 2.0"
|
||||
|
||||
# This should prob be refactored to use __install_saltstack_rhel_repository()
|
||||
# With args passed in to do the right thing. Reformatted to be more like the
|
||||
# amazon linux yum file.
|
||||
if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then
|
||||
cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}"
|
||||
[saltstack-repo]
|
||||
name=$repo_name
|
||||
failovermethod=priority
|
||||
priority=10
|
||||
gpgcheck=1
|
||||
gpgkey=$gpg_key
|
||||
baseurl=$base_url
|
||||
_eof
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
# Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64
|
||||
# which is already installed
|
||||
__PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 PyYAML"
|
||||
__PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq"
|
||||
__PACKAGES="${__PACKAGES} ${pkg_append}-futures"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
__yum_install_noinput ${__PACKAGES} || return 1
|
||||
|
||||
if [ "${_EXTRA_PACKAGES}" != "" ]; then
|
||||
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
|
||||
# shellcheck disable=SC2086
|
||||
__yum_install_noinput ${_EXTRA_PACKAGES} || return 1
|
||||
fi
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_stable() {
|
||||
install_centos_stable || return 1
|
||||
return 0
|
||||
|
@ -4715,6 +4851,41 @@ install_amazon_linux_ami_testing_post() {
|
|||
install_centos_testing_post || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_stable() {
|
||||
install_centos_stable || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_stable_post() {
|
||||
install_centos_stable_post || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_restart_daemons() {
|
||||
install_centos_restart_daemons || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_git() {
|
||||
install_centos_git || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_git_post() {
|
||||
install_centos_git_post || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_testing() {
|
||||
install_centos_testing || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_amazon_linux_ami_2_testing_post() {
|
||||
install_centos_testing_post || return 1
|
||||
return 0
|
||||
}
|
||||
#
|
||||
# Ended Amazon Linux AMI Install Functions
|
||||
#
|
||||
|
@ -5336,7 +5507,8 @@ install_openbsd_restart_daemons() {
|
|||
# SmartOS Install Functions
|
||||
#
|
||||
install_smartos_deps() {
|
||||
pkgin -y install zeromq py27-crypto py27-m2crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
|
||||
smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto"
|
||||
pkgin -y install "${smartos_deps}" || return 1
|
||||
|
||||
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
|
||||
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
|
||||
|
|
|
@ -9,6 +9,7 @@ authenticating peers
|
|||
# the Array class, which has incompatibilities with it.
|
||||
from __future__ import absolute_import, print_function
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import copy
|
||||
import time
|
||||
|
@ -727,6 +728,10 @@ class AsyncAuth(object):
|
|||
'minion.\nOr restart the Salt Master in open mode to '
|
||||
'clean out the keys. The Salt Minion will now exit.'
|
||||
)
|
||||
# Add a random sleep here for systems that are using a
|
||||
# a service manager to immediately restart the service
|
||||
# to avoid overloading the system
|
||||
time.sleep(random.randint(10, 20))
|
||||
sys.exit(salt.defaults.exitcodes.EX_NOPERM)
|
||||
# has the master returned that its maxed out with minions?
|
||||
elif payload['load']['ret'] == 'full':
|
||||
|
|
|
@ -303,8 +303,8 @@ def _file_lists(load, form):
|
|||
except os.error:
|
||||
log.critical('Unable to make cachedir %s', list_cachedir)
|
||||
return []
|
||||
list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
|
||||
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
|
||||
list_cache = os.path.join(list_cachedir, '{0}.p'.format(salt.utils.files.safe_filename_leaf(load['saltenv'])))
|
||||
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(salt.utils.files.safe_filename_leaf(load['saltenv'])))
|
||||
cache_match, refresh_cache, save_cache = \
|
||||
salt.fileserver.check_file_list_cache(
|
||||
__opts__, form, list_cache, w_lock
|
||||
|
|
|
@ -1704,7 +1704,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
|||
if not isinstance(key, six.string_types):
|
||||
raise KeyError('The key must be a string.')
|
||||
if '.' not in key:
|
||||
raise KeyError('The key \'%s\' should contain a \'.\'', key)
|
||||
raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
|
||||
mod_name, _ = key.split('.', 1)
|
||||
with self._lock:
|
||||
# It is possible that the key is in the dictionary after
|
||||
|
|
|
@ -135,9 +135,7 @@ def setup_handlers():
|
|||
transport_registry = TransportRegistry(default_transports)
|
||||
url = urlparse(dsn)
|
||||
if not transport_registry.supported_scheme(url.scheme):
|
||||
raise ValueError(
|
||||
'Unsupported Sentry DSN scheme: %s', url.scheme
|
||||
)
|
||||
raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
|
||||
except ValueError as exc:
|
||||
log.info(
|
||||
'Raven failed to parse the configuration provided DSN: %s', exc
|
||||
|
|
|
@@ -27,6 +27,7 @@ from binascii import crc32
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt._compat import ipaddress
from salt.utils.network import parse_host_port
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO

@@ -243,27 +244,29 @@ def resolve_dns(opts, fallback=True):


def prep_ip_port(opts):
    '''
    parse host:port values from opts['master'] and return valid:
        master: ip address or hostname as a string
        master_port: (optional) master returner port as integer

    e.g.:
      - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
      - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
      - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
      - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
    '''
    ret = {}
    # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
    # a port specified. The is_ipv6 check returns False if brackets are used in the IP
    # definition such as master: '[::1]:1234'.
    if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
        ret['master'] = opts['master']
    if opts['master_uri_format'] == 'ip_only':
        ret['master'] = ipaddress.ip_address(opts['master'])
    else:
        ip_port = opts['master'].rsplit(':', 1)
        if len(ip_port) == 1:
            # e.g. master: mysaltmaster
            ret['master'] = ip_port[0]
        else:
            # e.g. master: localhost:1234
            # e.g. master: 127.0.0.1:1234
            # e.g. master: [::1]:1234
            # Strip off brackets for ipv6 support
            ret['master'] = ip_port[0].strip('[]')
        host, port = parse_host_port(opts['master'])
        ret = {'master': host}
        if port:
            ret.update({'master_port': port})

            # Cast port back to an int! Otherwise a TypeError is thrown
            # on some of the socket calls elsewhere in the minion and utils code.
            ret['master_port'] = int(ip_port[1])
    return ret
|
||||
|
||||
|
||||
|
|
|
@ -19,8 +19,7 @@ The firewall configuration is generated by Capirca_.
|
|||
|
||||
.. _Capirca: https://github.com/google/capirca
|
||||
|
||||
Capirca is not yet available on PyPI threrefore it has to be installed
|
||||
directly form Git: ``pip install -e git+git@github.com:google/capirca.git#egg=aclgen``.
|
||||
To install Capirca, execute: ``pip install capirca``.
|
||||
'''
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
|
||||
|
@ -34,7 +33,10 @@ log = logging.getLogger(__file__)
|
|||
# Import third party libs
|
||||
from salt.ext import six
|
||||
try:
|
||||
import aclgen
|
||||
import capirca
|
||||
import capirca.aclgen
|
||||
import capirca.lib.policy
|
||||
import capirca.lib.aclgenerator
|
||||
HAS_CAPIRCA = True
|
||||
except ImportError:
|
||||
HAS_CAPIRCA = False
|
||||
|
@ -69,10 +71,12 @@ def __virtual__():
|
|||
# module globals
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
|
||||
# define the default values for all possible term fields
|
||||
# we could also extract them from the `policy` module, inspecting the `Policy`
|
||||
# class, but that might be overkill & it would make the code less obvious.
|
||||
# we can revisit this later if necessary.
|
||||
|
||||
_TERM_FIELDS = {
|
||||
'action': [],
|
||||
'address': [],
|
||||
|
@ -161,7 +165,19 @@ _SERVICES = {}
|
|||
|
||||
|
||||
if HAS_CAPIRCA:
|
||||
class _Policy(aclgen.policy.Policy):
|
||||
_TempTerm = capirca.lib.policy.Term
|
||||
|
||||
def _add_object(self, obj):
|
||||
return
|
||||
|
||||
setattr(_TempTerm, 'AddObject', _add_object)
|
||||
dumy_term = _TempTerm(None)
|
||||
for item in dir(dumy_term):
|
||||
if hasattr(item, '__func__') or item.startswith('_') or item != item.lower():
|
||||
continue
|
||||
_TERM_FIELDS[item] = getattr(dumy_term, item)
|
||||
|
||||
class _Policy(capirca.lib.policy.Policy):
|
||||
'''
|
||||
Extending the Capirca Policy class to allow inserting custom filters.
|
||||
'''
|
||||
|
@ -169,7 +185,7 @@ if HAS_CAPIRCA:
|
|||
self.filters = []
|
||||
self.filename = ''
|
||||
|
||||
class _Term(aclgen.policy.Term):
|
||||
class _Term(capirca.lib.policy.Term):
|
||||
'''
|
||||
Extending the Capirca Term class to allow setting field valued on the fly.
|
||||
'''
|
||||
|
@ -186,10 +202,10 @@ def _import_platform_generator(platform):
|
|||
for a class inheriting the `ACLGenerator` class.
|
||||
'''
|
||||
log.debug('Using platform: {plat}'.format(plat=platform))
|
||||
for mod_name, mod_obj in inspect.getmembers(aclgen):
|
||||
for mod_name, mod_obj in inspect.getmembers(capirca.aclgen):
|
||||
if mod_name == platform and inspect.ismodule(mod_obj):
|
||||
for plat_obj_name, plat_obj in inspect.getmembers(mod_obj): # pylint: disable=unused-variable
|
||||
if inspect.isclass(plat_obj) and issubclass(plat_obj, aclgen.aclgenerator.ACLGenerator):
|
||||
if inspect.isclass(plat_obj) and issubclass(plat_obj, capirca.lib.aclgenerator.ACLGenerator):
|
||||
log.debug('Identified Capirca class {cls} for {plat}'.format(
|
||||
cls=plat_obj,
|
||||
plat=platform))
|
||||
|
@ -366,7 +382,11 @@ def _clean_term_opts(term_opts):
|
|||
# IP-type fields need to be transformed
|
||||
ip_values = []
|
||||
for addr in value:
|
||||
ip_values.append(aclgen.policy.nacaddr.IP(addr))
|
||||
if six.PY2:
|
||||
addr = six.text_type(addr)
|
||||
# Adding this, as ipaddress would complain about valid
|
||||
# addresses not being valid. #pythonIsFun
|
||||
ip_values.append(capirca.lib.policy.nacaddr.IP(addr))
|
||||
value = ip_values[:]
|
||||
clean_opts[field] = value
|
||||
return clean_opts
|
||||
|
@ -427,7 +447,7 @@ def _merge_list_of_dict(first, second, prepend=True):
|
|||
if first and not second:
|
||||
return first
|
||||
# Determine overlaps
|
||||
# So we don't change the position of the existing terms/filters
|
||||
# So we dont change the position of the existing terms/filters
|
||||
overlaps = []
|
||||
merged = []
|
||||
appended = []
|
||||
|
@ -514,7 +534,7 @@ def _get_policy_object(platform,
|
|||
continue # go to the next filter
|
||||
filter_name = filter_.keys()[0]
|
||||
filter_config = filter_.values()[0]
|
||||
header = aclgen.policy.Header() # same header everywhere
|
||||
header = capirca.lib.policy.Header() # same header everywhere
|
||||
target_opts = [
|
||||
platform,
|
||||
filter_name
|
||||
|
@ -524,7 +544,7 @@ def _get_policy_object(platform,
|
|||
filter_options = _make_it_list({}, filter_name, filter_options)
|
||||
# make sure the filter options are sent as list
|
||||
target_opts.extend(filter_options)
|
||||
target = aclgen.policy.Target(target_opts)
|
||||
target = capirca.lib.policy.Target(target_opts)
|
||||
header.AddObject(target)
|
||||
filter_terms = []
|
||||
for term_ in filter_config.get('terms', []):
|
||||
|
|
|
@ -1913,9 +1913,11 @@ def get_network_settings():
|
|||
|
||||
hostname = _parse_hostname()
|
||||
domainname = _parse_domainname()
|
||||
searchdomain = _parse_searchdomain()
|
||||
|
||||
settings['hostname'] = hostname
|
||||
settings['domainname'] = domainname
|
||||
settings['searchdomain'] = searchdomain
|
||||
|
||||
else:
|
||||
settings = _parse_current_network_settings()
|
||||
|
|
|
@ -226,6 +226,7 @@ def _resolve_user_group_names(opts):
|
|||
if _info and _param in _info:
|
||||
_id = _info[_param]
|
||||
opts[ind] = _param + '=' + six.text_type(_id)
|
||||
opts[ind] = opts[ind].replace('\\040', '\\ ')
|
||||
return opts
|
||||
|
||||
|
||||
|
@ -727,7 +728,7 @@ def set_fstab(
|
|||
'name': name,
|
||||
'device': device.replace('\\ ', '\\040'),
|
||||
'fstype': fstype,
|
||||
'opts': opts,
|
||||
'opts': opts.replace('\\ ', '\\040'),
|
||||
'dump': dump,
|
||||
'pass_num': pass_num,
|
||||
}
|
||||
|
|
|
@ -19,6 +19,8 @@ The firewall configuration is generated by Capirca_.
|
|||
|
||||
.. _Capirca: https://github.com/google/capirca
|
||||
|
||||
To install Capirca, execute: ``pip install capirca``.
|
||||
|
||||
To be able to load configuration on network devices,
|
||||
it requires NAPALM_ library to be installed: ``pip install napalm``.
|
||||
Please check Installation_ for complete details.
|
||||
|
@ -34,7 +36,10 @@ log = logging.getLogger(__file__)
|
|||
# Import third party libs
|
||||
try:
|
||||
# pylint: disable=W0611
|
||||
import aclgen
|
||||
import capirca
|
||||
import capirca.aclgen
|
||||
import capirca.lib.policy
|
||||
import capirca.lib.aclgenerator
|
||||
HAS_CAPIRCA = True
|
||||
# pylint: enable=W0611
|
||||
except ImportError:
|
||||
|
|
|
@ -29,7 +29,7 @@ import requests
|
|||
import salt.exceptions
|
||||
import salt.utils.json
|
||||
|
||||
API_ENDPOINT = "https://api.opsgenie.com/v1/json/saltstack?apiKey="
|
||||
API_ENDPOINT = "https://api.opsgenie.com/v2/alerts"
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -68,14 +68,14 @@ def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
|
|||
functionality you must provide name field for both states like in
|
||||
this case.
|
||||
'''
|
||||
if api_key is None or reason is None or action_type is None:
|
||||
if api_key is None or reason is None:
|
||||
raise salt.exceptions.SaltInvocationError(
|
||||
'API Key or Reason or Action Type cannot be None.')
|
||||
'API Key or Reason cannot be None.')
|
||||
|
||||
data = dict()
|
||||
data['name'] = name
|
||||
data['reason'] = reason
|
||||
data['actionType'] = action_type
|
||||
data['alias'] = name
|
||||
data['message'] = reason
|
||||
# data['actions'] = action_type
|
||||
data['cpuModel'] = __grains__['cpu_model']
|
||||
data['cpuArch'] = __grains__['cpuarch']
|
||||
data['fqdn'] = __grains__['fqdn']
|
||||
|
@ -93,8 +93,17 @@ def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
|
|||
log.debug('Below data will be posted:\n%s', data)
|
||||
log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT)
|
||||
|
||||
response = requests.post(
|
||||
url=API_ENDPOINT + api_key,
|
||||
data=salt.utils.json.dumps(data),
|
||||
headers={'Content-Type': 'application/json'})
|
||||
if action_type == "Create":
|
||||
response = requests.post(
|
||||
url=API_ENDPOINT,
|
||||
data=salt.utils.json.dumps(data),
|
||||
headers={'Content-Type': 'application/json',
|
||||
'Authorization': 'GenieKey ' + api_key})
|
||||
else:
|
||||
response = requests.post(
|
||||
url=API_ENDPOINT + "/" + name + "/close?identifierType=alias",
|
||||
data=salt.utils.json.dumps(data),
|
||||
headers={'Content-Type': 'application/json',
|
||||
'Authorization': 'GenieKey ' + api_key})
|
||||
|
||||
return response.status_code, response.text
|
||||
|
|
|
@ -412,7 +412,7 @@ def create_snapshot(config='root', snapshot_type='single', pre_number=None,
|
|||
cleanup_algorithm, userdata)
|
||||
else:
|
||||
raise CommandExecutionError(
|
||||
"Invalid snapshot type '{0}'", format(snapshot_type))
|
||||
"Invalid snapshot type '{0}'".format(snapshot_type))
|
||||
except dbus.DBusException as exc:
|
||||
raise CommandExecutionError(
|
||||
'Error encountered while listing changed files: {0}'
|
||||
|
|
|
@ -2503,6 +2503,7 @@ def managed(name,
|
|||
ret, 'Defaults must be formed as a dict')
|
||||
|
||||
if not replace and os.path.exists(name):
|
||||
ret_perms = {}
|
||||
# Check and set the permissions if necessary
|
||||
if salt.utils.platform.is_windows():
|
||||
ret = __salt__['file.check_perms'](
|
||||
|
@ -2514,10 +2515,19 @@ def managed(name,
|
|||
inheritance=win_inheritance,
|
||||
reset=win_perms_reset)
|
||||
else:
|
||||
ret, _ = __salt__['file.check_perms'](
|
||||
ret, ret_perms = __salt__['file.check_perms'](
|
||||
name, ret, user, group, mode, attrs, follow_symlinks)
|
||||
if __opts__['test']:
|
||||
ret['comment'] = 'File {0} not updated'.format(name)
|
||||
if isinstance(ret_perms, dict) and \
|
||||
'lmode' in ret_perms and \
|
||||
mode != ret_perms['lmode']:
|
||||
ret['comment'] = ('File {0} will be updated with permissions '
|
||||
'{1} from its current '
|
||||
'state of {2}'.format(name,
|
||||
mode,
|
||||
ret_perms['lmode']))
|
||||
else:
|
||||
ret['comment'] = 'File {0} not updated'.format(name)
|
||||
elif not ret['changes'] and ret['result']:
|
||||
ret['comment'] = ('File {0} exists with proper permissions. '
|
||||
'No changes made.'.format(name))
|
||||
|
|
|
@ -749,6 +749,12 @@ def latest(name,
|
|||
ret,
|
||||
'Failed to check remote refs: {0}'.format(_strip_exc(exc))
|
||||
)
|
||||
except NameError as exc:
|
||||
if 'global name' in exc.message:
|
||||
raise CommandExecutionError(
|
||||
'Failed to check remote refs: You may need to install '
|
||||
'GitPython or PyGit2')
|
||||
raise
|
||||
|
||||
if 'HEAD' in all_remote_refs:
|
||||
head_rev = all_remote_refs['HEAD']
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
Network ACL
|
||||
===========
|
||||
|
||||
Manage the firewall configuration on the network device namaged through NAPALM.
|
||||
Manage the firewall configuration on the network device managed through NAPALM.
|
||||
The firewall configuration is generated by Capirca_.
|
||||
|
||||
.. _Capirca: https://github.com/google/capirca
|
||||
|
@ -18,7 +18,13 @@ The firewall configuration is generated by Capirca_.
|
|||
Dependencies
|
||||
------------
|
||||
|
||||
Capirca: ``pip install -e git+git@github.com:google/capirca.git#egg=aclgen``
|
||||
Capirca
|
||||
~~~~~~~
|
||||
|
||||
To install Capirca, execute: ``pip install capirca``.
|
||||
|
||||
NAPALM
|
||||
~~~~~~
|
||||
|
||||
To be able to load configuration on network devices,
|
||||
it requires NAPALM_ library to be installed: ``pip install napalm``.
|
||||
|
@ -35,7 +41,10 @@ log = logging.getLogger(__file__)
|
|||
# Import third party libs
|
||||
try:
|
||||
# pylint: disable=W0611
|
||||
import aclgen
|
||||
import capirca
|
||||
import capirca.aclgen
|
||||
import capirca.lib.policy
|
||||
import capirca.lib.aclgenerator
|
||||
HAS_CAPIRCA = True
|
||||
# pylint: enable=W0611
|
||||
except ImportError:
|
||||
|
|
|
@ -86,9 +86,7 @@ def create_alert(name=None, api_key=None, reason=None, action_type="Create"):
|
|||
if __opts__['test'] is True:
|
||||
ret[
|
||||
'comment'] = 'Test: {0} alert request will be processed ' \
|
||||
'using the API Key="{1}".'.format(
|
||||
action_type,
|
||||
api_key)
|
||||
'using the API Key="{1}".'.format(action_type, api_key)
|
||||
|
||||
# Return ``None`` when running with ``test=true``.
|
||||
ret['result'] = None
|
||||
|
|
|
@ -284,6 +284,8 @@ def state(name,
|
|||
|
||||
cmd_kw['tgt_type'] = tgt_type
|
||||
cmd_kw['ssh'] = ssh
|
||||
if 'roster' in kwargs:
|
||||
cmd_kw['roster'] = kwargs['roster']
|
||||
cmd_kw['expect_minions'] = expect_minions
|
||||
if highstate:
|
||||
fun = 'state.highstate'
|
||||
|
|
|
@@ -33,6 +33,7 @@ import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError
from salt._compat import ipaddress

from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error

@@ -71,33 +72,38 @@ def _get_master_uri(master_ip,
    '''
    Return the ZeroMQ URI to connect the Minion to the Master.
    It supports different source IP / port, given the ZeroMQ syntax:

    // Connecting using a IP address and bind to an IP address
    rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);

    Source: http://api.zeromq.org/4-1:zmq-tcp
    '''
    if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
        # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
        # which is included in the pyzmq wheels starting with 16.0.1.
        if source_ip or source_port:
            if source_ip and source_port:
                return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
                    source_ip=source_ip, source_port=source_port,
                    master_ip=master_ip, master_port=master_port)
            elif source_ip and not source_port:
                return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
                    source_ip=source_ip,
                    master_ip=master_ip, master_port=master_port)
            elif not source_ip and source_port:
                return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
                    source_port=source_port,
                    master_ip=master_ip, master_port=master_port)
    from salt.utils.zeromq import ip_bracket

    master_uri = 'tcp://{master_ip}:{master_port}'.format(
        master_ip=ip_bracket(master_ip), master_port=master_port)

    if source_ip or source_port:
        log.warning('Unable to connect to the Master using a specific source IP / port')
        log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
    return 'tcp://{master_ip}:{master_port}'.format(
        master_ip=master_ip, master_port=master_port)
        if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
            # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
            # which is included in the pyzmq wheels starting with 16.0.1.
            if source_ip and source_port:
                master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
                    source_ip=ip_bracket(source_ip), source_port=source_port,
                    master_ip=ip_bracket(master_ip), master_port=master_port)
            elif source_ip and not source_port:
                master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
                    source_ip=ip_bracket(source_ip),
                    master_ip=ip_bracket(master_ip), master_port=master_port)
            elif source_port and not source_ip:
                ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
                master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
                    ip_any=ip_any, source_port=source_port,
                    master_ip=ip_bracket(master_ip), master_port=master_port)
            else:
                log.warning('Unable to connect to the Master using a specific source IP / port')
                log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
                log.warning('Specific source IP / port for connecting to master returner port: configuraion ignored')

    return master_uri
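Assuming the libzmq >= 4.1.6 / pyzmq >= 16.0.1 check above passes, the URIs the refactored _get_master_uri builds take the shapes below. This sketch only mirrors the format strings from the diff (with a simplified local ip_bracket) rather than importing Salt:

    def master_uri(master_ip, master_port, source_ip=None, source_port=None):
        # Mirrors the format strings above; ip_bracket('::1') -> '[::1]'
        def ip_bracket(addr):
            return '[{}]'.format(addr) if ':' in addr else addr

        dest = '{}:{}'.format(ip_bracket(master_ip), master_port)
        if source_ip and source_port:
            return 'tcp://{}:{};{}'.format(ip_bracket(source_ip), source_port, dest)
        if source_ip:
            return 'tcp://{}:0;{}'.format(ip_bracket(source_ip), dest)
        if source_port:
            any_ip = '0.0.0.0' if ':' not in master_ip else '[::]'
            return 'tcp://{}:{};{}'.format(any_ip, source_port, dest)
        return 'tcp://{}'.format(dest)

    print(master_uri('127.0.0.1', 4506))                # tcp://127.0.0.1:4506
    print(master_uri('::1', 4506, source_port=4000))    # tcp://[::]:4000;[::1]:4506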
|
||||
|
||||
|
||||
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
|
||||
|
|
|
@ -8,7 +8,6 @@ Utilities for accessing storage container blobs on Azure
|
|||
# Import python libs
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import logging
|
||||
import inspect
|
||||
|
||||
# Import azure libs
|
||||
HAS_LIBS = False
|
||||
|
@ -19,7 +18,6 @@ except ImportError:
|
|||
pass
|
||||
|
||||
# Import salt libs
|
||||
from salt.ext import six
|
||||
from salt.exceptions import SaltSystemExit
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -178,25 +176,13 @@ def object_to_dict(obj):
|
|||
if isinstance(obj, list) or isinstance(obj, tuple):
|
||||
ret = []
|
||||
for item in obj:
|
||||
#ret.append(obj.__dict__[item])
|
||||
ret.append(object_to_dict(obj))
|
||||
elif isinstance(obj, six.text_type):
|
||||
ret = obj.encode('ascii', 'replace'),
|
||||
elif isinstance(obj, six.string_types):
|
||||
ret = obj
|
||||
else:
|
||||
ret.append(object_to_dict(item))
|
||||
elif hasattr(obj, '__dict__'):
|
||||
ret = {}
|
||||
for item in obj.__dict__:
|
||||
if item.startswith('_'):
|
||||
continue
|
||||
# This is ugly, but inspect.isclass() doesn't seem to work
|
||||
try:
|
||||
if inspect.isclass(obj) or 'class' in six.text_type(type(obj.__dict__.get(item))):
|
||||
ret[item] = object_to_dict(obj.__dict__[item])
|
||||
elif isinstance(obj.__dict__[item], six.text_type):
|
||||
ret[item] = obj.__dict__[item].encode('ascii', 'replace')
|
||||
else:
|
||||
ret[item] = obj.__dict__[item]
|
||||
except AttributeError:
|
||||
ret[item] = obj.get(item)
|
||||
ret[item] = object_to_dict(obj.__dict__[item])
|
||||
else:
|
||||
ret = obj
|
||||
return ret
|
||||
|
|
|
@ -58,10 +58,10 @@ except (ImportError, OSError, AttributeError, TypeError):
|
|||
def sanitize_host(host):
|
||||
'''
|
||||
Sanitize host string.
|
||||
https://tools.ietf.org/html/rfc1123#section-2.1
|
||||
'''
|
||||
return ''.join([
|
||||
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
|
||||
])
|
||||
RFC952_characters = ascii_letters + digits + ".-"
|
||||
return "".join([c for c in host[0:255] if c in RFC952_characters])
|
||||
|
||||
|
||||
def isportopen(host, port):
|
||||
|
@ -137,23 +137,11 @@ def _generate_minion_id():
|
|||
def first(self):
|
||||
return self and self[0] or None
|
||||
|
||||
hosts = DistinctList([])
|
||||
|
||||
try:
|
||||
hosts.append(socket.getfqdn())
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
hosts.append(platform.node())
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
hosts.append(socket.gethostname())
|
||||
except ValueError:
|
||||
pass
|
||||
hostname = socket.gethostname()
|
||||
|
||||
hosts = DistinctList().append(
|
||||
salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname)))
|
||||
).append(platform.node()).append(hostname)
|
||||
if not hosts:
|
||||
try:
|
||||
for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET,
|
||||
|
@ -1886,14 +1874,14 @@ def dns_check(addr, port, safe=False, ipv6=None):
|
|||
if h[0] == socket.AF_INET6 and ipv6 is False:
|
||||
continue
|
||||
|
||||
candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0])
|
||||
candidate_addr = h[4][0]
|
||||
|
||||
if h[0] != socket.AF_INET6 or ipv6 is not None:
|
||||
candidates.append(candidate_addr)
|
||||
|
||||
try:
|
||||
s = socket.socket(h[0], socket.SOCK_STREAM)
|
||||
s.connect((candidate_addr.strip('[]'), port))
|
||||
s.connect((candidate_addr, port))
|
||||
s.close()
|
||||
|
||||
resolved = candidate_addr
|
||||
|
@@ -1922,3 +1910,55 @@
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)
    return resolved


def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is given,
    the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13
    """
    host, port = None, None  # default

    _s_ = host_port[:]
    if _s_[0] == "[":
        if "]" in host_port:
            host, _s_ = _s_.lstrip("[").rsplit("]", 1)
            host = ipaddress.IPv6Address(host)
            if _s_[0] == ":":
                port = int(_s_.lstrip(":"))
            else:
                if len(_s_) > 1:
                    raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.', host_port, port)
                raise _e_
        else:
            host = _s_
    try:
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host)
            host = host_ip
    except ValueError:
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))

    return host, port
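A few calls that exercise the input forms listed in the docstring above. Per the code, bare IP addresses come back as ipaddress objects while hostnames stay strings; this assumes a build of this branch, where parse_host_port is available in salt.utils.network:

    from salt.utils.network import parse_host_port

    print(parse_host_port('hostname.domain.tld:5678'))  # ('hostname.domain.tld', 5678)
    print(parse_host_port('hostname'))                  # ('hostname', None)
    print(parse_host_port('10.11.12.13:4567'))          # (IPv4Address('10.11.12.13'), 4567)
    print(parse_host_port('[1234::5]:5678'))            # (IPv6Address('1234::5'), 5678)
    print(parse_host_port('1234::5'))                   # (IPv6Address('1234::5'), None)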
|
||||
|
|
|
@ -66,6 +66,7 @@ import re
|
|||
import tempfile
|
||||
|
||||
# Import Salt libs
|
||||
import salt.modules.cmdmod
|
||||
import salt.utils.files
|
||||
import salt.utils.platform
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
@ -117,8 +118,8 @@ def _auditpol_cmd(cmd):
|
|||
Raises:
|
||||
CommandExecutionError: If the command encounters an error
|
||||
'''
|
||||
ret = __salt__['cmd.run_all'](cmd='auditpol {0}'.format(cmd),
|
||||
python_shell=True)
|
||||
ret = salt.modules.cmdmod.run_all(cmd='auditpol {0}'.format(cmd),
|
||||
python_shell=True)
|
||||
if ret['retcode'] == 0:
|
||||
return ret['stdout'].splitlines()
|
||||
|
||||
|
|
|
@@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import tornado.ioloop
from salt.exceptions import SaltSystemExit
from salt._compat import ipaddress

log = logging.getLogger(__name__)

@@ -82,6 +83,5 @@ def ip_bracket(addr):
    Convert IP address representation to ZMQ (URL) format. ZMQ expects
    brackets around IPv6 literals, since they are used in URLs.
    '''
    if addr and ':' in addr and not addr.startswith('['):
        return '[{0}]'.format(addr)
    return addr
    addr = ipaddress.ip_address(addr)
    return ('[{}]' if addr.version == 6 else '{}').format(addr)
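With the rewrite above, ip_bracket() now validates its argument through ipaddress and only adds brackets for IPv6, so for example:

    from salt.utils.zeromq import ip_bracket

    print(ip_bracket('127.0.0.1'))   # 127.0.0.1
    print(ip_bracket('::1'))         # [::1]
    ip_bracket('not-an-ip')          # now raises ValueError instead of passing through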
|
||||
|
|
setup.py (5 lines changed)

@@ -874,7 +874,10 @@ class SaltDistribution(distutils.dist.Distribution):
        self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
        self.salt_version = __version__  # pylint: disable=undefined-variable
        self.description = 'Portable, distributed, remote execution and configuration management system'
        with open(SALT_LONG_DESCRIPTION_FILE) as f:
        kwargs = {}
        if IS_PY3:
            kwargs['encoding'] = 'utf-8'
        with open(SALT_LONG_DESCRIPTION_FILE, **kwargs) as f:
            self.long_description = f.read()
        self.long_description_content_type = 'text/x-rst'
        self.author = 'Thomas S Hatch'
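The setup.py change above passes encoding='utf-8' only on Python 3, because the built-in open() on Python 2 has no encoding parameter. The same guard in isolation (README.rst is just a placeholder filename here):

    import sys

    kwargs = {}
    if sys.version_info[0] >= 3:
        kwargs['encoding'] = 'utf-8'      # Python 2's open() has no encoding parameter

    with open('README.rst', **kwargs) as f:   # assumes a README.rst next to the script
        long_description = f.read()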
|
||||
|
|
|
@ -6,45 +6,51 @@ Tests for minion blackout
|
|||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
from time import sleep
|
||||
import time
|
||||
import textwrap
|
||||
|
||||
# Import Salt Testing libs
|
||||
from tests.support.case import ModuleCase
|
||||
from tests.support.paths import PILLAR_DIR
|
||||
from tests.support.helpers import destructiveTest, flaky
from tests.support.helpers import flaky

# Import Salt libs
import salt.utils.files

BLACKOUT_PILLAR = os.path.join(PILLAR_DIR, 'base', 'blackout.sls')


@destructiveTest
class MinionBlackoutTestCase(ModuleCase):
    '''
    Test minion blackout functionality
    '''

    @classmethod
    def setUpClass(cls):
        cls.blackout_pillar = os.path.join(PILLAR_DIR, 'base', 'blackout.sls')

    def tearDown(self):
        self.end_blackout(sleep=False)
        # Be sure to also refresh the sub_minion pillar
        self.run_function('saltutil.refresh_pillar', minion_tgt='sub_minion')
        time.sleep(10)  # wait for minion to exit blackout mode

    def begin_blackout(self, blackout_data='minion_blackout: True'):
        '''
        setup minion blackout mode
        '''
        with salt.utils.files.fopen(BLACKOUT_PILLAR, 'w') as wfh:
        with salt.utils.files.fopen(self.blackout_pillar, 'w') as wfh:
            wfh.write(blackout_data)
        self.run_function('saltutil.refresh_pillar')
        sleep(10)  # wait for minion to enter blackout mode
        time.sleep(10)  # wait for minion to enter blackout mode

    def end_blackout(self):
    def end_blackout(self, sleep=True):
        '''
        takedown minion blackout mode
        '''
        with salt.utils.files.fopen(BLACKOUT_PILLAR, 'w') as blackout_pillar:
            blackout_pillar.write(textwrap.dedent('''\
                minion_blackout: False
            '''))
        with salt.utils.files.fopen(self.blackout_pillar, 'w') as wfh:
            wfh.write('minion_blackout: False\n')
        self.run_function('saltutil.refresh_pillar')
        sleep(10)  # wait for minion to exit blackout mode
        if sleep:
            time.sleep(10)  # wait for minion to exit blackout mode

    @flaky
    def test_blackout(self):

@ -66,22 +72,19 @@ class MinionBlackoutTestCase(ModuleCase):
        '''
        Test that minion blackout whitelist works
        '''
        try:
            self.begin_blackout(textwrap.dedent('''\
                minion_blackout: True
                minion_blackout_whitelist:
                - test.ping
                - test.fib
                '''))
        self.begin_blackout(textwrap.dedent('''\
            minion_blackout: True
            minion_blackout_whitelist:
            - test.ping
            - test.fib
            '''))

            ping_ret = self.run_function('test.ping')
            self.assertEqual(ping_ret, True)
        ping_ret = self.run_function('test.ping')
        self.assertEqual(ping_ret, True)

            fib_ret = self.run_function('test.fib', [7])
            self.assertTrue(isinstance(fib_ret, list))
            self.assertEqual(fib_ret[0], 13)
        finally:
            self.end_blackout()
        fib_ret = self.run_function('test.fib', [7])
        self.assertTrue(isinstance(fib_ret, list))
        self.assertEqual(fib_ret[0], 13)

    @flaky
    def test_blackout_nonwhitelist(self):

@ -89,18 +92,15 @@ class MinionBlackoutTestCase(ModuleCase):
        Test that minion refuses to run non-whitelisted functions during
        blackout whitelist
        '''
        try:
            self.begin_blackout(textwrap.dedent('''\
                minion_blackout: True
                minion_blackout_whitelist:
                - test.ping
                - test.fib
                '''))
        self.begin_blackout(textwrap.dedent('''\
            minion_blackout: True
            minion_blackout_whitelist:
            - test.ping
            - test.fib
            '''))

            state_ret = self.run_function('state.apply')
            self.assertIn('Minion in blackout mode.', state_ret)
        state_ret = self.run_function('state.apply')
        self.assertIn('Minion in blackout mode.', state_ret)

            cloud_ret = self.run_function('cloud.query', ['list_nodes_full'])
            self.assertIn('Minion in blackout mode.', cloud_ret)
        finally:
            self.end_blackout()
        cloud_ret = self.run_function('cloud.query', ['list_nodes_full'])
        self.assertIn('Minion in blackout mode.', cloud_ret)

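For context on the tests above: blackout is driven purely by pillar data that the test case writes to its blackout.sls file and then refreshes. A minimal sketch of that flow, assuming a locally writable pillar file (the path below is illustrative, not taken from this diff):

# Hypothetical illustration of the pillar round-trip the blackout tests rely on.
import textwrap

import salt.utils.files

blackout_pillar = '/srv/pillar/base/blackout.sls'  # assumed path, for illustration only

with salt.utils.files.fopen(blackout_pillar, 'w') as wfh:
    wfh.write(textwrap.dedent('''\
        minion_blackout: True
        minion_blackout_whitelist:
          - test.ping
          - test.fib
        '''))
# A pillar refresh (as the tests do via saltutil.refresh_pillar) is still
# required before the minion actually enters blackout mode.
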
@ -67,7 +67,7 @@ class LDAPAuthTestCase(TestCase):
        '''
        self.opts['auth.ldap.freeipa'] = True
        with patch.dict(salt.auth.ldap.__opts__, self.opts):
            with patch('salt.auth.ldap.auth', return_value=Bind):
            with patch('salt.auth.ldap._bind', return_value=Bind):
                self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))

    def test_groups(self):

@ -75,7 +75,7 @@ class LDAPAuthTestCase(TestCase):
        test groups in ldap
        '''
        with patch.dict(salt.auth.ldap.__opts__, self.opts):
            with patch('salt.auth.ldap.auth', return_value=Bind):
            with patch('salt.auth.ldap._bind', return_value=Bind):
                self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))

    def test_groups_activedirectory(self):

@ -84,7 +84,7 @@ class LDAPAuthTestCase(TestCase):
        '''
        self.opts['auth.ldap.activedirectory'] = True
        with patch.dict(salt.auth.ldap.__opts__, self.opts):
            with patch('salt.auth.ldap.auth', return_value=Bind):
            with patch('salt.auth.ldap._bind', return_value=Bind):
                self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))

    def test_auth_nopass(self):

@ -153,6 +153,17 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix
        ret = roots.file_list_emptydirs({'saltenv': 'base'})
        self.assertIn('empty_dir', ret)

    def test_file_list_with_slash(self):
        opts = {'file_roots': copy.copy(self.opts['file_roots'])}
        opts['file_roots']['foo/bar'] = opts['file_roots']['base']
        load = {
            'saltenv': 'foo/bar',
        }
        with patch.dict(roots.__opts__, opts):
            ret = roots.file_list(load)
        self.assertIn('testfile', ret)
        self.assertIn(UNICODE_FILENAME, ret)

    def test_dir_list(self):
        ret = roots.dir_list({'saltenv': 'base'})
        self.assertIn('empty_dir', ret)

@ -19,12 +19,14 @@ from tests.support.mock (

# Import Salt Libs
import salt.modules.debian_ip as debian_ip
import salt.utils.platform

# Import third party libs
import jinja2.exceptions


@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.platform.is_windows(), 'Do not run these tests on Windows')
class DebianIpTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.debian_ip

@ -474,14 +476,17 @@ class DebianIpTestCase(TestCase, LoaderModuleMockMixin):
                patch('salt.modules.debian_ip._parse_hostname',
                      MagicMock(return_value='SaltStack')), \
                patch('salt.modules.debian_ip._parse_domainname',
                      MagicMock(return_value='saltstack.com')):
                      MagicMock(return_value='saltstack.com')), \
                patch('salt.modules.debian_ip._parse_searchdomain',
                      MagicMock(return_value='test.saltstack.com')):
            mock_avai = MagicMock(return_value=True)
            with patch.dict(debian_ip.__salt__, {'service.available': mock_avai,
                                                 'service.status': mock_avai}):
                self.assertEqual(debian_ip.get_network_settings(),
                                 ['NETWORKING=yes\n',
                                  'HOSTNAME=SaltStack\n',
                                  'DOMAIN=saltstack.com\n'])
                                 [u'NETWORKING=yes\n',
                                  u'HOSTNAME=SaltStack\n',
                                  u'DOMAIN=saltstack.com\n',
                                  u'SEARCH=test.saltstack.com\n'])

                mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
                                 ('error'))

@ -758,6 +758,28 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
                                     (name, user=user, group=group),
                                     ret)

        if salt.utils.platform.is_windows():
            mock_ret = MagicMock(return_value=ret)
            comt = ('File {0} not updated'.format(name))
        else:
            perms = {'luser': user,
                     'lmode': '0644',
                     'lgroup': group}
            mock_ret = MagicMock(return_value=(ret, perms))
            comt = ('File {0} will be updated with '
                    'permissions 0400 from its current '
                    'state of 0644'.format(name))

        with patch.dict(filestate.__salt__,
                        {'file.check_perms': mock_ret}):
            with patch.object(os.path, 'exists', mock_t):
                with patch.dict(filestate.__opts__, {'test': True}):
                    ret.update({'comment': comt})
                    self.assertDictEqual(filestate.managed
                                         (name, user=user,
                                          group=group,
                                          mode=400), ret)

    # 'directory' function tests: 1

    def test_directory(self):

@ -282,6 +282,24 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin):
        with patch.dict(saltmod.__salt__, {'saltutil.wheel': wheel_mock}):
            self.assertDictEqual(saltmod.wheel(name), ret)

    def test_state_ssh(self):
        '''
        Test saltmod passes roster to saltutil.cmd
        '''
        origcmd = saltmod.__salt__['saltutil.cmd']
        cmd_kwargs = {}
        cmd_args = []

        def cmd_mock(*args, **kwargs):
            cmd_args.extend(args)
            cmd_kwargs.update(kwargs)
            return origcmd(*args, **kwargs)

        with patch.dict(saltmod.__salt__, {'saltutil.cmd': cmd_mock}):
            ret = saltmod.state('state.sls', tgt='*', ssh=True, highstate=True, roster='my_roster')
        assert 'roster' in cmd_kwargs
        assert cmd_kwargs['roster'] == 'my_roster'


@skipIf(NO_MOCK, NO_MOCK_REASON)
class StatemodTests(TestCase, LoaderModuleMockMixin):

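The roster assertion above works by wrapping the real saltutil.cmd in a recorder so the keyword arguments can be inspected after the call. The same wrap-and-record pattern, sketched generically with only the standard library (the helper names here are illustrative, not part of the diff):

# Hypothetical illustration of the wrap-and-record mocking pattern used in test_state_ssh.
def make_recorder(func, seen_args, seen_kwargs):
    def wrapper(*args, **kwargs):
        seen_args.extend(args)          # remember positional arguments
        seen_kwargs.update(kwargs)      # remember keyword arguments
        return func(*args, **kwargs)    # still call the wrapped function
    return wrapper

seen_args, seen_kwargs = [], {}
wrapped = make_recorder(lambda *a, **kw: 'ok', seen_args, seen_kwargs)
wrapped('state.sls', tgt='*', ssh=True, roster='my_roster')
assert seen_kwargs['roster'] == 'my_roster'
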
@ -240,6 +240,7 @@ class UserTestCase(TestCase, LoaderModuleMockMixin):
                    'shadow.default_hash': shadow_hash,
                    'file.group_to_gid': MagicMock(side_effect=['foo']),
                    'file.gid_to_group': MagicMock(side_effect=[5000])}

        def mock_exists(*args):
            return True

@ -8,7 +8,7 @@ import shutil

# salt testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import(
from tests.support.mock import (
    patch,
    mock_open,
    NO_MOCK,

@ -305,17 +305,13 @@ class LazyLoaderSingleItem(TestCase):
        '''
        Checks that a KeyError is raised when the function key does not contain a '.'
        '''
        key = 'testing_no_dot'
        expected = "The key '{0}' should contain a '.'".format(key)
        with self.assertRaises(KeyError) as err:
            inspect.isfunction(self.loader['testing_no_dot'])

        if six.PY2:
            self.assertEqual(err.exception[0],
                             'The key \'%s\' should contain a \'.\'')
        else:
            self.assertEqual(
                six.text_type(err.exception),
                six.text_type(("The key '%s' should contain a '.'", 'testing_no_dot'))
            )
        result = err.exception.args[0]
        assert result == expected, result


module_template = '''

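The rewritten assertion above depends on one contract: loader keys must be of the form '<module>.<function>', and a key without a dot raises KeyError with a formatted message. A minimal sketch of that contract using a stand-in mapping (purely illustrative, not the real LazyLoader):

# Hypothetical stand-in that mimics the dotted-key contract being tested.
class DottedKeyStore(dict):
    def __getitem__(self, key):
        if '.' not in key:
            raise KeyError("The key '{0}' should contain a '.'".format(key))
        return dict.__getitem__(self, key)

store = DottedKeyStore({'test.ping': lambda: True})
store['test.ping']()  # OK: '<module>.<function>' style key
try:
    store['testing_no_dot']
except KeyError as exc:
    print(exc.args[0])  # The key 'testing_no_dot' should contain a '.'
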
@ -317,11 +317,15 @@ class ZMQConfigTest(TestCase):
        '''
        test _get_master_uri method
        '''

        m_ip = '127.0.0.1'
        m_port = 4505
        s_ip = '111.1.0.1'
        s_port = 4058

        m_ip6 = '1234:5678::9abc'
        s_ip6 = '1234:5678::1:9abc'

        with patch('salt.transport.zeromq.LIBZMQ_VERSION_INFO', (4, 1, 6)), \
                patch('salt.transport.zeromq.ZMQ_VERSION_INFO', (16, 0, 1)):
            # pass in both source_ip and source_port

@ -330,15 +334,27 @@ class ZMQConfigTest(TestCase):
                                                         source_ip=s_ip,
                                                         source_port=s_port) == 'tcp://{0}:{1};{2}:{3}'.format(s_ip, s_port, m_ip, m_port)

            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port,
                                                         source_ip=s_ip6,
                                                         source_port=s_port) == 'tcp://[{0}]:{1};[{2}]:{3}'.format(s_ip6, s_port, m_ip6, m_port)

            # source ip and source_port empty
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port) == 'tcp://{0}:{1}'.format(m_ip, m_port)

            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port) == 'tcp://[{0}]:{1}'.format(m_ip6, m_port)

            # pass in only source_ip
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port,
                                                         source_ip=s_ip) == 'tcp://{0}:0;{1}:{2}'.format(s_ip, m_ip, m_port)

            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port,
                                                         source_ip=s_ip6) == 'tcp://[{0}]:0;[{1}]:{2}'.format(s_ip6, m_ip6, m_port)

            # pass in only source_port
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port,

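The assertions above spell out the connect-string shapes _get_master_uri is expected to build when a source address and/or port is pinned. Restated as a runnable sketch using only the format strings from the test (no Salt import needed; the variable names are illustrative):

# Hypothetical illustration of the URI shapes asserted in the _get_master_uri test.
m_ip, m_port = '127.0.0.1', 4505
s_ip, s_port = '111.1.0.1', 4058

uri_both = 'tcp://{0}:{1};{2}:{3}'.format(s_ip, s_port, m_ip, m_port)  # source ip and port pinned
uri_plain = 'tcp://{0}:{1}'.format(m_ip, m_port)                       # no source binding
uri_ip_only = 'tcp://{0}:0;{1}:{2}'.format(s_ip, m_ip, m_port)         # source ip only, port left as 0

print(uri_both)     # tcp://111.1.0.1:4058;127.0.0.1:4505
print(uri_plain)    # tcp://127.0.0.1:4505
print(uri_ip_only)  # tcp://111.1.0.1:0;127.0.0.1:4505
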
@ -18,6 +18,7 @@ from tests.support.mock (

# Import salt libs
import salt.utils.network as network
from salt._compat import ipaddress

log = logging.getLogger(__name__)

@ -181,15 +182,6 @@ class NetworkTestCase(TestCase):
        with patch('salt.utils.files.fopen', fopen_mock):
            assert 'thisismyhostname' in network._generate_minion_id()

    def test_generate_minion_id_with_long_hostname(self):
        '''
        Test that hostnames longer than 63 characters do not raise
        an exception when generating the minion ID
        '''
        with patch('socket.gethostbyaddr') as mock_gethostbyname:
            mock_gethostbyname.side_effect = UnicodeError('encoding with \'idna\' codec failed')
            self.assertTrue(network.generate_minion_id())

    def test_is_ip(self):
        self.assertTrue(network.is_ip('10.10.0.3'))
        self.assertFalse(network.is_ip('0.9.800.1000'))

@ -211,6 +203,35 @@ class NetworkTestCase(TestCase):
        self.assertFalse(network.is_ipv6('10.0.1.2'))
        self.assertFalse(network.is_ipv6('2001.0db8.85a3.0000.0000.8a2e.0370.7334'))

    def test_parse_host_port(self):
        _ip = ipaddress.ip_address
        good_host_ports = {
            '10.10.0.3': (_ip('10.10.0.3'), None),
            '10.10.0.3:1234': (_ip('10.10.0.3'), 1234),
            '2001:0db8:85a3::8a2e:0370:7334': (_ip('2001:0db8:85a3::8a2e:0370:7334'), None),
            '[2001:0db8:85a3::8a2e:0370:7334]:1234': (_ip('2001:0db8:85a3::8a2e:0370:7334'), 1234),
            '2001:0db8:85a3::7334': (_ip('2001:0db8:85a3::7334'), None),
            '[2001:0db8:85a3::7334]:1234': (_ip('2001:0db8:85a3::7334'), 1234)
        }
        bad_host_ports = [
            '10.10.0.3/24',
            '10.10.0.3::1234',
            '2001:0db8:0370:7334',
            '2001:0db8:0370::7334]:1234',
            '2001:0db8:0370:0:a:b:c:d:1234'
        ]
        for host_port, assertion_value in good_host_ports.items():
            host = port = None
            host, port = network.parse_host_port(host_port)
            self.assertEqual((host, port), assertion_value)

        for host_port in bad_host_ports:
            try:
                self.assertRaises(ValueError, network.parse_host_port, host_port)
            except AssertionError as _e_:
                log.error('bad host_port value: "%s" failed to trigger ValueError exception', host_port)
                raise _e_

    def test_is_subnet(self):
        for subnet_data in (IPV4_SUBNETS, IPV6_SUBNETS):
            for item in subnet_data[True]:

@ -543,3 +564,15 @@ class NetworkTestCase(TestCase):
        self.assertRaises(ValueError, network.mac_str_to_bytes, 'a0:b0:c0:d0:e0:fg')
        self.assertEqual(b'\x10\x08\x06\x04\x02\x00', network.mac_str_to_bytes('100806040200'))
        self.assertEqual(b'\xf8\xe7\xd6\xc5\xb4\xa3', network.mac_str_to_bytes('f8e7d6c5b4a3'))

    def test_generate_minion_id_with_long_hostname(self):
        '''
        Validate the fix for:

        https://github.com/saltstack/salt/issues/51160
        '''
        long_name = 'localhost-abcdefghijklmnopqrstuvwxyz-abcdefghijklmnopqrstuvwxyz'
        with patch('socket.gethostname', MagicMock(return_value=long_name)):
            # An exception is raised if unicode is passed to socket.getfqdn
            minion_id = network.generate_minion_id()
        assert minion_id != '', minion_id

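The good/bad tables in test_parse_host_port above double as usage notes for the new helper. A minimal sketch, assuming Salt is importable and the return values match the assertions in the test (an ip_address object plus an optional port):

# Hypothetical usage of salt.utils.network.parse_host_port, mirroring the test data.
import salt.utils.network as network

host, port = network.parse_host_port('10.10.0.3:1234')
# per the assertions above: host == ipaddress.ip_address('10.10.0.3'), port == 1234

host, port = network.parse_host_port('[2001:0db8:85a3::7334]:1234')
# IPv6 addresses carry brackets when a port is attached

network.parse_host_port('10.10.0.3/24')  # expected to raise ValueError
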
@ -55,7 +55,7 @@ class WinLgpoAuditpolTestCase(TestCase, LoaderModuleMockMixin):
    def test_set_setting(self):
        names = ['Credential Validation', 'IPsec Driver', 'File System', 'SAM']
        mock_set = MagicMock(return_value={'retcode': 0, 'stdout': 'Success'})
        with patch.dict(win_lgpo_auditpol.__salt__, {'cmd.run_all': mock_set}):
        with patch.object(salt.modules.cmdmod, 'run_all', mock_set):
            with patch.object(win_lgpo_auditpol, '_get_valid_names',
                              return_value=[k.lower() for k in names]):
                for name in names:

@ -50,6 +50,7 @@ class ValidateNetTestCase(TestCase):
        Test IPv6 address validation
        '''
        true_addrs = [
            '::',
            '::1',
            '::1/32',
            '::1/32',

@ -62,6 +63,8 @@ class ValidateNetTestCase(TestCase):
            '::1/0',
            '::1/32d',
            '::1/129',
            '2a03:4000:c:10aa:1017:f00d:aaaa:a:4506',
            '2a03::1::2',
        ]

        for addr in true_addrs: