Merge remote-tracking branch 'upstream/master' into merge-forward/3005.1

MKLeb 2022-10-07 12:21:45 -04:00
commit 9fe647100f
No known key found for this signature in database
GPG key ID: 089B64EA1A99DDD1
80 changed files with 1370 additions and 934 deletions

View file

@ -1,7 +1,7 @@
name: PR Checks
on:
pull_request_target:
pull_request:
types: [opened, synchronize]
permissions:

1
changelog/48609.changed Normal file
View file

@ -0,0 +1 @@
More intelligent diffing in changes of file.serialize state.
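
For illustration, a minimal sketch (sample data is hypothetical; assumes Salt is importable) of the recursive diff that file.serialize now records under changes['diff'] when merge_if_exists finds differences:

    import salt.utils.dictdiffer

    # Hypothetical existing vs. merged data for a serialized file.
    existing_data = {"foo": {"abc": 123, "keep": True}}
    merged_data = {"foo": {"abc": 456, "keep": True}}

    # recursive_diff returns an object whose ``diffs`` attribute nests the
    # old/new values; the state stores this under ret["changes"]["diff"].
    diff = salt.utils.dictdiffer.recursive_diff(existing_data, merged_data)
    print(diff.diffs)  # e.g. {'foo': {'abc': {'old': 123, 'new': 456}}}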

1
changelog/52354.fixed Normal file
View file

@ -0,0 +1 @@
Don't check for cached pillar errors on state.apply

1
changelog/57180.fixed Normal file
View file

@ -0,0 +1 @@
Don't check for cached pillar errors on state.apply

1
changelog/59339.fixed Normal file
View file

@ -0,0 +1 @@
Don't check for cached pillar errors on state.apply
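
The three entries above all point at the same behaviour change. A minimal, self-contained sketch (hypothetical pillar data) mirroring the simplified check in salt.modules.state._get_pillar_errors: a freshly rendered, clean pillar now masks errors cached in the in-memory pillar.

    def get_pillar_errors(kwargs, pillar, in_memory_pillar):
        # Mirrors the new logic: the freshly rendered pillar is consulted when
        # available; the in-memory pillar is only the fallback.
        return None if kwargs.get("force") else (pillar or in_memory_pillar).get("_errors")

    cached = {"foo": "bar", "_errors": ["stale render failure"]}  # in-memory pillar
    fresh = {"foo": "bar"}                                        # re-rendered cleanly

    assert get_pillar_errors({}, fresh, cached) is None           # stale error ignored
    assert get_pillar_errors({}, None, cached) == ["stale render failure"]
    assert get_pillar_errors({"force": True}, None, cached) is None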

1
changelog/60700.added Normal file
View file

@ -0,0 +1 @@
Added autostart option to virt.defined and virt.running states, along with virt.update execution modules.
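
Illustration only (target and guest name are hypothetical, and a running master is assumed): the new flag can be passed straight to the virt.update execution module, and the virt.defined/virt.running states forward the same keyword.

    import salt.client

    local = salt.client.LocalClient()
    # Equivalent to: salt 'hypervisor*' virt.update guest1 autostart=True
    result = local.cmd("hypervisor*", "virt.update", ["guest1"], kwarg={"autostart": True})
    print(result)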

3
changelog/61092.fixed Normal file
View file

@ -0,0 +1,3 @@
state.orchestrate_single only passes a pillar if it is set to the state
function. This allows it to be used with state functions that don't accept a
pillar keyword argument.
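
A minimal sketch of the change (standalone function, hypothetical data): pillar is only forwarded when it was actually supplied, so state functions without a pillar keyword argument no longer receive pillar=None.

    def build_kwargs(pillar=None, **kwargs):
        # Mirrors the new handling in state.orchestrate_single.
        if pillar is not None:
            if isinstance(pillar, dict):
                kwargs["pillar"] = pillar
            else:
                raise ValueError("Pillar data must be formatted as a dictionary")
        return kwargs

    assert build_kwargs() == {}                                   # nothing forwarded
    assert build_kwargs(pillar={"role": "db"}) == {"pillar": {"role": "db"}}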

1
changelog/61650.fixed Normal file
View file

@ -0,0 +1 @@
Fix ssh config roster to correctly parse the ssh config files that contain spaces.
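
A small sketch of the fix (the pattern below is a simplified stand-in for the roster's field patterns): captured values are stripped, so trailing spaces in an ssh config no longer leak into the target definition.

    import re

    line = "    HostName   bastion.example.com   \n"
    match = re.match(r"^\s*HostName\s+(.*)", line)
    # Previously the raw group, trailing spaces included, was stored verbatim.
    assert match.group(1) == "bastion.example.com   "
    # The roster now strips the value before storing it on the target.
    assert match.group(1).strip() == "bastion.example.com"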

1
changelog/62030.fixed Normal file
View file

@ -0,0 +1 @@
Fix inconsistency regarding name and pkgs parameters between zypperpkg.upgrade() and yumpkg.upgrade()
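
For illustration (minion id is hypothetical, running master assumed): zypperpkg.upgrade now honours name/pkgs the same way yumpkg.upgrade does, so a targeted upgrade works on both platforms.

    import salt.client

    local = salt.client.LocalClient()
    # Upgrade only the named packages instead of the whole system.
    local.cmd("suse-minion", "pkg.upgrade", kwarg={"pkgs": ["vim", "emacs"]})
    local.cmd("suse-minion", "pkg.upgrade", kwarg={"name": "vim"})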

1
changelog/62031.added Normal file
View file

@ -0,0 +1 @@
Add `diff_attr` parameter to pkg.upgrade() (zypper/yum).
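
For illustration, the shape of the changes dict when diff_attr is passed (package names and versions are hypothetical); without diff_attr the old/new entries stay plain version strings.

    # salt '*' pkg.upgrade diff_attr='["version", "arch"]'
    expected_changes = {
        "vim": {
            "old": {"version": "8.0", "arch": "x86_64"},
            "new": {"version": "9.0", "arch": "x86_64"},
        }
    }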

1
changelog/62032.fixed Normal file
View file

@ -0,0 +1 @@
Fix attr=all handling in pkg.list_pkgs() (yum/zypper).
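
A minimal sketch of the fix (assumes Salt is importable): the special value "all" is now passed through untouched instead of being split into a single unknown attribute.

    import salt.utils.args

    def normalize_attr(attr):
        # Mirrors the corrected guard in yumpkg/zypperpkg list_pkgs().
        if attr is not None and attr != "all":
            attr = salt.utils.args.split_input(attr)
        return attr

    assert normalize_attr("all") == "all"                          # passed through
    assert normalize_attr("version,arch") == ["version", "arch"]   # still split
    assert normalize_attr(None) is None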

1
changelog/62474.fixed Normal file
View file

@ -0,0 +1 @@
Fixed parsing CDROM apt sources

View file

@ -0,0 +1 @@
The `expand_repo_def` function in `salt.modules.aptpkg` is now deprecated. It's only used in `salt.states.pkgrepo` and it has no use of being exposed to the CLI.

1
changelog/62676.fixed Normal file
View file

@ -0,0 +1 @@
Modified "_get_flags" function so that it returns regex flags instead of integers

1
changelog/62817.fixed Normal file
View file

@ -0,0 +1 @@
Prevent annoying RuntimeWarning message about line buffering (buffering=1) not being supported in binary mode
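
A self-contained sketch of the workaround now applied in salt.utils.files.fopen (the kwargs dict is hypothetical): a line-buffering request on a binary file is promoted to the default buffer size.

    import io

    kwargs = {"mode": "rb", "buffering": 1}   # caller asking for line buffering

    # Line buffering is meaningless for binary streams and makes open() emit a
    # RuntimeWarning; fall back to the default buffer size instead.
    if kwargs.get("buffering") == 1 and "b" in kwargs.get("mode", ""):
        kwargs["buffering"] = io.DEFAULT_BUFFER_SIZE

    print(kwargs["buffering"])  # typically 8192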

2
changelog/62818.fixed Normal file
View file

@ -0,0 +1,2 @@
Include UID and GID checks in modules.file.check_perms as well as comparing
ownership by username and group name.
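
A minimal sketch (hypothetical stat data) of the comparison file.check_perms now performs: ownership matches whether the caller supplies a name or a numeric id, so neither form triggers a spurious change.

    cur = {"user": "luser", "uid": 3001, "group": "lgroup", "gid": 4001}

    def user_change_needed(user):
        # Mirrors the new check: compare against both the username and the uid.
        return not (user == cur["user"] or user == cur["uid"])

    assert not user_change_needed("luser")   # matches by name
    assert not user_change_needed(3001)      # matches by id
    assert user_change_needed("cuser")
    assert user_change_needed(1001)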

View file

@ -51,7 +51,7 @@
# Key cache. Increases master speed for large numbers of accepted
# keys. Available options: 'sched'. (Updates on a fixed schedule.)
# Note that enabling this feature means that minions will not be
# available to target for up to the length of the maintanence loop
# available to target for up to the length of the maintenance loop
# which by default is 60s.
#key_cache: ''

View file

@ -49,7 +49,7 @@ syndic_user: salt
# Key cache. Increases master speed for large numbers of accepted
# keys. Available options: 'sched'. (Updates on a fixed schedule.)
# Note that enabling this feature means that minions will not be
# available to target for up to the length of the maintanence loop
# available to target for up to the length of the maintenance loop
# which by default is 60s.
#key_cache: ''

View file

@ -1102,7 +1102,7 @@ Name of existing container.
signed_identifiers
``````````````````
SignedIdentifers instance
SignedIdentifiers instance
blob_public_access
``````````````````

View file

@ -13,7 +13,7 @@ Requirements
.. note::
Support ``winexe`` and ``impacket`` has been deprecated and will be removed in
3001. These dependencies are replaced by ``pypsexec`` and ``smbprotocol``
respectivly. These are pure python alternatives that are compatible with all
respectively. These are pure python alternatives that are compatible with all
supported python versions.
Salt Cloud makes use of `impacket` and `winexe` to set up the Windows Salt

View file

@ -20,7 +20,7 @@ is hosted by Google Groups. It is open to new members.
.. _`salt-users mailing list`: https://groups.google.com/forum/#!forum/salt-users
Additionally, all users of Salt should be subscribed to the Announcements mailing
list which contains important updates about Salt, such as new releaes and
list which contains important updates about Salt, such as new releases and
security-related announcements. This list is low-traffic.
.. _`salt-announce mailing list`: https://groups.google.com/forum/#!forum/salt-announce

View file

@ -160,7 +160,7 @@ New modules must be added to the index manually.
:blob:`state modules<doc/ref/states/all/index.rst>`,
:blob:`renderer modules <doc/ref/renderers/all/index.rst>`, etc.
2. Add the new module to the alphebetized list.
2. Add the new module to the alphabetized list.
3. :ref:`Build the documentation <docs-building>` which will generate an ``.rst``
file for the new module in the same directory as the ``index.rst``.

View file

@ -598,7 +598,7 @@ Avoid heavy logic and programming
`````````````````````````````````
Jinja is not Python. It was made by Python programmers and shares many
semantics and some syntax but it does not allow for abitrary Python function
semantics and some syntax but it does not allow for arbitrary Python function
calls or Python imports. Jinja is a fast and efficient templating language but
the syntax can be verbose and visually noisy.

View file

@ -32,7 +32,7 @@ example):
#. Announce new RC to salt-users and salt-announce google groups.
#. Triage incoming issues based on the new RC release.
#. Fix RC issues once they are categorized as a release blocker.
#. Depending on the issues found during the RC process make a decesion
#. Depending on the issues found during the RC process make a decision
on whether to release based off the RC or go through another RC process
#. If a RC is categorized as stable, build all required packages.
#. Test all release packages.

View file

@ -23,7 +23,7 @@ In an issue report, please include the following information:
* Any configuration options set in a configuration file that may be relevant.
* A reproduceable test case. This may be as simple as an SLS file that
* A reproducible test case. This may be as simple as an SLS file that
illustrates a problem or it may be a link to a repository that contains a
number of SLS files that can be used together to re-produce a problem. If
the problem is transitory, any information that can be used to try and

View file

@ -1144,7 +1144,7 @@ Example:
This option may have adverse effects when using the default renderer,
``jinja|yaml``. This is due to the fact that YAML requires proper handling
in regard to special characters. Please see the section on :ref:`YAML ASCII
support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncracies
support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncrasies
<yaml-idiosyncrasies>` documentation for more information.
.. jinja_ref:: json_decode_list
@ -1988,7 +1988,7 @@ Example:
This option may have adverse effects when using the default renderer,
``jinja|yaml``. This is due to the fact that YAML requires proper handling
in regard to special characters. Please see the section on :ref:`YAML ASCII
support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncracies
support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncrasies
<yaml-idiosyncrasies>` documentation for more information.
.. jinja_ref:: dns_check
@ -2366,8 +2366,8 @@ external template file.
.. note::
Macros and variables can be shared across templates. They should not be
starting with one or more underscores, and should be managed by one of the
Macros and variables can be shared across templates. They should not start
with one or more underscores, and should be managed by one of the
following tags: `macro`, `set`, `load_yaml`, `load_json`, `import_yaml` and
`import_json`.

View file

@ -522,7 +522,7 @@ The :py:func:`pillar.get <salt.modules.pillar.get>` Function
The :mod:`pillar.get <salt.modules.pillar.get>` function works much in the same
way as the ``get`` method in a python dict, but with an enhancement: nested
dictonaries can be traversed using a colon as a delimiter.
dictionaries can be traversed using a colon as a delimiter.
If a structure like this is in pillar:
@ -706,7 +706,7 @@ The following functions support passing pillar data on the CLI via the
- :py:func:`state.highstate <salt.modules.state.highstate>`
- :py:func:`state.sls <salt.modules.state.sls>`
Triggerring decryption of this CLI pillar data can be done in one of two ways:
Triggering decryption of this CLI pillar data can be done in one of two ways:
1. Using the ``pillar_enc`` argument:

View file

@ -100,7 +100,7 @@ the 'url' key above should say ``url: http://127.0.0.1:8000``
8. The REST service implements a degenerately simple pkg and service provider as
well as a small set of grains. To "install" a package, use a standard
``pkg.install``. If you pass '==' and a verrsion number after the package
``pkg.install``. If you pass '==' and a version number after the package
name then the service will parse that and accept that as the package's
version.

View file

@ -76,6 +76,6 @@ from the file ``/srv/pillar/p8000.sls`` (if you have not changed your default pi
8. The SSH shell implements a degenerately simple pkg.
To "install" a package, use a standard
``pkg.install``. If you pass '==' and a verrsion number after the package
``pkg.install``. If you pass '==' and a version number after the package
name then the service will parse that and accept that as the package's
version.

View file

@ -99,7 +99,7 @@ API and the runner system. In this example, a command is published to the
- mods: orchestrate.runit
{% endif %}
This example will execute the state.orchestrate runner and intiate an execution
This example will execute the state.orchestrate runner and initiate an execution
of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
Types of Reactions

View file

@ -19,7 +19,7 @@ issue on github: #37027
Grains
=========================
Not all grains are available or some have empty or 0 as value. Mostly grains
that are depenend on hardware discovery like:
that are dependent on hardware discovery like:
- num_gpus
- gpus

View file

@ -8,7 +8,7 @@ In the 2019.2.0 release the ``ssh_ext_alternatives`` feature was added.
This allows salt-ssh to work across different supported python versions. You will
need to ensure you have the following:
- Salt is installed, with all required dependnecies for the Python version.
- Salt is installed, with all required dependencies for the Python version.
- Everything needs to be importable from the respective Python environment.
To enable using this feature you will need to edit the master configuration similar
@ -56,7 +56,7 @@ does not require you to define them under ``dependencies``.
py_bin: /usr/bin/python2.7 # Python binary path used to auto detect dependencies
If ``py_bin`` is not set alongside ``auto_detect``, it will attempt to auto detect
the dependnecies using the major version set in ``py-version``. For example if you
the dependencies using the major version set in ``py-version``. For example if you
have ``[2, 7]`` set as your ``py-version``, it will attempt to use the binary ``python2``.
You can also use ``auto_detect`` and ``dependencies`` together.
@ -72,7 +72,7 @@ You can also use ``auto_detect`` and ``dependencies`` together.
dependencies: # List of dependencies and their installation paths
jinja2: /opt/jinja2
If a dependency is defined in the ``dependecies`` list ``ssh_ext_alternatives`` will use
If a dependency is defined in the ``dependencies`` list ``ssh_ext_alternatives`` will use
this dependency, instead of the path that ``auto_detect`` finds. For example, if you define
``/opt/jinja2`` under your ``dependencies`` for jinja2, it will not try to autodetect the
file path to the jinja2 module, and will favor ``/opt/jinja2``.

View file

@ -282,7 +282,7 @@ that information back to the waiting client before the job can be published.
To mitigate this, a key cache may be enabled. This will reduce the load
on the master to a single file open instead of thousands or tens of thousands.
This cache is updated by the maintanence process, however, which means that
This cache is updated by the maintenance process, however, which means that
minions with keys that are accepted may not be targeted by the master
for up to sixty seconds by default.

View file

@ -538,7 +538,7 @@ def do_cleanup(cleanup):
behavior over time. Passed in volumes and domains are deleted, any errors
are ignored. Used when cloning/provisioning a domain fails.
:param cleanup: list containing dictonaries with two keys: 'what' and 'item'.
:param cleanup: list containing dictionaries with two keys: 'what' and 'item'.
If 'what' is domain the 'item' is a libvirt domain object.
If 'what' is volume then the item is a libvirt volume object.

View file

@ -23,7 +23,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2022.08.12"
__ScriptVersion="2022.10.04"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -584,7 +584,6 @@ fi
echoinfo "Running version: ${__ScriptVersion}"
echoinfo "Executed by: ${CALLER}"
echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'"
echowarn "Running the unstable version of ${__ScriptName}"
# Define installation type
if [ "$#" -gt 0 ];then
@ -636,10 +635,10 @@ elif [ "$ITYPE" = "onedir" ]; then
if [ "$#" -eq 0 ];then
ONEDIR_REV="latest"
else
if [ "$(echo "$1" | grep -E '^(latest)$')" != "" ]; then
if [ "$(echo "$1" | grep -E '^(latest|3005)$')" != "" ]; then
ONEDIR_REV="$1"
shift
elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then
elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}(\.[0-9]*)?)')" != "" ]; then
# Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix
ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/')
ONEDIR_REV="minor/$ONEDIR_REV"
@ -669,7 +668,7 @@ elif [ "$ITYPE" = "onedir_rc" ]; then
ONEDIR_REV="minor/$1"
shift
else
echo "Unknown stable version: $1 (valid: 3005, latest.)"
echo "Unknown stable version: $1 (valid: 3005-1, latest.)"
exit 1
fi
fi
@ -1439,8 +1438,8 @@ __check_dpkg_architecture() {
warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents."
else
# Saltstack official repository does not yet have arm64 metadata,
# use amd64 repositories on arm64, since all pkgs are arch-independent
__REPO_ARCH="amd64"
# use arm64 repositories on arm64, since all pkgs are arch-independent
__REPO_ARCH="arm64"
__REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]"
warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository."
fi
@ -4491,7 +4490,7 @@ enabled=1
enabled_metadata=1
_eof
fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/"
fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}/"
for key in $gpg_key; do
__rpm_import_gpg "${fetch_url}${key}" || return 1
done
@ -5287,6 +5286,15 @@ install_red_hat_enterprise_workstation_testing_post() {
# Oracle Linux Install Functions
#
install_oracle_linux_stable_deps() {
# Install Oracle's EPEL.
if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_FALSE ]; then
_EPEL_REPO=oracle-epel-release-el${DISTRO_MAJOR_VERSION}
if ! rpm -q "${_EPEL_REPO}" > /dev/null; then
__yum_install_noinput "${_EPEL_REPO}"
fi
_EPEL_REPOS_INSTALLED=$BS_TRUE
fi
install_centos_stable_deps || return 1
return 0
}

View file

@ -119,7 +119,7 @@ VALID_OPTS = immutabletypes.freeze(
"master_uri_format": str,
# The following options refer to the Minion only, and they specify
# the details of the source address / port to be used when connecting to
# the Master. This is useful when dealing withmachines where due to firewall
# the Master. This is useful when dealing with machines where due to firewall
# rules you are restricted to use a certain IP/port combination only.
"source_interface_name": str,
"source_address": str,
@ -131,7 +131,7 @@ VALID_OPTS = immutabletypes.freeze(
# Deprecated in 2019.2.0. Use 'random_master' instead.
# Do not remove! Keep as an alias for usability.
"master_shuffle": bool,
# When in multi-master mode, temporarily remove a master from the list if a conenction
# When in multi-master mode, temporarily remove a master from the list if a connection
# is interrupted and try another master in the list.
"master_alive_interval": int,
# When in multi-master failover mode, fail back to the first master in the list if it's back
@ -165,7 +165,7 @@ VALID_OPTS = immutabletypes.freeze(
"syndic_finger": str,
# The caching mechanism to use for the PKI key store. Can substantially decrease master publish
# times. Available types:
# 'maint': Runs on a schedule as a part of the maintanence process.
# 'maint': Runs on a schedule as a part of the maintenance process.
# '': Disable the key cache [default]
"key_cache": str,
# The user under which the daemon should run
@ -208,7 +208,7 @@ VALID_OPTS = immutabletypes.freeze(
"renderer": str,
# Renderer whitelist. The only renderers from this list are allowed.
"renderer_whitelist": list,
# Rendrerer blacklist. Renderers from this list are disalloed even if specified in whitelist.
# Renderer blacklist. Renderers from this list are disallowed even if specified in whitelist.
"renderer_blacklist": list,
# A flag indicating that a highstate run should immediately cease if a failure occurs.
"failhard": bool,

View file

@ -47,6 +47,7 @@ from salt.exceptions import (
SaltInvocationError,
)
from salt.modules.cmdmod import _parse_env
from salt.utils.versions import warn_until_date
log = logging.getLogger(__name__)
@ -3018,7 +3019,7 @@ def file_dict(*packages, **kwargs):
return __salt__["lowpkg.file_dict"](*packages)
def expand_repo_def(**kwargs):
def _expand_repo_def(os_name, lsb_distrib_codename=None, **kwargs):
"""
Take a repository definition and expand it to the full pkg repository dict
that can be used for comparison. This is a helper function to make
@ -3032,8 +3033,8 @@ def expand_repo_def(**kwargs):
sanitized = {}
repo = kwargs["repo"]
if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
dist = __grains__["lsb_distrib_codename"]
if repo.startswith("ppa:") and os_name in ("Ubuntu", "Mint", "neon"):
dist = lsb_distrib_codename
owner_name, ppa_name = repo[4:].split("/", 1)
if "ppa_auth" in kwargs:
auth_info = "{}@".format(kwargs["ppa_auth"])
@ -3120,6 +3121,32 @@ def expand_repo_def(**kwargs):
return sanitized
def expand_repo_def(**kwargs):
"""
Take a repository definition and expand it to the full pkg repository dict
that can be used for comparison. This is a helper function to make
the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states.
This is designed to be called from pkgrepo states and will have little use
being called on the CLI.
CLI Examples:
.. code-block:: bash
NOT USABLE IN THE CLI
"""
warn_until_date(
"20240101",
"The pkg.expand_repo_def function is deprecated and set for removal "
"after {date}. This is only unsed internally by the apt pkg state "
"module. If that's not the case, please file an new issue requesting "
"the removal of this deprecation warning",
stacklevel=3,
)
return _expand_repo_def(**kwargs)
def _parse_selections(dpkgselection):
"""
Parses the format from ``dpkg --get-selections`` and return a format that

View file

@ -1822,7 +1822,7 @@ def delete_nat_gateway(
nat_gateway_id
Id of the NAT Gateway
releaes_eips
release_eips
whether to release the elastic IPs associated with the given NAT Gateway Id
region

View file

@ -1,19 +1,23 @@
"""
An execution module that interacts with the Datadog API
The following parameters are required for all functions.
:depends: datadog_ Python module
api_key
The datadog API key
.. _datadog: https://pypi.python.org/pypi/datadog
app_key
The datadog application key
.. note::
The following parameters are required for all functions:
api_key
The datadog API key
app_key
The datadog application key
Full argument reference is available on the Datadog API reference page
https://docs.datadoghq.com/api/
"""
import requests
from salt.exceptions import SaltInvocationError

View file

@ -16,7 +16,6 @@ import hashlib
import itertools
import logging
import mmap
import operator
import os
import re
import shutil
@ -28,7 +27,6 @@ import time
import urllib.parse
from collections import namedtuple
from collections.abc import Iterable, Mapping
from functools import reduce
import salt.utils.args
import salt.utils.atomicfile
@ -1622,38 +1620,38 @@ def comment_line(path, regex, char="#", cmnt=True, backup=".bak"):
def _get_flags(flags):
"""
Return an integer appropriate for use as a flag for the re module from a
list of human-readable strings
Return the names of the Regex flags that correspond to flags
.. code-block:: python
>>> _get_flags(['MULTILINE', 'IGNORECASE'])
10
>>> _get_flags(['IGNORECASE', 'MULTILINE'])
re.IGNORECASE|re.MULTILINE
>>> _get_flags('MULTILINE')
8
>>> _get_flags(2)
2
re.MULTILINE
>>> _get_flags(8)
re.MULTILINE
>>> _get_flags(re.IGNORECASE)
re.IGNORECASE
"""
if isinstance(flags, str):
if isinstance(flags, re.RegexFlag):
return flags
elif isinstance(flags, int):
return re.RegexFlag(flags)
elif isinstance(flags, str):
flags = [flags]
if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
_flags_acc = [0] # An initial 0 avoids resucing on empty list, an error
_flags = re.RegexFlag(0)
for flag in flags:
_flag = getattr(re, str(flag).upper())
if not isinstance(_flag, int):
raise SaltInvocationError("Invalid re flag given: {}".format(flag))
_flags_acc.append(_flag)
return reduce(operator.__or__, _flags_acc)
elif isinstance(flags, int):
return flags
_flag = getattr(re.RegexFlag, str(flag).upper(), None)
if not _flag:
raise CommandExecutionError(f"Invalid re flag given: {flag}")
_flags |= _flag
return _flags
else:
raise SaltInvocationError(
'Invalid re flags: "{}", must be given either as a single flag '
"string, a list of strings, or as an integer".format(flags)
raise CommandExecutionError(
f'Invalid re flags: "{flags}", must be given either as a single flag '
"string, a list of strings, as an integer, or as an re flag"
)
@ -2513,8 +2511,8 @@ def replace(
"Only one of append and prepend_if_not_found is permitted"
)
flags_num = _get_flags(flags)
cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
re_flags = _get_flags(flags)
cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), re_flags)
filesize = os.path.getsize(path)
if bufsize == "file":
bufsize = filesize
@ -2582,7 +2580,7 @@ def replace(
"^{}($|(?=\r\n))".format(re.escape(content))
),
r_data,
flags=flags_num,
flags=re_flags,
):
# Content was found, so set found.
found = True
@ -3132,7 +3130,11 @@ def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline
salt '*' file.search /etc/crontab 'mymaintenance.sh'
"""
if multiline:
flags = _add_flags(flags, "MULTILINE")
re_flags = _add_flags(flags, "MULTILINE")
else:
re_flags = _get_flags(flags)
if re.RegexFlag.MULTILINE in re_flags:
bufsize = "file"
# This function wraps file.replace on purpose in order to enforce
@ -3142,7 +3144,7 @@ def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline
path,
pattern,
"",
flags=flags,
flags=re_flags,
bufsize=bufsize,
dry_run=True,
search_only=True,
@ -4597,7 +4599,7 @@ def get_managed(
skip_verify=False,
verify_ssl=True,
use_etag=False,
**kwargs
**kwargs,
):
"""
Return the managed file data for file.managed
@ -4805,7 +4807,7 @@ def get_managed(
pillar=__pillar__,
grains=__opts__["grains"],
opts=__opts__,
**kwargs
**kwargs,
)
else:
return (
@ -5099,6 +5101,7 @@ def check_perms(
``follow_symlinks`` option added
"""
name = os.path.expanduser(name)
mode = salt.utils.files.normalize_mode(mode)
if not ret:
ret = {"name": name, "changes": {}, "comment": [], "result": True}
@ -5107,123 +5110,125 @@ def check_perms(
orig_comment = ret["comment"]
ret["comment"] = []
# Check permissions
perms = {}
# Check current permissions
cur = stats(name, follow_symlinks=follow_symlinks)
perms["luser"] = cur["user"]
perms["lgroup"] = cur["group"]
perms["lmode"] = salt.utils.files.normalize_mode(cur["mode"])
# Record initial stat for return later. Check whether we're receiving IDs
# or names so luser == cuser comparison makes sense.
perms = {}
perms["luser"] = cur["uid"] if isinstance(user, int) else cur["user"]
perms["lgroup"] = cur["gid"] if isinstance(group, int) else cur["group"]
perms["lmode"] = cur["mode"]
is_dir = os.path.isdir(name)
is_link = os.path.islink(name)
# user/group changes if needed, then check if it worked
# Check and make user/group/mode changes, then verify they were successful
if user:
if isinstance(user, int):
user = uid_to_user(user)
if (
salt.utils.platform.is_windows()
and user_to_uid(user) != user_to_uid(perms["luser"])
) or (not salt.utils.platform.is_windows() and user != perms["luser"]):
salt.utils.platform.is_windows() and not user_to_uid(user) == cur["uid"]
) or (
not salt.utils.platform.is_windows()
and not user == cur["user"]
and not user == cur["uid"]
):
perms["cuser"] = user
if group:
if isinstance(group, int):
group = gid_to_group(group)
if (
salt.utils.platform.is_windows()
and group_to_gid(group) != group_to_gid(perms["lgroup"])
) or (not salt.utils.platform.is_windows() and group != perms["lgroup"]):
salt.utils.platform.is_windows() and not group_to_gid(group) == cur["gid"]
) or (
not salt.utils.platform.is_windows()
and not group == cur["group"]
and not group == cur["gid"]
):
perms["cgroup"] = group
if "cuser" in perms or "cgroup" in perms:
if not __opts__["test"]:
if os.path.islink(name) and not follow_symlinks:
if is_link and not follow_symlinks:
chown_func = lchown
else:
chown_func = chown
if user is None:
user = perms["luser"]
user = cur["user"]
if group is None:
group = perms["lgroup"]
group = cur["group"]
try:
chown_func(name, user, group)
# Python os.chown() does reset the suid and sgid,
# that's why setting the right mode again is needed here.
set_mode(name, mode)
err = chown_func(name, user, group)
if err:
ret["result"] = False
ret["comment"].append(err)
else:
# Python os.chown() resets the suid and sgid, so we set the
# previous mode again here. Pending mode changes will be
# applied later.
set_mode(name, cur["mode"])
except OSError:
ret["result"] = False
# Mode changes if needed
if mode is not None:
if not __opts__["test"] is True:
# File is a symlink, ignore the mode setting
# if follow_symlinks is False
if not (is_link and not follow_symlinks):
if not mode == cur["mode"]:
perms["cmode"] = mode
set_mode(name, mode)
# verify user/group/mode changes
post = stats(name, follow_symlinks=follow_symlinks)
if user:
if isinstance(user, int):
user = uid_to_user(user)
if (
salt.utils.platform.is_windows()
and user_to_uid(user)
!= user_to_uid(get_user(name, follow_symlinks=follow_symlinks))
and user != ""
salt.utils.platform.is_windows() and not user_to_uid(user) == post["uid"]
) or (
not salt.utils.platform.is_windows()
and user != get_user(name, follow_symlinks=follow_symlinks)
and user != ""
and not user == post["user"]
and not user == post["uid"]
):
if __opts__["test"] is True:
ret["changes"]["user"] = user
else:
ret["result"] = False
ret["comment"].append("Failed to change user to {}".format(user))
elif "cuser" in perms and user != "":
elif "cuser" in perms:
ret["changes"]["user"] = user
if group:
if isinstance(group, int):
group = gid_to_group(group)
if (
salt.utils.platform.is_windows()
and group_to_gid(group)
!= group_to_gid(get_group(name, follow_symlinks=follow_symlinks))
and user != ""
salt.utils.platform.is_windows() and not group_to_gid(group) == post["gid"]
) or (
not salt.utils.platform.is_windows()
and group != get_group(name, follow_symlinks=follow_symlinks)
and user != ""
and not group == post["group"]
and not group == post["gid"]
):
if __opts__["test"] is True:
ret["changes"]["group"] = group
else:
ret["result"] = False
ret["comment"].append("Failed to change group to {}".format(group))
elif "cgroup" in perms and user != "":
elif "cgroup" in perms:
ret["changes"]["group"] = group
# Mode changes if needed
if mode is not None:
# File is a symlink, ignore the mode setting
# if follow_symlinks is False
if os.path.islink(name) and not follow_symlinks:
pass
else:
mode = salt.utils.files.normalize_mode(mode)
if mode != perms["lmode"]:
if not (is_link and not follow_symlinks):
if not mode == post["mode"]:
if __opts__["test"] is True:
ret["changes"]["mode"] = mode
else:
set_mode(name, mode)
if mode != salt.utils.files.normalize_mode(get_mode(name)):
ret["result"] = False
ret["comment"].append(
"Failed to change mode to {}".format(mode)
)
else:
ret["changes"]["mode"] = mode
ret["result"] = False
ret["comment"].append("Failed to change mode to {}".format(mode))
elif "cmode" in perms:
ret["changes"]["mode"] = mode
# Modify attributes of file if needed
if attrs is not None and not is_dir:
# File is a symlink, ignore the mode setting
# if follow_symlinks is False
if os.path.islink(name) and not follow_symlinks:
pass
else:
if not (is_link and not follow_symlinks):
diff_attrs = _cmp_attrs(name, attrs)
if diff_attrs and any(attr for attr in diff_attrs):
changes = {
@ -5417,7 +5422,7 @@ def check_managed(
setype=None,
serange=None,
follow_symlinks=False,
**kwargs
**kwargs,
):
"""
Check to see what changes need to be made for a file
@ -5458,7 +5463,7 @@ def check_managed(
context,
defaults,
skip_verify,
**kwargs
**kwargs,
)
if comments:
__clean_tmp(sfn)
@ -5516,7 +5521,7 @@ def check_managed_changes(
serange=None,
verify_ssl=True,
follow_symlinks=False,
**kwargs
**kwargs,
):
"""
Return a dictionary of what changes need to be made for a file
@ -5568,7 +5573,7 @@ def check_managed_changes(
defaults,
skip_verify,
verify_ssl=verify_ssl,
**kwargs
**kwargs,
)
# Ensure that user-provided hash string is lowercase
@ -5981,7 +5986,7 @@ def manage_file(
serange=None,
verify_ssl=True,
use_etag=False,
**kwargs
**kwargs,
):
"""
Checks the destination against what was retrieved with get_managed and

View file

@ -106,18 +106,17 @@ def _set_retcode(ret, highstate=None):
def _get_pillar_errors(kwargs, pillar=None):
"""
Checks all pillars (external and internal) for errors.
Return an error message, if anywhere or None.
Check pillar for errors.
If a pillar is passed, it will be checked. Otherwise, the in-memory pillar
will be checked instead. Passing kwargs['force'] = True short-circuits the check
and always returns None, indicating no errors.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
:param pillar: pillar
:return: None or a list of error messages
"""
return (
None
if kwargs.get("force")
else (pillar or {}).get("_errors", __pillar__.get("_errors")) or None
)
return None if kwargs.get("force") else (pillar or __pillar__).get("_errors")
def _wait(jid):

View file

@ -3498,6 +3498,7 @@ def update(
consoles=None,
stop_on_reboot=False,
host_devices=None,
autostart=False,
**kwargs
):
"""
@ -3692,6 +3693,10 @@ def update(
.. versionadded:: 3003
:param autostart:
If set to ``True`` the host will start the guest after boot.
(Default: ``False``)
:return:
Returns a dictionary indicating the status of what has been done. It is structured in
@ -3759,6 +3764,7 @@ def update(
**kwargs
)
)
set_autostart(name, "on" if autostart else "off")
if clock:
offset = "utc" if clock.get("utc", True) else "localtime"

View file

@ -735,7 +735,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
return {}
attr = kwargs.get("attr")
if attr is not None:
if attr is not None and attr != "all":
attr = salt.utils.args.split_input(attr)
contextkey = "pkg.list_pkgs"
@ -1834,6 +1834,7 @@ def upgrade(
normalize=True,
minimal=False,
obsoletes=True,
diff_attr=None,
**kwargs
):
"""
@ -1968,6 +1969,26 @@ def upgrade(
.. versionadded:: 2019.2.0
diff_attr:
If a list of package attributes is specified, returned value will
contain them, eg.::
{'<package>': {
'old': {
'version': '<old-version>',
'arch': '<old-arch>'},
'new': {
'version': '<new-version>',
'arch': '<new-arch>'}}}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 3006.0
.. note::
To add extra arguments to the ``yum upgrade`` command, pass them as key
word arguments. For arguments without assignments, pass ``True``
@ -1990,7 +2011,7 @@ def upgrade(
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
old = list_pkgs()
old = list_pkgs(attr=diff_attr)
targets = []
if name or pkgs:
@ -2022,7 +2043,7 @@ def upgrade(
cmd.extend(targets)
result = _call_yum(cmd)
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
new = list_pkgs(attr=diff_attr)
ret = salt.utils.data.compare_dicts(old, new)
if result["retcode"] != 0:

View file

@ -914,7 +914,7 @@ def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
return {}
attr = kwargs.get("attr")
if attr is not None:
if attr is not None and attr != "all":
attr = salt.utils.args.split_input(attr)
includes = includes if includes else []
@ -1744,6 +1744,8 @@ def install(
def upgrade(
name=None,
pkgs=None,
refresh=True,
dryrun=False,
dist_upgrade=False,
@ -1752,6 +1754,7 @@ def upgrade(
skip_verify=False,
no_recommends=False,
root=None,
diff_attr=None,
**kwargs
): # pylint: disable=unused-argument
"""
@ -1771,6 +1774,27 @@ def upgrade(
Run a full system upgrade, a zypper upgrade
name
The name of the package to be installed. Note that this parameter is
ignored if ``pkgs`` is passed or if ``dryrun`` is set to True.
CLI Example:
.. code-block:: bash
salt '*' pkg.install name=<package name>
pkgs
A list of packages to install from a software repository. Must be
passed as a python list. Note that this parameter is ignored if
``dryrun`` is set to True.
CLI Examples:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
@ -1798,6 +1822,26 @@ def upgrade(
root
Operate on a different root directory.
diff_attr:
If a list of package attributes is specified, returned value will
contain them, eg.::
{'<package>': {
'old': {
'version': '<old-version>',
'arch': '<old-arch>'},
'new': {
'version': '<new-version>',
'arch': '<new-arch>'}}}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 3006.0
Returns a dictionary containing the changes:
.. code-block:: python
@ -1805,11 +1849,27 @@ def upgrade(
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If an attribute list is specified in ``diff_attr``, the dict will also contain
any specified attribute, eg.::
.. code-block:: python
{'<package>': {
'old': {
'version': '<old-version>',
'arch': '<old-arch>'},
'new': {
'version': '<new-version>',
'arch': '<new-arch>'}}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade name=mypackage
salt '*' pkg.upgrade pkgs='["package1", "package2"]'
salt '*' pkg.upgrade dist_upgrade=True fromrepo='["MyRepoName"]' novendorchange=True
salt '*' pkg.upgrade dist_upgrade=True dryrun=True
"""
@ -1855,12 +1915,23 @@ def upgrade(
__zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(
*cmd_update + ["--debug-solver"]
)
else:
if name or pkgs:
try:
(pkg_params, _) = __salt__["pkg_resource.parse_targets"](
name=name, pkgs=pkgs, sources=None, **kwargs
)
if pkg_params:
cmd_update.extend(pkg_params.keys())
old = list_pkgs(root=root)
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs(root=root, attr=diff_attr)
__zypper__(systemd_scope=_systemd_scope(), root=root).noraise.call(*cmd_update)
_clean_cache()
new = list_pkgs(root=root)
new = list_pkgs(root=root, attr=diff_attr)
ret = salt.utils.data.compare_dicts(old, new)
if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:

View file

@ -64,7 +64,7 @@ def parse_ssh_config(lines):
for field in _ROSTER_FIELDS:
match = re.match(field.pattern, line)
if match:
target[field.target_field] = match.group(1)
target[field.target_field] = match.group(1).strip()
for hostname in hostnames:
targets[hostname] = target

View file

@ -150,12 +150,16 @@ def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs)
salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
"""
if pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError("Pillar data must be formatted as a dictionary")
if pillar is not None:
if isinstance(pillar, dict):
kwargs["pillar"] = pillar
else:
raise SaltInvocationError("Pillar data must be formatted as a dictionary")
__opts__["file_client"] = "local"
minion = salt.minion.MasterMinion(__opts__)
running = minion.functions["state.single"](
fun, name, test=None, queue=False, pillar=pillar, **kwargs
fun, name, test=None, queue=False, **kwargs
)
ret = {minion.opts["id"]: running}
__jid_event__.fire_event({"data": ret, "outputter": "highstate"}, "progress")

View file

@ -299,6 +299,7 @@ import salt.loader
import salt.payload
import salt.utils.data
import salt.utils.dateutils
import salt.utils.dictdiffer
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.hashutils
@ -745,9 +746,17 @@ def _check_directory(
fchange = {}
path = os.path.join(root, fname)
stats = __salt__["file.stats"](path, None, follow_symlinks)
if user is not None and user != stats.get("user"):
if (
user is not None
and not user == stats.get("user")
and not user == stats.get("uid")
):
fchange["user"] = user
if group is not None and group != stats.get("group"):
if (
group is not None
and not group == stats.get("group")
and not group == stats.get("gid")
):
fchange["group"] = group
smode = salt.utils.files.normalize_mode(stats.get("mode"))
file_mode = salt.utils.files.normalize_mode(file_mode)
@ -8010,6 +8019,7 @@ def serialize(
salt.utils.data.repack_dictlist(deserializer_opts)
)
existing_data = None
if merge_if_exists:
if os.path.isfile(name):
if deserializer_name not in __serializers__:
@ -8100,27 +8110,33 @@ def serialize(
else:
ret["result"] = True
ret["comment"] = "The file {} is in the correct state".format(name)
return ret
else:
ret = __salt__["file.manage_file"](
name=name,
sfn="",
ret=ret,
source=None,
source_sum={},
user=user,
group=group,
mode=mode,
attrs=None,
saltenv=__env__,
backup=backup,
makedirs=makedirs,
template=None,
show_changes=show_changes,
encoding=encoding,
encoding_errors=encoding_errors,
contents=contents,
)
return __salt__["file.manage_file"](
name=name,
sfn="",
ret=ret,
source=None,
source_sum={},
user=user,
group=group,
mode=mode,
attrs=None,
saltenv=__env__,
backup=backup,
makedirs=makedirs,
template=None,
show_changes=show_changes,
encoding=encoding,
encoding_errors=encoding_errors,
contents=contents,
)
if isinstance(existing_data, dict) and isinstance(merged_data, dict):
ret["changes"]["diff"] = salt.utils.dictdiffer.recursive_diff(
existing_data, merged_data
).diffs
return ret
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode="0600"):

View file

@ -567,8 +567,6 @@ def latest(
directories. The example also sets up the ``ssh_known_hosts`` ssh key
required to perform the git checkout.
Also, it has been reported that the SCP-like syntax for
.. code-block:: yaml
gitlab.example.com:

View file

@ -446,8 +446,18 @@ def managed(name, ppa=None, copr=None, aptkey=True, **kwargs):
# out of the state itself and into a module that it makes more sense
# to use. Most package providers will simply return the data provided
# it doesn't require any "specialized" data massaging.
if "pkg.expand_repo_def" in __salt__:
sanitizedkwargs = __salt__["pkg.expand_repo_def"](repo=repo, **kwargs)
if __grains__.get("os_family") == "Debian":
from salt.modules.aptpkg import _expand_repo_def
os_name = __grains__["os"]
lsb_distrib_codename = __grains__["lsb_distrib_codename"]
sanitizedkwargs = _expand_repo_def(
os_name=os_name,
lsb_distrib_codename=lsb_distrib_codename,
repo=repo,
**kwargs
)
else:
sanitizedkwargs = kwargs

View file

@ -296,6 +296,7 @@ def defined(
stop_on_reboot=False,
live=True,
host_devices=None,
autostart=False,
):
"""
Starts an existing guest, or defines and starts a new VM with specified arguments.
@ -598,6 +599,10 @@ def defined(
.. versionadded:: 3003
:param autostart:
If set to ``True`` the host will start the guest after boot.
(Default: ``False``)
.. rubric:: Example States
Make sure a virtual machine called ``domain_name`` is defined:
@ -667,6 +672,7 @@ def defined(
clock=clock,
stop_on_reboot=stop_on_reboot,
host_devices=host_devices,
autostart=autostart,
)
ret["changes"][name] = status
if not status.get("definition"):
@ -749,6 +755,7 @@ def running(
consoles=None,
stop_on_reboot=False,
host_devices=None,
autostart=False,
):
"""
Starts an existing guest, or defines and starts a new VM with specified arguments.
@ -952,6 +959,10 @@ def running(
.. versionadded:: 3003
:param autostart:
If set to ``True`` the host will start the guest after boot.
(Default: ``False``)
.. rubric:: Example States
Make sure an already-defined virtual machine called ``domain_name`` is running:
@ -1023,6 +1034,7 @@ def running(
serials=serials,
consoles=consoles,
host_devices=host_devices,
autostart=autostart,
)
result = True if not __opts__["test"] else None

View file

@ -6,6 +6,7 @@ Functions for working with files
import codecs
import contextlib
import errno
import io
import logging
import os
import re
@ -382,6 +383,13 @@ def fopen(*args, **kwargs):
if not binary and not kwargs.get("newline", None):
kwargs["newline"] = ""
# Workaround callers with bad buffering setting for binary files
if kwargs.get("buffering") == 1 and "b" in kwargs.get("mode", ""):
log.debug(
"Line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used"
)
kwargs["buffering"] = io.DEFAULT_BUFFER_SIZE
f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage
if is_fcntl_available():

View file

@ -2052,6 +2052,12 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
)
ret = ret[next(iter(ret))]
assert ret["result"], ret
assert "changes" in ret
assert "diff" in ret["changes"]
assert "foo" in ret["changes"]["diff"]
assert "abc" in ret["changes"]["diff"]["foo"]
assert "new" in ret["changes"]["diff"]["foo"]["abc"]
assert ret["changes"]["diff"]["foo"]["abc"]["new"], 123
with salt.utils.files.fopen(name) as fp_:
serialized_data = salt.serializers.configparser.deserialize(fp_)
@ -2098,6 +2104,12 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
)
ret = ret[next(iter(ret))]
assert ret["result"], ret
assert "changes" in ret
assert "diff" in ret["changes"]
assert "foo" in ret["changes"]["diff"]
assert "abc" in ret["changes"]["diff"]["foo"]
assert "new" in ret["changes"]["diff"]["foo"]["abc"]
assert ret["changes"]["diff"]["foo"]["abc"]["new"], 123
with salt.utils.files.fopen(name, "rb") as fp_:
serialized_data = salt.serializers.plist.deserialize(fp_)
@ -2134,6 +2146,12 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
)
ret = ret[next(iter(ret))]
assert ret["result"], ret
assert "changes" in ret
assert "diff" in ret["changes"]
assert "foo" in ret["changes"]["diff"]
assert "abc" in ret["changes"]["diff"]["foo"]
assert "new" in ret["changes"]["diff"]["foo"]["abc"]
assert ret["changes"]["diff"]["foo"]["abc"]["new"], 123
with salt.utils.files.fopen(name, "rb") as fp_:
serialized_data = salt.serializers.plist.deserialize(fp_)

View file

@ -1,3 +1,4 @@
import os
import pathlib
import shutil
@ -207,12 +208,19 @@ def test_del_repo(revert_repo_file):
assert "Repo {} doesn't exist".format(test_repo) in exc.value.message
def test_expand_repo_def():
@pytest.mark.skipif(
not os.path.isfile("/etc/apt/sources.list"), reason="Missing /etc/apt/sources.list"
)
def test__expand_repo_def(grains):
"""
Test aptpkg.expand_repo_def when the repo exists.
Test aptpkg._expand_repo_def when the repo exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.expand_repo_def(repo=test_repo)
ret = aptpkg._expand_repo_def(
os_name=grains["os"],
lsb_distrib_codename=grains.get("lsb_distrib_codename"),
repo=test_repo,
)
for key in [
"comps",
"dist",

View file

@ -0,0 +1,134 @@
import textwrap
import pytest
from saltfactories.utils.functional import StateResult
pytestmark = [
pytest.mark.slow_test,
]
@pytest.fixture(scope="module")
def reset_pillar(salt_call_cli):
try:
# Run tests
yield
finally:
# Refresh pillar once all tests are done.
ret = salt_call_cli.run("saltutil.refresh_pillar", wait=True)
assert ret.exitcode == 0
assert ret.json is True
@pytest.fixture
def testfile_path(tmp_path, base_env_state_tree_root_dir):
testfile = tmp_path / "testfile"
sls_contents = textwrap.dedent(
"""
{}:
file:
- managed
- source: salt://testfile
- makedirs: true
- mode: 644
""".format(
testfile
)
)
with pytest.helpers.temp_file(
"sls-id-test.sls", sls_contents, base_env_state_tree_root_dir
):
yield testfile
@pytest.mark.usefixtures("testfile_path", "reset_pillar")
def test_state_apply_aborts_on_pillar_error(
salt_cli,
salt_minion,
base_env_pillar_tree_root_dir,
):
"""
Test state.apply with error in pillar.
"""
pillar_top_file = textwrap.dedent(
"""
base:
'{}':
- basic
""".format(
salt_minion.id
)
)
basic_pillar_file = textwrap.dedent(
"""
syntax_error
"""
)
with pytest.helpers.temp_file(
"top.sls", pillar_top_file, base_env_pillar_tree_root_dir
), pytest.helpers.temp_file(
"basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
):
expected_comment = [
"Pillar failed to render with the following messages:",
"SLS 'basic' does not render to a dictionary",
]
shell_result = salt_cli.run(
"state.apply", "sls-id-test", minion_tgt=salt_minion.id
)
assert shell_result.exitcode == 1
assert shell_result.json == expected_comment
@pytest.mark.usefixtures("testfile_path", "reset_pillar")
def test_state_apply_continues_after_pillar_error_is_fixed(
salt_cli,
salt_minion,
base_env_pillar_tree_root_dir,
):
"""
Test state.apply with error in pillar.
"""
pillar_top_file = textwrap.dedent(
"""
base:
'{}':
- basic
"""
).format(salt_minion.id)
basic_pillar_file_error = textwrap.dedent(
"""
syntax_error
"""
)
basic_pillar_file = textwrap.dedent(
"""
syntax_error: Fixed!
"""
)
# save pillar render error in minion's in-memory pillar
with pytest.helpers.temp_file(
"top.sls", pillar_top_file, base_env_pillar_tree_root_dir
), pytest.helpers.temp_file(
"basic.sls", basic_pillar_file_error, base_env_pillar_tree_root_dir
):
shell_result = salt_cli.run(
"saltutil.refresh_pillar", minion_tgt=salt_minion.id
)
assert shell_result.exitcode == 0
# run state.apply with fixed pillar render error
with pytest.helpers.temp_file(
"top.sls", pillar_top_file, base_env_pillar_tree_root_dir
), pytest.helpers.temp_file(
"basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
):
shell_result = salt_cli.run(
"state.apply", "sls-id-test", minion_tgt=salt_minion.id
)
assert shell_result.exitcode == 0
state_result = StateResult(shell_result.json)
assert state_result.result is True
assert state_result.changes == {"diff": "New file", "mode": "0644"}

View file

@ -7,13 +7,19 @@ import pytest
import salt.modules.file as filemod
import salt.utils.files
import salt.utils.platform
from tests.support.mock import Mock, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {filemod: {"__context__": {}}}
return {
filemod: {
"__context__": {},
"__opts__": {"test": False},
}
}
@pytest.fixture
@ -143,3 +149,63 @@ def test_check_managed_changes_follow_symlinks(a_link, tfile):
follow_symlinks=True,
)
assert ret == {}
@pytest.mark.skip_on_windows(reason="os.symlink is not available on Windows")
@patch("os.path.exists", Mock(return_value=True))
def test_check_perms_user_group_name_and_id():
filename = "/path/to/fnord"
tests = [
# user/group changes needed by name
{
"input": {"user": "cuser", "group": "cgroup"},
"expected": {"user": "cuser", "group": "cgroup"},
},
# no changes needed by name
{"input": {"user": "luser", "group": "lgroup"}, "expected": {}},
# user/group changes needed by id
{
"input": {"user": 1001, "group": 2001},
"expected": {"user": 1001, "group": 2001},
},
# no user/group changes needed by id
{"input": {"user": 3001, "group": 4001}, "expected": {}},
]
for test in tests:
# Consistent initial file stats
stat_out = {
"user": "luser",
"group": "lgroup",
"uid": 3001,
"gid": 4001,
"mode": "123",
}
patch_stats = patch(
"salt.modules.file.stats",
Mock(return_value=stat_out),
)
# "chown" the file to the permissions we want in test["input"]
# pylint: disable=W0640
def fake_chown(cmd, *args, **kwargs):
for k, v in test["input"].items():
stat_out.update({k: v})
patch_chown = patch(
"salt.modules.file.chown",
Mock(side_effect=fake_chown),
)
with patch_stats, patch_chown:
ret, pre_post = filemod.check_perms(
name=filename,
ret={},
user=test["input"]["user"],
group=test["input"]["group"],
mode="123",
follow_symlinks=False,
)
assert ret["changes"] == test["expected"]

View file

@ -1,5 +1,6 @@
import logging
import os
import re
import shutil
import textwrap
@ -361,6 +362,27 @@ def test_group_to_gid_int():
assert ret == group
def test__get_flags():
"""
Test to ensure _get_flags returns a regex flag
"""
flags = 10
ret = filemod._get_flags(flags)
assert ret == re.IGNORECASE | re.MULTILINE
flags = "MULTILINE"
ret = filemod._get_flags(flags)
assert ret == re.MULTILINE
flags = ["IGNORECASE", "MULTILINE"]
ret = filemod._get_flags(flags)
assert ret == re.IGNORECASE | re.MULTILINE
flags = re.IGNORECASE | re.MULTILINE
ret = filemod._get_flags(flags)
assert ret == re.IGNORECASE | re.MULTILINE
def test_patch():
with patch("os.path.isdir", return_value=False) as mock_isdir, patch(
"salt.utils.path.which", return_value="/bin/patch"

View file

@ -112,7 +112,7 @@ def test_file_check_perms(tfile3):
"name": tfile3,
"result": True,
},
{"luser": "root", "lmode": "0644", "lgroup": "root"},
{"cmode": "0664", "luser": "root", "lmode": "0644", "lgroup": "root"},
)
# Disable lsattr calls

View file

@ -5,11 +5,13 @@
import datetime
import logging
import os
from collections import namedtuple
import pytest
import salt.config
import salt.loader
import salt.loader.context
import salt.modules.config as config
import salt.modules.state as state
import salt.state
@ -1201,83 +1203,36 @@ def test_lock_saltenv():
)
def test_get_pillar_errors_CC():
"""
Test _get_pillar_errors function.
CC: External clean, Internal clean
:return:
"""
for int_pillar, ext_pillar in [
({"foo": "bar"}, {"fred": "baz"}),
({"foo": "bar"}, None),
({}, {"fred": "baz"}),
]:
with patch("salt.modules.state.__pillar__", int_pillar):
for opts, res in [
({"force": True}, None),
({"force": False}, None),
({}, None),
]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
PillarPair = namedtuple("PillarPair", ["in_memory", "fresh"])
pillar_combinations = [
(PillarPair({"foo": "bar"}, {"fred": "baz"}), None),
(PillarPair({"foo": "bar"}, {"fred": "baz", "_errors": ["Failure"]}), ["Failure"]),
(PillarPair({"foo": "bar"}, None), None),
(PillarPair({"foo": "bar", "_errors": ["Failure"]}, None), ["Failure"]),
(PillarPair({"foo": "bar", "_errors": ["Failure"]}, {"fred": "baz"}), None),
]
def test_get_pillar_errors_EC():
@pytest.mark.parametrize("pillar,expected_errors", pillar_combinations)
def test_get_pillar_errors(pillar: PillarPair, expected_errors):
"""
Test _get_pillar_errors function.
EC: External erroneous, Internal clean
:return:
"""
errors = ["failure", "everywhere"]
for int_pillar, ext_pillar in [
({"foo": "bar"}, {"fred": "baz", "_errors": errors}),
({}, {"fred": "baz", "_errors": errors}),
]:
with patch("salt.modules.state.__pillar__", int_pillar):
for opts, res in [
({"force": True}, None),
({"force": False}, errors),
({}, errors),
]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
Test the _get_pillar_errors function.
def test_get_pillar_errors_EE():
There are three cases to consider:
1. kwargs['force'] is True -> None, no matter what's in pillar/__pillar__
2. pillar kwarg is available -> only check pillar, no matter what's in __pillar__
3. pillar kwarg is not available -> check __pillar__
"""
Test _get_pillar_errors function.
CC: External erroneous, Internal erroneous
:return:
"""
errors = ["failure", "everywhere"]
for int_pillar, ext_pillar in [
({"foo": "bar", "_errors": errors}, {"fred": "baz", "_errors": errors})
]:
with patch("salt.modules.state.__pillar__", int_pillar):
for opts, res in [
({"force": True}, None),
({"force": False}, errors),
({}, errors),
]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
def test_get_pillar_errors_CE():
"""
Test _get_pillar_errors function.
CC: External clean, Internal erroneous
:return:
"""
errors = ["failure", "everywhere"]
for int_pillar, ext_pillar in [
({"foo": "bar", "_errors": errors}, {"fred": "baz"}),
({"foo": "bar", "_errors": errors}, None),
]:
with patch("salt.modules.state.__pillar__", int_pillar):
for opts, res in [
({"force": True}, None),
({"force": False}, errors),
({}, errors),
]:
assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
ctx = salt.loader.context.LoaderContext()
named_ctx = ctx.named_context("__pillar__", pillar.in_memory)
with patch("salt.modules.state.__pillar__", named_ctx, create=True):
assert (
state._get_pillar_errors(kwargs={"force": True}, pillar=pillar.fresh)
is None
)
assert (
state._get_pillar_errors(kwargs={}, pillar=pillar.fresh) == expected_errors
)
def test_event():

View file

@ -962,15 +962,17 @@ def test_list_repos():
assert repos[source_uri][0]["uri"][-1] == "/"
def test_expand_repo_def():
def test__expand_repo_def():
"""
Checks results from expand_repo_def
Checks results from _expand_repo_def
"""
source_file = "/etc/apt/sources.list"
# Valid source
repo = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
sanitized = aptpkg.expand_repo_def(repo=repo, file=source_file)
sanitized = aptpkg._expand_repo_def(
os_name="debian", lsb_distrib_codename="stretch", repo=repo, file=source_file
)
assert isinstance(sanitized, dict)
assert "uri" in sanitized
@ -980,8 +982,51 @@ def test_expand_repo_def():
# Pass the architecture and make sure it is added to the line attribute
repo = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
sanitized = aptpkg.expand_repo_def(
repo=repo, file=source_file, architectures="amd64"
sanitized = aptpkg._expand_repo_def(
os_name="debian",
lsb_distrib_codename="stretch",
repo=repo,
file=source_file,
architectures="amd64",
)
# Make sure line is in the dict
assert isinstance(sanitized, dict)
assert "line" in sanitized
# Make sure the architecture is in line
assert (
sanitized["line"]
== "deb [arch=amd64] http://cdn-aws.deb.debian.org/debian/ stretch main"
)
def test__expand_repo_def_cdrom():
"""
Checks results from _expand_repo_def
"""
source_file = "/etc/apt/sources.list"
# Valid source
repo = "# deb cdrom:[Debian GNU/Linux 11.4.0 _Bullseye_ - Official amd64 NETINST 20220709-10:31]/ bullseye main\n"
sanitized = aptpkg._expand_repo_def(
os_name="debian", lsb_distrib_codename="bullseye", repo=repo, file=source_file
)
assert isinstance(sanitized, dict)
assert "uri" in sanitized
# Make sure last character in of the URI is still a /
assert sanitized["uri"][-1] == "/"
# Pass the architecture and make sure it is added to the line attribute
repo = "deb http://cdn-aws.deb.debian.org/debian/ stretch main\n"
sanitized = aptpkg._expand_repo_def(
os_name="debian",
lsb_distrib_codename="stretch",
repo=repo,
file=source_file,
architectures="amd64",
)
# Make sure line is in the dict

View file

@ -4,17 +4,27 @@
import os
import textwrap
import pytest
import salt.modules.pkg_resource as pkg_resource
import salt.modules.zypperpkg as zypper
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {zypper: {"rpm": None}, pkg_resource: {}}
return {
zypper: {
"rpm": None,
"_systemd_scope": MagicMock(return_value=False),
"osrelease_info": [15, 3],
"__salt__": {"pkg_resource.parse_targets": pkg_resource.parse_targets},
},
pkg_resource: {"__grains__": {"os": "SUSE"}},
}
def test_list_pkgs_no_context():
@ -212,3 +222,137 @@ def test_pkg_list_holds():
ret = zypper.list_holds()
assert len(ret) == 1
assert "bar-2:2.3.4-2.1.*" in ret
@pytest.mark.parametrize(
"package,pre_version,post_version,fromrepo_param,name_param,pkgs_param,diff_attr_param",
[
("vim", "1.1", "1.2", [], "", [], "all"),
("kernel-default", "1.1", "1.1,1.2", ["dummy", "dummy2"], "", [], None),
("vim", "1.1", "1.2", [], "vim", [], None),
],
)
@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
def test_upgrade(
package,
pre_version,
post_version,
fromrepo_param,
name_param,
pkgs_param,
diff_attr_param,
):
with patch(
"salt.modules.zypperpkg.__zypper__.noraise.call"
) as zypper_mock, patch.object(
zypper,
"list_pkgs",
MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
) as list_pkgs_mock:
expected_call = ["update", "--auto-agree-with-licenses"]
for repo in fromrepo_param:
expected_call.extend(["--repo", repo])
if pkgs_param:
expected_call.extend(pkgs_param)
elif name_param:
expected_call.append(name_param)
result = zypper.upgrade(
name=name_param,
pkgs=pkgs_param,
fromrepo=fromrepo_param,
diff_attr=diff_attr_param,
)
zypper_mock.assert_any_call(*expected_call)
assert result == {package: {"old": pre_version, "new": post_version}}
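# diff_attr must be forwarded to the list_pkgs calls used for the old/new comparison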
list_pkgs_mock.assert_any_call(root=None, attr=diff_attr_param)
@pytest.mark.parametrize(
"package,pre_version,post_version,fromrepo_param",
[
("vim", "1.1", "1.2", []),
("emacs", "1.1", "1.2", ["Dummy", "Dummy2"]),
],
)
@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
def test_dist_upgrade(package, pre_version, post_version, fromrepo_param):
with patch(
"salt.modules.zypperpkg.__zypper__.noraise.call"
) as zypper_mock, patch.object(
zypper,
"list_pkgs",
MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
):
expected_call = ["dist-upgrade", "--auto-agree-with-licenses"]
for repo in fromrepo_param:
expected_call.extend(["--from", repo])
result = zypper.upgrade(dist_upgrade=True, fromrepo=fromrepo_param)
zypper_mock.assert_any_call(*expected_call)
assert result == {package: {"old": pre_version, "new": post_version}}
@pytest.mark.parametrize(
"package,pre_version,post_version,fromrepo_param",
[
("vim", "1.1", "1.1", []),
("emacs", "1.1", "1.1", ["Dummy", "Dummy2"]),
],
)
@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
def test_dist_upgrade_dry_run(package, pre_version, post_version, fromrepo_param):
with patch(
"salt.modules.zypperpkg.__zypper__.noraise.call"
) as zypper_mock, patch.object(
zypper,
"list_pkgs",
MagicMock(side_effect=[{package: pre_version}, {package: post_version}]),
):
expected_call = ["dist-upgrade", "--auto-agree-with-licenses", "--dry-run"]
for repo in fromrepo_param:
expected_call.extend(["--from", repo])
zypper.upgrade(dist_upgrade=True, dryrun=True, fromrepo=fromrepo_param)
zypper_mock.assert_any_call(*expected_call)
# dryrun=True causes two calls, one with a trailing --debug-solver flag
expected_call.append("--debug-solver")
zypper_mock.assert_any_call(*expected_call)
@patch.object(zypper, "refresh_db", MagicMock(return_value=True))
def test_dist_upgrade_failure():
zypper_output = textwrap.dedent(
"""\
Loading repository data...
Reading installed packages...
Computing distribution upgrade...
Use 'zypper repos' to get the list of defined repositories.
Repository 'DUMMY' not found by its alias, number, or URI.
"""
)
call_spy = MagicMock()
zypper_mock = MagicMock()
zypper_mock.stdout = zypper_output
zypper_mock.stderr = ""
zypper_mock.exit_code = 3
zypper_mock.noraise.call = call_spy
with patch("salt.modules.zypperpkg.__zypper__", zypper_mock), patch.object(
zypper, "list_pkgs", MagicMock(side_effect=[{"vim": 1.1}, {"vim": 1.1}])
):
expected_call = [
"dist-upgrade",
"--auto-agree-with-licenses",
"--from",
"Dummy",
]
with pytest.raises(CommandExecutionError) as exc:
zypper.upgrade(dist_upgrade=True, fromrepo=["Dummy"])
call_spy.assert_called_with(*expected_call)
assert exc.value.info["changes"] == {}
assert exc.value.info["result"]["stdout"] == zypper_output

View file

@@ -1271,6 +1271,15 @@ def test_update_cpu_simple(make_mock_vm):
assert domain_mock.setVcpusFlags.call_args[0][0] == 2
def test_update_autostart(make_mock_vm):
"""
Test virt.update(), simple autostart update
"""
domain_mock = make_mock_vm()
virt.update("my_vm", autostart=True)
domain_mock.setAutostart.assert_called_with(1)
def test_update_add_cpu_topology(make_mock_vm):
"""
Test virt.update(), add cpu topology settings

View file

@@ -0,0 +1,83 @@
"""test for pillar stack pillar"""
import pytest
import salt.pillar.stack as stack
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
loader_globals = {
"__grains__": {"os": "Debian", "os_family": "Debian"},
"__opts__": {"saltenv": "dev", "pillarenv": "dev"},
}
return {stack: loader_globals}
def mock_stack_pillar(mock_output, *args, **kwargs):
# mock: jenv.get_template(filename).render(stack=stack)
class MockJinja:
def __call__(self, *args, **kwargs):
return self
render = MagicMock(side_effect=mock_output)
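# successive render() calls return the next mocked file: first stack.cfg, then the YAML it lists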
with patch("os.path.isfile", MagicMock(return_value=True)), patch(
"jinja2.environment.Environment.get_template", MockJinja()
), patch("glob.glob", MagicMock(return_value=["/path/to/stack.cfg"])):
result = stack.ext_pillar( # (minion_id, pillar, *args, **kwargs)
"minion_id", {}, *args, **kwargs
)
return result
def test_extpillar_stack1():
mock_output = [
"/path/to/filename.yml\n", # mocked contents of /path/to/stack.cfg
"""
foo: foo1 # jinja test
bar: bar1
""", # mocked contents of filename.yml
]
fake_dict = {"foo": "foo1", "bar": "bar1"}
# config with a single file
result = mock_stack_pillar(mock_output, "/path/to/stack.cfg")
assert fake_dict == result
# config with an opts:saltenv
result = mock_stack_pillar(
mock_output,
**{
"opts:saltenv": { # **kwargs
"dev": "/path/to/dev/static.cfg",
}
}
)
assert fake_dict == result
# config with an opts:saltenv and __env__ substitution
result = mock_stack_pillar(
mock_output,
**{
"opts:saltenv": { # **kwargs
"__env__": "/path/to/__env__/dynamic.cfg",
}
}
)
assert fake_dict == result
def test_extpillar_stack_exceptions():
# yaml indentation error
mock_output = [
"/path/to/filename.yml\n", # mocked contents of /path/to/stack.cfg
"""
foo: foo1
bar: bar1 # yaml indentation error
""", # mocked contents of filename.yml
]
pytest.raises(Exception, mock_stack_pillar, mock_output, "/path/to/stack.cfg")

View file

@@ -0,0 +1,49 @@
"""
unit tests for clustershell roster
"""
import pytest
from tests.support.mock import MagicMock, patch
try:
from ClusterShell.NodeSet import NodeSet # pylint: disable=unused-import
HAS_CLUSTERSHELL = True
except (ImportError, OSError) as e:
HAS_CLUSTERSHELL = False
pytestmark = [
pytest.mark.skipif(
HAS_CLUSTERSHELL is False,
reason="Install Python Clustershell bindings before running these tests.",
)
]
def test_targets():
mock_socket = MagicMock()
mock_nodeset = MagicMock()
mock_nodeset.NodeSet.return_value = ["foo"]
with patch.dict(
"sys.modules", **{"socket": mock_socket, "ClusterShell.NodeSet": mock_nodeset}
):
import salt.roster.clustershell
salt.roster.clustershell.__opts__ = {}
with patch.dict(
salt.roster.clustershell.__opts__,
{"ssh_scan_ports": [1, 2, 3], "ssh_scan_timeout": 30},
):
# Reimports are necessary to re-init the namespace.
# pylint: disable=unused-import
import socket
from ClusterShell.NodeSet import NodeSet
# pylint: enable=unused-import
ret = salt.roster.clustershell.targets("foo")
mock_socket.gethostbyname.assert_any_call("foo")
assert "foo" in ret
assert ret["foo"]["port"] == 3

View file

@@ -0,0 +1,94 @@
"""
Test the scan roster.
"""
import socket
import pytest
import salt.roster.scan as scan_
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {scan_: {"__opts__": {"ssh_scan_ports": "22", "ssh_scan_timeout": 0.01}}}
def test_single_ip():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127.0.0.1")
assert ret == {"127.0.0.1": {"host": "127.0.0.1", "port": 22}}
def test_single_network():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127.0.0.0/30")
assert ret == {
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
}
def test_multiple_ips():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(["127.0.0.1", "127.0.0.2"], tgt_type="list")
assert ret == {
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
}
def test_multiple_networks():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(
["127.0.0.0/30", "127.0.2.1", "127.0.1.0/30"], tgt_type="list"
)
assert ret == {
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
"127.0.2.1": {"host": "127.0.2.1", "port": 22},
"127.0.1.1": {"host": "127.0.1.1", "port": 22},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
}
def test_malformed_ip():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127001")
assert ret == {}
def test_multiple_with_malformed():
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(["127.0.0.1", "127002", "127.0.1.0/30"], tgt_type="list")
assert ret == {
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.1.1": {"host": "127.0.1.1", "port": 22},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
}
def test_multiple_no_connection():
"""Test that minion files in the directory roster match and render."""
socket_mock = MagicMock()
socket_mock.connect = MagicMock(
side_effect=[None, socket.error(), None, socket.error(), None]
)
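# connect fails for the 2nd and 4th hosts scanned, so those come back as empty entries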
with patch("salt.utils.network.get_socket", return_value=socket_mock):
ret = scan_.targets(
["127.0.0.0/30", "127.0.2.1", "127.0.1.0/30"], tgt_type="list"
)
assert ret == {
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {},
"127.0.2.1": {"host": "127.0.2.1", "port": 22},
"127.0.1.1": {},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
}

View file

@@ -0,0 +1,103 @@
import collections
import textwrap
import pytest
import salt.roster.sshconfig as sshconfig
from tests.support.mock import mock_open, patch
@pytest.fixture
def target_abc():
return collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_abc"),
("host", "abc.asdfgfdhgjkl.com"),
]
)
@pytest.fixture
def target_abc123():
return collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_abc"),
("host", "abc123.asdfgfdhgjkl.com"),
]
)
@pytest.fixture
def target_def():
return collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_def"),
("host", "def.asdfgfdhgjkl.com"),
]
)
@pytest.fixture
def all_(target_abc, target_abc123, target_def):
return {
"abc.asdfgfdhgjkl.com": target_abc,
"abc123.asdfgfdhgjkl.com": target_abc123,
"def.asdfgfdhgjkl.com": target_def,
}
@pytest.fixture
def abc_glob(target_abc, target_abc123):
return {
"abc.asdfgfdhgjkl.com": target_abc,
"abc123.asdfgfdhgjkl.com": target_abc123,
}
@pytest.fixture
def mock_fp():
sample_ssh_config = textwrap.dedent(
"""
Host *
User user.mcuserface
Host abc*
IdentityFile ~/.ssh/id_rsa_abc
Host def*
IdentityFile ~/.ssh/id_rsa_def
Host abc.asdfgfdhgjkl.com
HostName 123.123.123.123
Host abc123.asdfgfdhgjkl.com
HostName 123.123.123.124
Host def.asdfgfdhgjkl.com
HostName 234.234.234.234
"""
)
return mock_open(read_data=sample_ssh_config)
@pytest.fixture
def configure_loader_modules():
return {sshconfig: {}}
def test_all(mock_fp, all_):
with patch("salt.utils.files.fopen", mock_fp):
with patch("salt.roster.sshconfig._get_ssh_config_file"):
targets = sshconfig.targets("*")
assert targets == all_
def test_abc_glob(mock_fp, abc_glob):
with patch("salt.utils.files.fopen", mock_fp):
with patch("salt.roster.sshconfig._get_ssh_config_file"):
targets = sshconfig.targets("abc*")
assert targets == abc_glob

View file

@@ -0,0 +1,105 @@
"""
unittests for terraform roster
"""
import pathlib
import pytest
from salt.roster import terraform
from salt.utils import roster_matcher
@pytest.fixture
def roster_file():
return pathlib.Path(__file__).parent / "terraform.data" / "terraform.tfstate"
@pytest.fixture
def pki_dir():
return pathlib.Path(__file__).parent / "terraform.data"
@pytest.fixture
def configure_loader_modules(roster_file, pki_dir):
# opts = salt.config.master_config(
# os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master")
# )
# utils = salt.loader.utils(opts, whitelist=["roster_matcher"])
return {
terraform: {
"__utils__": {
"roster_matcher.targets": roster_matcher.targets,
},
"__opts__": {
"roster_file": str(roster_file),
"pki_dir": str(pki_dir),
},
},
roster_matcher: {},
}
def test_default_output(pki_dir):
"""
Test the output of a fixture tfstate file which contains libvirt
resources.
"""
expected_result = {
"db0": {
"host": "192.168.122.174",
"user": "root",
"passwd": "dbpw",
"tty": True,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
"db1": {
"host": "192.168.122.190",
"user": "root",
"passwd": "dbpw",
"tty": True,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
"web0": {
"host": "192.168.122.106",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
"web1": {
"host": "192.168.122.235",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
}
ret = terraform.targets("*")
assert expected_result == ret
def test_default_matching(pki_dir):
"""
Test the output of a fixture tfstate file which contains libvirt
resources using matching
"""
expected_result = {
"web0": {
"host": "192.168.122.106",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
"web1": {
"host": "192.168.122.235",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": str(pki_dir / "ssh" / "salt-ssh.rsa"),
},
}
ret = terraform.targets("*web*")
assert expected_result == ret

View file

@@ -0,0 +1,40 @@
import pytest
from salt.runners import state as state_runner
from tests.support.mock import Mock, patch
@pytest.fixture
def configure_loader_modules():
return {state_runner: {"__opts__": {}, "__jid_event__": Mock()}}
def test_orchestrate_single_passes_pillar():
"""
test state.orchestrate_single passes given pillar to state.single
"""
mock_master_minion = Mock()
mock_state_single = Mock()
mock_master_minion.functions = {"state.single": mock_state_single}
mock_master_minion.opts = {"id": "dummy"}
test_pillar = {"test_entry": "exists"}
with patch("salt.minion.MasterMinion", Mock(return_value=mock_master_minion)):
state_runner.orchestrate_single(
fun="pillar.get", name="test_entry", pillar=test_pillar
)
assert mock_state_single.call_args.kwargs["pillar"] == test_pillar
def test_orchestrate_single_does_not_pass_none_pillar():
"""
test state.orchestrate_single does not pass pillar=None to state.single
"""
mock_master_minion = Mock()
mock_state_single = Mock()
mock_master_minion.functions = {"state.single": mock_state_single}
mock_master_minion.opts = {"id": "dummy"}
with patch("salt.minion.MasterMinion", Mock(return_value=mock_master_minion)):
state_runner.orchestrate_single(
fun="pillar.get", name="test_entry", pillar=None
)
assert "pillar" not in mock_state_single.call_args.kwargs

View file

@@ -69,6 +69,7 @@ def domain_update_call(
live=True,
host_devices=None,
test=False,
autostart=False,
):
"""
Create a call object with the missing default parameters from virt.update()
@@ -96,4 +97,5 @@ def domain_update_call(
clock=clock,
stop_on_reboot=stop_on_reboot,
host_devices=host_devices,
autostart=autostart,
)

View file

@@ -2,10 +2,9 @@ import pytest
import salt.states.virt as virt
from salt.exceptions import CommandExecutionError
from tests.pytests.unit.states.virt.helpers import domain_update_call
from tests.support.mock import MagicMock, patch
from .helpers import domain_update_call
@pytest.fixture
def configure_loader_modules(libvirt_mock):
@@ -208,30 +207,9 @@ def test_defined_update_error(test):
"comment": "Domain myvm updated with live update(s) failures",
}
init_mock.assert_not_called()
update_mock.assert_called_with(
"myvm",
cpu=2,
boot_dev="cdrom hd",
mem=None,
disk_profile=None,
disks=None,
nic_profile=None,
interfaces=None,
graphics=None,
live=True,
connection=None,
username=None,
password=None,
boot=None,
numatune=None,
test=test,
hypervisor_features=None,
clock=None,
serials=None,
consoles=None,
stop_on_reboot=False,
host_devices=None,
)
assert update_mock.call_args_list == [
domain_update_call("myvm", cpu=2, test=test, boot_dev="cdrom hd")
]
def test_defined_update_definition_error(test):
@@ -509,30 +487,7 @@ def test_running_update_error():
"result": True,
"comment": "Domain myvm updated with live update(s) failures",
}
update_mock.assert_called_with(
"myvm",
cpu=2,
mem=None,
disk_profile=None,
disks=None,
nic_profile=None,
interfaces=None,
graphics=None,
live=True,
connection=None,
username=None,
password=None,
boot=None,
numatune=None,
test=False,
boot_dev=None,
hypervisor_features=None,
clock=None,
serials=None,
consoles=None,
stop_on_reboot=False,
host_devices=None,
)
assert update_mock.call_args_list == [domain_update_call("myvm", cpu=2)]
@pytest.mark.parametrize("running", ["running", "shutdown"])

View file

@@ -4,12 +4,13 @@ Unit Tests for functions located in salt/utils/files.py
import copy
import io
import os
import pytest
import salt.utils.files
from tests.support.mock import patch
from tests.support.mock import MagicMock, patch
def test_safe_rm():
@@ -75,6 +76,16 @@ def test_fopen_with_disallowed_fds():
)
def test_fopen_binary_line_buffering(tmp_path):
tmp_file = os.path.join(tmp_path, "foobar")
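# line buffering (buffering=1) is unsupported in binary mode; fopen should fall back to io.DEFAULT_BUFFER_SIZE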
with patch("builtins.open") as open_mock, patch(
"salt.utils.files.is_fcntl_available", MagicMock(return_value=False)
):
salt.utils.files.fopen(os.path.join(tmp_path, "foobar"), mode="b", buffering=1)
assert open_mock.called
assert open_mock.call_args[1]["buffering"] == io.DEFAULT_BUFFER_SIZE
def _create_temp_structure(temp_directory, structure):
for folder, files in structure.items():
current_directory = os.path.join(temp_directory, folder)

View file

@@ -624,144 +624,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
{"vim": "7.4.326-2.62", "fakepkg": ""},
)
def test_upgrade_success(self):
"""
Test system upgrade and dist-upgrade success.
:return:
"""
with patch.dict(zypper.__grains__, {"osrelease_info": [12, 1]}), patch(
"salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
), patch(
"salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
):
with patch(
"salt.modules.zypperpkg.__zypper__.noraise.call", MagicMock()
) as zypper_mock:
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
):
ret = zypper.upgrade()
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(
side_effect=[
{"kernel-default": "1.1"},
{"kernel-default": "1.1,1.2"},
]
),
):
ret = zypper.upgrade()
self.assertDictEqual(
ret, {"kernel-default": {"old": "1.1", "new": "1.1,1.2"}}
)
zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1,1.2"}]),
):
ret = zypper.upgrade()
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.1,1.2"}})
zypper_mock.assert_any_call("update", "--auto-agree-with-licenses")
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
):
ret = zypper.upgrade(dist_upgrade=True)
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
zypper_mock.assert_any_call(
"dist-upgrade", "--auto-agree-with-licenses"
)
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
):
ret = zypper.upgrade(dist_upgrade=True, dryrun=True)
zypper_mock.assert_any_call(
"dist-upgrade", "--auto-agree-with-licenses", "--dry-run"
)
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
"--dry-run",
"--debug-solver",
)
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
):
ret = zypper.upgrade(
dist_upgrade=True,
dryrun=True,
fromrepo=["Dummy", "Dummy2"],
novendorchange=True,
)
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
"--dry-run",
"--from",
"Dummy",
"--from",
"Dummy2",
"--no-allow-vendor-change",
)
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
"--dry-run",
"--from",
"Dummy",
"--from",
"Dummy2",
"--no-allow-vendor-change",
"--debug-solver",
)
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
):
ret = zypper.upgrade(
dist_upgrade=False, fromrepo=["Dummy", "Dummy2"], dryrun=False
)
zypper_mock.assert_any_call(
"update",
"--auto-agree-with-licenses",
"--repo",
"Dummy",
"--repo",
"Dummy2",
)
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.2"}]),
):
ret = zypper.upgrade(
dist_upgrade=True,
fromrepo=["Dummy", "Dummy2"],
novendorchange=True,
)
self.assertDictEqual(ret, {"vim": {"old": "1.1", "new": "1.2"}})
zypper_mock.assert_any_call(
"dist-upgrade",
"--auto-agree-with-licenses",
"--from",
"Dummy",
"--from",
"Dummy2",
"--no-allow-vendor-change",
)
def test_upgrade_kernel(self):
"""
Test kernel package upgrade success.
@@ -806,52 +668,6 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
},
)
def test_upgrade_failure(self):
"""
Test system upgrade failure.
:return:
"""
zypper_out = """
Loading repository data...
Reading installed packages...
Computing distribution upgrade...
Use 'zypper repos' to get the list of defined repositories.
Repository 'DUMMY' not found by its alias, number, or URI.
"""
class FailingZypperDummy:
def __init__(self):
self.stdout = zypper_out
self.stderr = ""
self.pid = 1234
self.exit_code = 555
self.noraise = MagicMock()
self.SUCCESS_EXIT_CODES = [0]
def __call__(self, *args, **kwargs):
return self
with patch.dict(zypper.__grains__, {"osrelease_info": [12, 1]}), patch(
"salt.modules.zypperpkg.__zypper__", FailingZypperDummy()
) as zypper_mock, patch(
"salt.modules.zypperpkg.refresh_db", MagicMock(return_value=True)
), patch(
"salt.modules.zypperpkg._systemd_scope", MagicMock(return_value=False)
):
zypper_mock.noraise.call = MagicMock()
with patch(
"salt.modules.zypperpkg.list_pkgs",
MagicMock(side_effect=[{"vim": "1.1"}, {"vim": "1.1"}]),
):
with self.assertRaises(CommandExecutionError) as cmd_exc:
ret = zypper.upgrade(dist_upgrade=True, fromrepo=["DUMMY"])
self.assertEqual(cmd_exc.exception.info["changes"], {})
self.assertEqual(cmd_exc.exception.info["result"]["stdout"], zypper_out)
zypper_mock.noraise.call.assert_called_with(
"dist-upgrade", "--auto-agree-with-licenses", "--from", "DUMMY"
)
def test_upgrade_available(self):
"""
Test whether or not an upgrade is available for a given package.

View file

@@ -1,82 +0,0 @@
"""test for pillar csvpillar.py"""
import salt.pillar.stack as stack
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class StackPillarTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
loader_globals = {
"__grains__": {"os": "Debian", "os_family": "Debian"},
"__opts__": {"saltenv": "dev", "pillarenv": "dev"},
}
return {stack: loader_globals}
def mockStackPillar(self, mock_output, *args, **kwargs):
# mock: jenv.get_template(filename).render(stack=stack)
class mockJinja:
def __call__(self, *args, **kwargs):
return self
render = MagicMock(side_effect=mock_output)
with patch("os.path.isfile", MagicMock(return_value=True)), patch(
"jinja2.environment.Environment.get_template", mockJinja()
), patch("glob.glob", MagicMock(return_value=["/path/to/stack.cfg"])):
result = stack.ext_pillar( # (minion_id, pillar, *args, **kwargs)
"minion_id", {}, *args, **kwargs
)
return result
def test_extpillar_stack1(self):
mock_output = [
"/path/to/filename.yml\n", # mocked contents of /path/to/stack.cfg
"""
foo: foo1 # jinja test
bar: bar1
""", # mocked contents of filename.yml
]
fake_dict = {"foo": "foo1", "bar": "bar1"}
# config with a single file
result = self.mockStackPillar(mock_output, "/path/to/stack.cfg")
self.assertDictEqual(fake_dict, result)
# config with a opts:saltenv
result = self.mockStackPillar(
mock_output,
**{
"opts:saltenv": { # **kwargs
"dev": "/path/to/dev/static.cfg",
}
}
)
self.assertDictEqual(fake_dict, result)
# config with a opts:saltenv and __env__ substitution
result = self.mockStackPillar(
mock_output,
**{
"opts:saltenv": { # **kwargs
"__env__": "/path/to/__env__/dynamic.cfg",
}
}
)
self.assertDictEqual(fake_dict, result)
def test_extpillar_stack_exceptions(self):
# yaml indentation error
mock_output = [
"/path/to/filename.yml\n", # mocked contents of /path/to/stack.cfg
"""
foo: foo1
bar: bar1 # yaml indentation error
""", # mocked contents of filename.yml
]
self.assertRaises(
Exception, self.mockStackPillar, mock_output, "/path/to/stack.cfg"
)

View file

@@ -1,52 +0,0 @@
"""
unit tests for clustershell roster
"""
from tests.support.mock import MagicMock, patch
# Import Salt Testing libraries
from tests.support.unit import TestCase, skipIf
try:
from ClusterShell.NodeSet import NodeSet # pylint: disable=unused-import
HAS_CLUSTERSHELL = True
except (ImportError, OSError) as e:
HAS_CLUSTERSHELL = False
@skipIf(
HAS_CLUSTERSHELL is False,
"Install Python Clustershell bindings before running these tests.",
)
class ClusterShellTestCase(TestCase):
"""
Test cases for clustershell roster
"""
def test_targets(self):
mock_socket = MagicMock()
mock_nodeset = MagicMock()
mock_nodeset.NodeSet.return_value = ["foo"]
with patch.dict(
"sys.modules",
**{"socket": mock_socket, "ClusterShell.NodeSet": mock_nodeset}
):
import salt.roster.clustershell
salt.roster.clustershell.__opts__ = {}
with patch.dict(
salt.roster.clustershell.__opts__,
{"ssh_scan_ports": [1, 2, 3], "ssh_scan_timeout": 30},
):
# Reimports are necessary to re-init the namespace.
# pylint: disable=unused-import
import socket
from ClusterShell.NodeSet import NodeSet
# pylint: enable=unused-import
ret = salt.roster.clustershell.targets("foo")
mock_socket.gethostbyname.assert_any_call("foo")
self.assertTrue("foo" in ret)
self.assertTrue(ret["foo"]["port"] == 3)

View file

@@ -1,106 +0,0 @@
"""
Test the scan roster.
"""
import socket
import salt.roster.scan as scan_
from tests.support import mixins
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class ScanRosterTestCase(TestCase, mixins.LoaderModuleMockMixin):
"""Test the directory roster"""
def setup_loader_modules(self):
return {scan_: {"__opts__": {"ssh_scan_ports": "22", "ssh_scan_timeout": 0.01}}}
def test_single_ip(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127.0.0.1")
self.assertEqual(ret, {"127.0.0.1": {"host": "127.0.0.1", "port": 22}})
def test_single_network(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127.0.0.0/30")
self.assertEqual(
ret,
{
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
},
)
def test_multiple_ips(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(["127.0.0.1", "127.0.0.2"], tgt_type="list")
self.assertEqual(
ret,
{
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
},
)
def test_multiple_networks(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(
["127.0.0.0/30", "127.0.2.1", "127.0.1.0/30"], tgt_type="list"
)
self.assertEqual(
ret,
{
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {"host": "127.0.0.2", "port": 22},
"127.0.2.1": {"host": "127.0.2.1", "port": 22},
"127.0.1.1": {"host": "127.0.1.1", "port": 22},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
},
)
def test_malformed_ip(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets("127001")
self.assertEqual(ret, {})
def test_multiple_with_malformed(self):
"""Test that minion files in the directory roster match and render."""
with patch("salt.utils.network.get_socket"):
ret = scan_.targets(
["127.0.0.1", "127002", "127.0.1.0/30"], tgt_type="list"
)
self.assertEqual(
ret,
{
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.1.1": {"host": "127.0.1.1", "port": 22},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
},
)
def test_multiple_no_connection(self):
"""Test that minion files in the directory roster match and render."""
socket_mock = MagicMock()
socket_mock.connect = MagicMock(
side_effect=[None, socket.error(), None, socket.error(), None]
)
with patch("salt.utils.network.get_socket", return_value=socket_mock):
ret = scan_.targets(
["127.0.0.0/30", "127.0.2.1", "127.0.1.0/30"], tgt_type="list"
)
self.assertEqual(
ret,
{
"127.0.0.1": {"host": "127.0.0.1", "port": 22},
"127.0.0.2": {},
"127.0.2.1": {"host": "127.0.2.1", "port": 22},
"127.0.1.1": {},
"127.0.1.2": {"host": "127.0.1.2", "port": 22},
},
)

View file

@@ -1,81 +0,0 @@
import collections
import salt.roster.sshconfig as sshconfig
from tests.support import mixins
from tests.support.mock import mock_open, patch
from tests.support.unit import TestCase
_SAMPLE_SSH_CONFIG = """
Host *
User user.mcuserface
Host abc*
IdentityFile ~/.ssh/id_rsa_abc
Host def*
IdentityFile ~/.ssh/id_rsa_def
Host abc.asdfgfdhgjkl.com
HostName 123.123.123.123
Host abc123.asdfgfdhgjkl.com
HostName 123.123.123.124
Host def.asdfgfdhgjkl.com
HostName 234.234.234.234
"""
_TARGET_ABC = collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_abc"),
("host", "abc.asdfgfdhgjkl.com"),
]
)
_TARGET_ABC123 = collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_abc"),
("host", "abc123.asdfgfdhgjkl.com"),
]
)
_TARGET_DEF = collections.OrderedDict(
[
("user", "user.mcuserface"),
("priv", "~/.ssh/id_rsa_def"),
("host", "def.asdfgfdhgjkl.com"),
]
)
_ALL = {
"abc.asdfgfdhgjkl.com": _TARGET_ABC,
"abc123.asdfgfdhgjkl.com": _TARGET_ABC123,
"def.asdfgfdhgjkl.com": _TARGET_DEF,
}
_ABC_GLOB = {
"abc.asdfgfdhgjkl.com": _TARGET_ABC,
"abc123.asdfgfdhgjkl.com": _TARGET_ABC123,
}
class SSHConfigRosterTestCase(TestCase, mixins.LoaderModuleMockMixin):
def setUp(self):
self.mock_fp = mock_open(read_data=_SAMPLE_SSH_CONFIG)
def setup_loader_modules(self):
return {sshconfig: {}}
def test_all(self):
with patch("salt.utils.files.fopen", self.mock_fp):
with patch("salt.roster.sshconfig._get_ssh_config_file"):
targets = sshconfig.targets("*")
self.assertEqual(targets, _ALL)
def test_abc_glob(self):
with patch("salt.utils.files.fopen", self.mock_fp):
with patch("salt.roster.sshconfig._get_ssh_config_file"):
targets = sshconfig.targets("abc*")
self.assertEqual(targets, _ABC_GLOB)

View file

@@ -1,109 +0,0 @@
"""
unittests for terraform roster
"""
import os.path
import salt.config
import salt.loader
from salt.roster import terraform
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
class TerraformTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.roster.terraform
"""
def setup_loader_modules(self):
opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master")
)
utils = salt.loader.utils(opts, whitelist=["roster_matcher"])
return {terraform: {"__utils__": utils, "__opts__": {}}}
def test_default_output(self):
"""
Test the output of a fixture tfstate file which contains libvirt
resources.
"""
tfstate = os.path.join(
os.path.dirname(__file__), "terraform.data", "terraform.tfstate"
)
pki_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "terraform.data")
)
with patch.dict(
terraform.__opts__, {"roster_file": tfstate, "pki_dir": pki_dir}
):
expected_result = {
"db0": {
"host": "192.168.122.174",
"user": "root",
"passwd": "dbpw",
"tty": True,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
"db1": {
"host": "192.168.122.190",
"user": "root",
"passwd": "dbpw",
"tty": True,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
"web0": {
"host": "192.168.122.106",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
"web1": {
"host": "192.168.122.235",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
}
ret = terraform.targets("*")
self.assertDictEqual(expected_result, ret)
def test_default_matching(self):
"""
Test the output of a fixture tfstate file which contains libvirt
resources using matching
"""
tfstate = os.path.join(
os.path.dirname(__file__), "terraform.data", "terraform.tfstate"
)
pki_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "terraform.data")
)
with patch.dict(
terraform.__opts__, {"roster_file": tfstate, "pki_dir": pki_dir}
):
expected_result = {
"web0": {
"host": "192.168.122.106",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
"web1": {
"host": "192.168.122.235",
"user": "root",
"passwd": "linux",
"timeout": 22,
"priv": os.path.join(pki_dir, "ssh", "salt-ssh.rsa"),
},
}
ret = terraform.targets("*web*")
self.assertDictEqual(expected_result, ret)