Mirror of https://github.com/saltstack/salt.git (synced 2025-04-16 09:40:20 +00:00)

Merge pull request #64449 from s0undt3ch/hotfix/merge-forward

[master] Merge 3006.x into master

Commit 98433521d0: 86 changed files with 3623 additions and 3211 deletions
@@ -1190,7 +1190,7 @@ repos:
   - repo: https://github.com/s0undt3ch/salt-rewrite
     # Automatically rewrite code with known rules
-    rev: 2.0.0
+    rev: 2.4.3
     hooks:
       - id: salt-rewrite
         alias: rewrite-docstrings

@@ -1202,10 +1202,6 @@ repos:
             salt/ext/.*
           )$

-  - repo: https://github.com/s0undt3ch/salt-rewrite
-    # Automatically rewrite code with known rules
-    rev: 2.0.0
-    hooks:
       - id: salt-rewrite
         alias: rewrite-tests
         name: Rewrite Salt's Test Suite

changelog/61049.fixed.md (new file)
@@ -0,0 +1 @@
+Do not update the credentials dictionary in `utils/aws.py` while iterating over it, and use the correct delete functionality
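The pitfall behind this fix is generic Python, not Salt-specific: mutating a dict while iterating over it raises at runtime. A minimal sketch with hypothetical data, not the utils/aws.py code:

creds = {"stale": "...", "current": "..."}

# Broken: deleting during iteration changes the dict's size and raises
# "RuntimeError: dictionary changed size during iteration" in Python 3.
#   for name in creds:
#       if name == "stale":
#           del creds[name]

# Safe: iterate over a snapshot of the keys, then delete.
for name in list(creds):
    if name == "stale":
        del creds[name]

print(creds)  # {'current': '...'}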
changelog/63296.fixed.md (new file)
@@ -0,0 +1,2 @@
+Fixes an issue where subsequent state runs with the lgpo state module would fail.
+The ``lgpo.get_policy`` function now returns all boolean settings.
changelog/63473.fixed.md (new file)
@@ -0,0 +1,2 @@
+Fixes an issue with boolean settings not being reported after being set. The
+``lgpo.get_policy`` function now returns all boolean settings.
changelog/64237.fixed.md (new file)
@@ -0,0 +1 @@
+Remove the hard-coded Python version from the error message.
changelog/64280.fixed.md (new file)
@@ -0,0 +1 @@
+Fixed file client private attribute reference on `SaltMakoTemplateLookup`
changelog/64318.fixed.md (new file)
@@ -0,0 +1 @@
+Ensure SELinux values are handled as lowercase
changelog/64401.fixed.md (new file)
@@ -0,0 +1,5 @@
+Fixed an issue with ``lgpo_reg`` where existing entries for the same key in
+``Registry.pol`` were being overwritten in subsequent runs if the value name in
+the subsequent run was contained in the existing value name. For example, a
+key named ``SetUpdateNotificationLevel`` would be overwritten by a subsequent
+run attempting to set ``UpdateNotificationLevel``.
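A sketch of why substring containment misfires here, with illustrative names only, not the lgpo_reg implementation:

existing = "SetUpdateNotificationLevel"
incoming = "UpdateNotificationLevel"

# Broken: containment treats two distinct value names as the same entry,
# so the existing one is overwritten.
print(incoming in existing)  # True

# Fixed behavior needs an exact comparison between value names.
print(incoming == existing)  # False, so both entries survive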
changelog/64430.fixed.md (new file)
@@ -0,0 +1 @@
+Fix regression in user.present when handling groups with duplicate GIDs
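For background, a GID is not guaranteed to identify a single group; a hedged sketch of the ambiguity a state like user.present has to handle (standard library only, illustrative, Unix-only):

import grp

# Several /etc/group entries may share one numeric GID, so resolving a
# GID back to "the" group name is ambiguous; membership checks must
# consider every name that shares the GID.
gid = 1000
names = [g.gr_name for g in grp.getgrall() if g.gr_gid == gid]
print(names)  # may contain more than one name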

@@ -46,3 +46,4 @@ xmldiff>=2.4
 genshi>=0.7.3
 cheetah3>=3.2.2
 mako
+wempy

@@ -555,6 +555,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -544,6 +544,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -607,6 +607,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -483,6 +483,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -551,6 +551,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -540,6 +540,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -601,6 +601,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -479,6 +479,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -547,6 +547,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -611,6 +611,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -491,6 +491,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -555,6 +555,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -544,6 +544,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -609,6 +609,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto

@@ -487,6 +487,8 @@ websocket-client==0.40.0
     # via
     #   docker
     #   kubernetes
+wempy==0.2.1
+    # via -r requirements/static/ci/common.in
 werkzeug==2.2.3
     # via
     #   moto
@@ -87,7 +87,7 @@ class DeferredStreamHandler(StreamHandler):
     If anything goes wrong before logging is properly setup, all stored messages
     will be flushed to the handler's stream, ie, written to console.
 
-    .. versionadded:: 3005.0
+    .. versionadded:: 3005
     """
 
     def __init__(self, stream, max_queue_size=10000):
salt/cache/consul.py (vendored, 38 lines changed)

@@ -3,7 +3,7 @@ Minion data cache plugin for Consul key/value data store.
 
 .. versionadded:: 2016.11.2
 
-.. versionchanged:: 3005.0
+.. versionchanged:: 3005
 
     Timestamp/cache updated support added.

@@ -119,33 +119,29 @@ def store(bank, key, data):
     """
     Store a key value.
     """
-    c_key = "{}/{}".format(bank, key)
-    tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+    c_key = f"{bank}/{key}"
+    tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
 
     try:
         c_data = salt.payload.dumps(data)
         api.kv.put(c_key, c_data)
         api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            "There was an error writing the key, {}: {}".format(c_key, exc)
-        )
+        raise SaltCacheError(f"There was an error writing the key, {c_key}: {exc}")
 
 
 def fetch(bank, key):
     """
     Fetch a key value.
     """
-    c_key = "{}/{}".format(bank, key)
+    c_key = f"{bank}/{key}"
     try:
         _, value = api.kv.get(c_key)
         if value is None:
             return {}
         return salt.payload.loads(value["Value"])
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            "There was an error reading the key, {}: {}".format(c_key, exc)
-        )
+        raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
 
 
 def flush(bank, key=None):

@@ -156,16 +152,14 @@ def flush(bank, key=None):
         c_key = bank
         tstamp_key = None
     else:
-        c_key = "{}/{}".format(bank, key)
-        tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+        c_key = f"{bank}/{key}"
+        tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
     try:
         if tstamp_key:
             api.kv.delete(tstamp_key)
         return api.kv.delete(c_key, recurse=key is None)
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            "There was an error removing the key, {}: {}".format(c_key, exc)
-        )
+        raise SaltCacheError(f"There was an error removing the key, {c_key}: {exc}")
 
 
 def list_(bank):

@@ -175,9 +169,7 @@ def list_(bank):
     try:
         _, keys = api.kv.get(bank + "/", keys=True, separator="/")
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            'There was an error getting the key "{}": {}'.format(bank, exc)
-        )
+        raise SaltCacheError(f'There was an error getting the key "{bank}": {exc}')
     if keys is None:
         keys = []
     else:

@@ -198,9 +190,7 @@ def contains(bank, key):
         c_key = "{}/{}".format(bank, key or "")
         _, value = api.kv.get(c_key, keys=True)
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            "There was an error getting the key, {}: {}".format(c_key, exc)
-        )
+        raise SaltCacheError(f"There was an error getting the key, {c_key}: {exc}")
     return value is not None

@@ -209,13 +199,11 @@ def updated(bank, key):
     Return the Unix Epoch timestamp of when the key was last updated. Return
     None if key is not found.
     """
-    c_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+    c_key = f"{bank}/{key}{_tstamp_suffix}"
    try:
         _, value = api.kv.get(c_key)
         if value is None:
             return None
         return salt.payload.loads(value["Value"])
     except Exception as exc:  # pylint: disable=broad-except
-        raise SaltCacheError(
-            "There was an error reading the key, {}: {}".format(c_key, exc)
-        )
+        raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
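Most of the churn in this file, and in the modules below, is one mechanical rewrite: str.format calls become equivalent f-strings, presumably via the salt-rewrite hook bumped at the top of this diff. A minimal sketch of the pattern, including the positional-index form seen later in the openvswitch module (made-up values):

bank, key = "minions/minion", "data"

# Plain positional placeholders map directly to inline expressions.
assert "{}/{}".format(bank, key) == f"{bank}/{key}"

# Indexed placeholders are reordered into inline expressions.
br, port, flag = "br0", "tap0", "--may-exist "
assert "ovs-vsctl {2}add-port {0} {1}".format(br, port, flag) == \
    f"ovs-vsctl {flag}add-port {br} {port}"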

@@ -17,7 +17,7 @@ except ImportError:
 
 def get(key, default="", merge=False, delimiter=DEFAULT_TARGET_DELIM):
     """
-    .. versionadded:: 0.14
+    .. versionadded:: 0.14.0
 
     Attempt to retrieve the named value from pillar, if the named value is not
     available return the passed default. The default return is an empty string.
@@ -2,7 +2,7 @@
 CenturyLink Cloud Module
 ========================
 
-.. versionadded:: 2018.3
+.. versionadded:: 2018.3.0
 
 The CLC cloud module allows you to manage CLC Via the CLC SDK.
 

@@ -421,7 +421,7 @@ def create(vm_):
     __utils__["cloud.fire_event"](
         "event",
         "waiting for ssh",
-        "salt/cloud/{}/waiting_for_ssh".format(name),
+        f"salt/cloud/{name}/waiting_for_ssh",
         sock_dir=__opts__["sock_dir"],
         args={"ip_address": vm_["ssh_host"]},
         transport=__opts__["transport"],
@@ -2,7 +2,7 @@
 Grains from cloud metadata servers at 169.254.169.254 in
 google compute engine
 
-.. versionadded:: 3005.0
+.. versionadded:: 3005
 
 :depends: requests
@@ -2,7 +2,7 @@
 ACME / Let's Encrypt module
 ===========================
 
-.. versionadded:: 2016.3
+.. versionadded:: 2016.3.0
 
 This module currently looks for certbot script in the $PATH as
     - certbot,

@@ -72,7 +72,7 @@ def _cert_file(name, cert_type):
     """
     Return expected path of a Let's Encrypt live cert
     """
-    return os.path.join(LE_LIVE, name, "{}.pem".format(cert_type))
+    return os.path.join(LE_LIVE, name, f"{cert_type}.pem")
 
 
 def _expires(name):

@@ -88,9 +88,9 @@ def _expires(name):
         expiry = __salt__["tls.cert_info"](cert_file).get("not_after", 0)
     # Cobble it together using the openssl binary
     else:
-        openssl_cmd = "openssl x509 -in {} -noout -enddate".format(cert_file)
+        openssl_cmd = f"openssl x509 -in {cert_file} -noout -enddate"
         # No %e format on my Linux'es here
-        strptime_sux_cmd = 'date --date="$({} | cut -d= -f2)" +%s'.format(openssl_cmd)
+        strptime_sux_cmd = f'date --date="$({openssl_cmd} | cut -d= -f2)" +%s'
         expiry = float(__salt__["cmd.shell"](strptime_sux_cmd, output_loglevel="quiet"))
         # expiry = datetime.datetime.strptime(expiry.split('=', 1)[-1], '%b %e %H:%M:%S %Y %Z')
     return datetime.datetime.fromtimestamp(expiry)

@@ -195,10 +195,10 @@ def cert(
         cmd.append("--renew-by-default")
         renew = True
     if server:
-        cmd.append("--server {}".format(server))
+        cmd.append(f"--server {server}")
 
     if certname:
-        cmd.append("--cert-name {}".format(certname))
+        cmd.append(f"--cert-name {certname}")
 
     if test_cert:
         if server:

@@ -211,41 +211,41 @@ def cert(
     if webroot:
         cmd.append("--authenticator webroot")
         if webroot is not True:
-            cmd.append("--webroot-path {}".format(webroot))
+            cmd.append(f"--webroot-path {webroot}")
     elif dns_plugin in supported_dns_plugins:
         if dns_plugin == "cloudflare":
             cmd.append("--dns-cloudflare")
-            cmd.append("--dns-cloudflare-credentials {}".format(dns_plugin_credentials))
+            cmd.append(f"--dns-cloudflare-credentials {dns_plugin_credentials}")
         else:
             return {
                 "result": False,
-                "comment": "DNS plugin '{}' is not supported".format(dns_plugin),
+                "comment": f"DNS plugin '{dns_plugin}' is not supported",
             }
     else:
         cmd.append("--authenticator standalone")
 
     if email:
-        cmd.append("--email {}".format(email))
+        cmd.append(f"--email {email}")
 
     if keysize:
-        cmd.append("--rsa-key-size {}".format(keysize))
+        cmd.append(f"--rsa-key-size {keysize}")
 
-    cmd.append("--domains {}".format(name))
+    cmd.append(f"--domains {name}")
     if aliases is not None:
         for dns in aliases:
-            cmd.append("--domains {}".format(dns))
+            cmd.append(f"--domains {dns}")
 
     if preferred_challenges:
-        cmd.append("--preferred-challenges {}".format(preferred_challenges))
+        cmd.append(f"--preferred-challenges {preferred_challenges}")
 
     if tls_sni_01_port:
-        cmd.append("--tls-sni-01-port {}".format(tls_sni_01_port))
+        cmd.append(f"--tls-sni-01-port {tls_sni_01_port}")
     if tls_sni_01_address:
-        cmd.append("--tls-sni-01-address {}".format(tls_sni_01_address))
+        cmd.append(f"--tls-sni-01-address {tls_sni_01_address}")
     if http_01_port:
-        cmd.append("--http-01-port {}".format(http_01_port))
+        cmd.append(f"--http-01-port {http_01_port}")
     if http_01_address:
-        cmd.append("--http-01-address {}".format(http_01_address))
+        cmd.append(f"--http-01-address {http_01_address}")
 
     res = __salt__["cmd.run_all"](" ".join(cmd))
 

@@ -269,13 +269,13 @@ def cert(
     }
 
     if "no action taken" in res["stdout"]:
-        comment = "Certificate {} unchanged".format(cert_file)
+        comment = f"Certificate {cert_file} unchanged"
         result = None
     elif renew:
-        comment = "Certificate {} renewed".format(certname)
+        comment = f"Certificate {certname} renewed"
         result = True
     else:
-        comment = "Certificate {} obtained".format(certname)
+        comment = f"Certificate {certname} obtained"
         result = True
 
     ret = {

@@ -339,7 +339,7 @@ def info(name):
         cert_info = __salt__["x509.read_certificate"](cert_file)
     else:
         # Cobble it together using the openssl binary
-        openssl_cmd = "openssl x509 -in {} -noout -text".format(cert_file)
+        openssl_cmd = f"openssl x509 -in {cert_file} -noout -text"
         cert_info = {"text": __salt__["cmd.run"](openssl_cmd, output_loglevel="quiet")}
     return cert_info
@@ -207,14 +207,14 @@ if not HAS_APT:
             if self.architectures:
                 opts.append("arch={}".format(",".join(self.architectures)))
             if self.signedby:
-                opts.append("signed-by={}".format(self.signedby))
+                opts.append(f"signed-by={self.signedby}")
 
             if opts:
                 repo_line.append("[{}]".format(" ".join(opts)))
 
             repo_line = repo_line + [self.uri, self.dist, " ".join(self.comps)]
             if self.comment:
-                repo_line.append("#{}".format(self.comment))
+                repo_line.append(f"#{self.comment}")
             return " ".join(repo_line) + "\n"
 
         def _parse_sources(self, line):

@@ -277,7 +277,7 @@ if not HAS_APT:
                 architectures = "arch={}".format(",".join(architectures))
                 opts_count.append(architectures)
             if signedby:
-                signedby = "signed-by={}".format(signedby)
+                signedby = f"signed-by={signedby}"
                 opts_count.append(signedby)
             if len(opts_count) > 1:
                 opts_line = "[" + " ".join(opts_count) + "]"

@@ -340,7 +340,7 @@ def _reconstruct_ppa_name(owner_name, ppa_name):
     """
     Stringify PPA name from args.
     """
-    return "ppa:{}/{}".format(owner_name, ppa_name)
+    return f"ppa:{owner_name}/{ppa_name}"
 
 
 def _call_apt(args, scope=True, **kwargs):

@@ -353,7 +353,7 @@ def _call_apt(args, scope=True, **kwargs):
         and salt.utils.systemd.has_scope(__context__)
         and __salt__["config.get"]("systemd.scope", True)
     ):
-        cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
+        cmd.extend(["systemd-run", "--scope", "--description", f'"{__name__}"'])
     cmd.extend(args)
 
     params = {

@@ -465,7 +465,7 @@ def latest_version(*names, **kwargs):
     for name in names:
         ret[name] = ""
     pkgs = list_pkgs(versions_as_list=True)
-    repo = ["-o", "APT::Default-Release={}".format(fromrepo)] if fromrepo else None
+    repo = ["-o", f"APT::Default-Release={fromrepo}"] if fromrepo else None
 
     # Refresh before looking for the latest version available
     if refresh:

@@ -942,7 +942,7 @@ def install(
                     continue
                 else:
                     version_num = target
-                    pkgstr = "{}={}".format(pkgname, version_num)
+                    pkgstr = f"{pkgname}={version_num}"
             else:
                 pkgstr = pkgpath
 

@@ -1318,7 +1318,7 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
         ]
         for option in dpkg_options:
             cmd.append("-o")
-            cmd.append("DPkg::Options::={}".format(option))
+            cmd.append(f"DPkg::Options::={option}")
 
     if kwargs.get("force_yes", False):
         cmd.append("--force-yes")

@@ -1391,15 +1391,15 @@ def hold(name=None, pkgs=None, sources=None, **kwargs):  # pylint: disable=W0613
 
         state = get_selections(pattern=target, state="hold")
         if not state:
-            ret[target]["comment"] = "Package {} not currently held.".format(target)
+            ret[target]["comment"] = f"Package {target} not currently held."
         elif not salt.utils.data.is_true(state.get("hold", False)):
             if "test" in __opts__ and __opts__["test"]:
                 ret[target].update(result=None)
-                ret[target]["comment"] = "Package {} is set to be held.".format(target)
+                ret[target]["comment"] = f"Package {target} is set to be held."
             else:
                 result = set_selections(selection={"hold": [target]})
                 ret[target].update(changes=result[target], result=True)
-                ret[target]["comment"] = "Package {} is now being held.".format(target)
+                ret[target]["comment"] = f"Package {target} is now being held."
         else:
             ret[target].update(result=True)
             ret[target]["comment"] = "Package {} is already set to be held.".format(

@@ -1456,7 +1456,7 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs):  # pylint: disable=W06
 
         state = get_selections(pattern=target)
         if not state:
-            ret[target]["comment"] = "Package {} does not have a state.".format(target)
+            ret[target]["comment"] = f"Package {target} does not have a state."
         elif salt.utils.data.is_true(state.get("hold", False)):
             if "test" in __opts__ and __opts__["test"]:
                 ret[target].update(result=None)

@@ -1552,7 +1552,7 @@ def list_pkgs(
             if __grains__.get("cpuarch", "") == "x86_64":
                 osarch = __grains__.get("osarch", "")
                 if arch != "all" and osarch == "amd64" and osarch != arch:
-                    name += ":{}".format(arch)
+                    name += f":{arch}"
             if cols:
                 if ("install" in linetype or "hold" in linetype) and "installed" in status:
                     __salt__["pkg_resource.add_pkg"](ret["installed"], name, version_num)

@@ -1780,7 +1780,7 @@ def _consolidate_repo_sources(sources):
     Consolidate APT sources.
     """
     if not isinstance(sources, SourcesList):
-        raise TypeError("'{}' not a '{}'".format(type(sources), SourcesList))
+        raise TypeError(f"'{type(sources)}' not a '{SourcesList}'")
 
     consolidated = {}
     delete_files = set()

@@ -1961,7 +1961,7 @@ def get_repo(repo, **kwargs):
         dist = __grains__["oscodename"]
         owner_name, ppa_name = repo[4:].split("/")
         if ppa_auth:
-            auth_info = "{}@".format(ppa_auth)
+            auth_info = f"{ppa_auth}@"
             repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
         else:
             if HAS_SOFTWAREPROPERTIES:

@@ -1974,7 +1974,7 @@ def get_repo(repo, **kwargs):
                     repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
                 except NameError as name_error:
                     raise CommandExecutionError(
-                        "Could not find ppa {}: {}".format(repo, name_error)
+                        f"Could not find ppa {repo}: {name_error}"
                     )
             else:
                 repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)

@@ -2000,7 +2000,7 @@ def get_repo(repo, **kwargs):
         )
     except SyntaxError:
         raise CommandExecutionError(
-            "Error: repo '{}' is not a well formatted definition".format(repo)
+            f"Error: repo '{repo}' is not a well formatted definition"
         )
 
     for source in repos.values():

@@ -2070,7 +2070,7 @@ def del_repo(repo, **kwargs):
         ) = _split_repo_str(repo)
     except SyntaxError:
         raise SaltInvocationError(
-            "Error: repo '{}' not a well formatted definition".format(repo)
+            f"Error: repo '{repo}' not a well formatted definition"
         )
 
     for source in repos:

@@ -2132,9 +2132,7 @@ def del_repo(repo, **kwargs):
             refresh_db()
             return ret
 
-    raise CommandExecutionError(
-        "Repo {} doesn't exist in the sources.list(s)".format(repo)
-    )
+    raise CommandExecutionError(f"Repo {repo} doesn't exist in the sources.list(s)")
 
 
 def _convert_if_int(value):

@@ -2427,11 +2425,11 @@ def add_repo_key(
         else:
            cmd.extend(["adv", "--batch", "--keyserver", keyserver, "--recv", keyid])
     elif keyid:
-        error_msg = "No keyserver specified for keyid: {}".format(keyid)
+        error_msg = f"No keyserver specified for keyid: {keyid}"
         raise SaltInvocationError(error_msg)
     else:
         raise TypeError(
-            "{}() takes at least 1 argument (0 given)".format(add_repo_key.__name__)
+            f"{add_repo_key.__name__}() takes at least 1 argument (0 given)"
         )
 
     cmd_ret = _call_apt(cmd, **kwargs)

@@ -2731,7 +2729,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
             repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
         else:
             raise CommandExecutionError(
-                'cannot parse "ppa:" style repo definitions: {}'.format(repo)
+                f'cannot parse "ppa:" style repo definitions: {repo}'
             )
 
     sources = SourcesList()

@@ -2769,9 +2767,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
                 repo_signedby,
             ) = _split_repo_str(repo)
         except SyntaxError:
-            raise SyntaxError(
-                "Error: repo '{}' not a well formatted definition".format(repo)
-            )
+            raise SyntaxError(f"Error: repo '{repo}' not a well formatted definition")
 
         full_comp_list = {comp.strip() for comp in repo_comps}
         no_proxy = __salt__["config.option"]("no_proxy")

@@ -2813,7 +2809,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
                         "adv",
                         "--batch",
                         "--keyserver-options",
-                        "http-proxy={}".format(http_proxy_url),
+                        f"http-proxy={http_proxy_url}",
                         "--keyserver",
                         keyserver,
                         "--logger-fd",

@@ -2859,7 +2855,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
             key_url = kwargs["key_url"]
             fn_ = pathlib.Path(__salt__["cp.cache_file"](key_url, saltenv))
             if not fn_:
-                raise CommandExecutionError("Error: file not found: {}".format(key_url))
+                raise CommandExecutionError(f"Error: file not found: {key_url}")
 
             if kwargs["signedby"] and fn_.name != kwargs["signedby"].name:
                 # override the signedby defined in the name with the

@@ -2879,9 +2875,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
                 cmd = ["apt-key", "add", str(fn_)]
                 out = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
                 if not out.upper().startswith("OK"):
-                    raise CommandExecutionError(
-                        "Error: failed to add key from {}".format(key_url)
-                    )
+                    raise CommandExecutionError(f"Error: failed to add key from {key_url}")
 
         elif "key_text" in kwargs:
             key_text = kwargs["key_text"]

@@ -2890,9 +2884,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
                 cmd, stdin=key_text, python_shell=False, **kwargs
             )
             if not out.upper().startswith("OK"):
-                raise CommandExecutionError(
-                    "Error: failed to add key:\n{}".format(key_text)
-                )
+                raise CommandExecutionError(f"Error: failed to add key:\n{key_text}")
 
     if "comps" in kwargs:
         kwargs["comps"] = [comp.strip() for comp in kwargs["comps"].split(",")]

@@ -3276,7 +3268,7 @@ def set_selections(path=None, selection=None, clear=False, saltenv="base"):
         salt.utils.yaml.parser.ParserError,
         salt.utils.yaml.scanner.ScannerError,
     ) as exc:
-        raise SaltInvocationError("Improperly-formatted selection: {}".format(exc))
+        raise SaltInvocationError(f"Improperly-formatted selection: {exc}")
 
     if path:
         path = __salt__["cp.cache_file"](path, saltenv)

@@ -3312,7 +3304,7 @@ def set_selections(path=None, selection=None, clear=False, saltenv="base"):
         if _state == sel_revmap.get(_pkg):
             continue
         cmd = ["dpkg", "--set-selections"]
-        cmd_in = "{} {}".format(_pkg, _state)
+        cmd_in = f"{_pkg} {_state}"
         if not __opts__["test"]:
             result = _call_apt(cmd, scope=False, stdin=cmd_in)
             if result["retcode"] != 0:

@@ -3508,16 +3500,16 @@ def _get_http_proxy_url():
     # Set http_proxy_url for use in various internet facing actions...eg apt-key adv
     if host and port:
         if username and password:
-            http_proxy_url = "http://{}:{}@{}:{}".format(username, password, host, port)
+            http_proxy_url = f"http://{username}:{password}@{host}:{port}"
         else:
-            http_proxy_url = "http://{}:{}".format(host, port)
+            http_proxy_url = f"http://{host}:{port}"
 
     return http_proxy_url
 
 
 def list_downloaded(root=None, **kwargs):
     """
-    .. versionadded:: 3000?
+    .. versionadded:: 3000
 
     List prefetched packages downloaded by apt in the local disk.
 
@@ -61,7 +61,7 @@ def _config_getter(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     Common code for config.get_* functions, builds and runs the git CLI command
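The other recurring edit in this module adds a trailing comma after **kwargs in signatures and calls, syntax that has been legal since Python 3.6; with it, appending a parameter later touches one line instead of two. A sketch with a hypothetical function, not git.py itself:

def _run(
    cwd,
    user=None,
    **kwargs,  # trailing comma after **kwargs is valid Python 3.6+ syntax
):
    # Merge the fixed arguments with whatever else was passed through.
    return {"cwd": cwd, "user": user, **kwargs}

result = _run(
    "/srv/repo",
    user="git",
)
print(result)  # {'cwd': '/srv/repo', 'user': 'git'}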
@@ -224,7 +224,7 @@ def _git_run(
     redirect_stderr=False,
     saltenv="base",
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     simple, throw an exception with the error message on an error return code.

@@ -323,7 +323,7 @@ def _git_run(
             ignore_retcode=ignore_retcode,
             redirect_stderr=redirect_stderr,
             output_encoding=output_encoding,
-            **kwargs
+            **kwargs,
         )
     finally:
         if tmp_ssh_wrapper:

@@ -390,7 +390,7 @@ def _git_run(
             ignore_retcode=ignore_retcode,
             redirect_stderr=redirect_stderr,
             output_encoding=output_encoding,
-            **kwargs
+            **kwargs,
         )
 
         if result["retcode"] == 0:

@@ -403,7 +403,7 @@ def _git_run(
         )
         err = result["stdout" if redirect_stderr else "stderr"]
         if err:
-            msg += ": {}".format(salt.utils.url.redact_http_basic_auth(err))
+            msg += f": {salt.utils.url.redact_http_basic_auth(err)}"
         raise CommandExecutionError(msg)
     return result
 

@@ -564,7 +564,7 @@ def archive(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionchanged:: 2015.8.0

@@ -1019,7 +1019,7 @@ def clone(
     https_user
         Set HTTP Basic Auth username. Only accepted for HTTPS URLs.
 
-        .. versionadded:: 20515.5.0
+        .. versionadded:: 2015.5.0
 
     https_pass
         Set HTTP Basic Auth password. Only accepted for HTTPS URLs.

@@ -1215,7 +1215,7 @@ def config_get(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     Get the value of a key in the git configuration file

@@ -1293,7 +1293,7 @@ def config_get(
         password=password,
         ignore_retcode=ignore_retcode,
         output_encoding=output_encoding,
-        **kwargs
+        **kwargs,
     )
 
     # git config --get exits with retcode of 1 when key does not exist

@@ -1318,7 +1318,7 @@ def config_get_regexp(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     r"""
     .. versionadded:: 2015.8.0

@@ -1395,7 +1395,7 @@ def config_get_regexp(
         password=password,
         ignore_retcode=ignore_retcode,
         output_encoding=output_encoding,
-        **kwargs
+        **kwargs,
     )
 
     # git config --get exits with retcode of 1 when key does not exist

@@ -1425,7 +1425,7 @@ def config_set(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionchanged:: 2015.8.0

@@ -1574,7 +1574,7 @@ def config_set(
         cwd=cwd,
         ignore_retcode=ignore_retcode,
         output_encoding=output_encoding,
-        **{"all": True, "global": global_}
+        **{"all": True, "global": global_},
     )
 
 

@@ -1586,7 +1586,7 @@ def config_unset(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionadded:: 2015.8.0

@@ -1695,9 +1695,9 @@ def config_unset(
             )
             is None
         ):
-            raise CommandExecutionError("Key '{}' does not exist".format(key))
+            raise CommandExecutionError(f"Key '{key}' does not exist")
         else:
-            msg = "Multiple values exist for key '{}'".format(key)
+            msg = f"Multiple values exist for key '{key}'"
             if value_regex is not None:
                 msg += " and value_regex matches multiple values"
             raise CommandExecutionError(msg)

@@ -2355,9 +2355,9 @@ def init(
     if bare:
         command.append("--bare")
     if template is not None:
-        command.append("--template={}".format(template))
+        command.append(f"--template={template}")
     if separate_git_dir is not None:
-        command.append("--separate-git-dir={}".format(separate_git_dir))
+        command.append(f"--separate-git-dir={separate_git_dir}")
     if shared is not None:
         if isinstance(shared, int) and not isinstance(shared, bool):
             shared = "0" + str(shared)

@@ -2365,7 +2365,7 @@ def init(
         # Using lower here because booleans would be capitalized when
         # converted to a string.
         shared = str(shared).lower()
-        command.append("--shared={}".format(shared))
+        command.append(f"--shared={shared}")
     command.extend(_format_opts(opts))
     command.append(cwd)
     return _git_run(

@@ -2814,7 +2814,7 @@ def list_worktrees(
         worktree_root = os.path.join(cwd, worktree_root)
     if not os.path.isdir(worktree_root):
         raise CommandExecutionError(
-            "Worktree admin directory {} not present".format(worktree_root)
+            f"Worktree admin directory {worktree_root} not present"
         )
 
     def _read_file(path):

@@ -3081,7 +3081,7 @@ def merge(
     identity=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     Interface to `git-merge(1)`_

@@ -3205,7 +3205,7 @@ def merge_base(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionadded:: 2015.8.0

@@ -3487,7 +3487,7 @@ def merge_tree(
         base = merge_base(cwd, refs=[ref1, ref2], output_encoding=output_encoding)
     except (SaltInvocationError, CommandExecutionError):
         raise CommandExecutionError(
-            "Unable to determine merge base for {} and {}".format(ref1, ref2)
+            f"Unable to determine merge base for {ref1} and {ref2}"
         )
     command.extend([base, ref1, ref2])
     return _git_run(

@@ -3627,7 +3627,7 @@ def push(
     ignore_retcode=False,
     saltenv="base",
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     Interface to `git-push(1)`_

@@ -3927,7 +3927,7 @@ def remote_get(
     )
     if remote not in all_remotes:
         raise CommandExecutionError(
-            "Remote '{}' not present in git checkout located at {}".format(remote, cwd)
+            f"Remote '{remote}' not present in git checkout located at {cwd}"
         )
     return all_remotes[remote]
 

@@ -3944,7 +3944,7 @@ def remote_refs(
     ignore_retcode=False,
     output_encoding=None,
     saltenv="base",
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionadded:: 2015.8.0

@@ -4850,7 +4850,7 @@ def submodule(
     ignore_retcode=False,
     saltenv="base",
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionchanged:: 2015.8.0

@@ -5290,7 +5290,7 @@ def worktree_add(
     password=None,
     ignore_retcode=False,
     output_encoding=None,
-    **kwargs
+    **kwargs,
 ):
     """
     .. versionadded:: 2015.8.0

@@ -5602,5 +5602,5 @@ def worktree_rm(cwd, user=None, output_encoding=None):
     try:
         salt.utils.files.rm_rf(cwd)
     except Exception as exc:  # pylint: disable=broad-except
-        raise CommandExecutionError("Unable to remove {}: {}".format(cwd, exc))
+        raise CommandExecutionError(f"Unable to remove {cwd}: {exc}")
     return True
@@ -89,7 +89,7 @@ def get_str(
 ):
     """
     .. versionadded:: 2014.7.0
-    .. versionchanged:: 3004.0
+    .. versionchanged:: 3004
 
     Changed the default character set used to include symbols and implemented arguments to control the used character set.
 

@@ -99,14 +99,14 @@ def get_str(
         Any valid number of bytes.
 
     chars : None
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         String with any character that should be used to generate random string.
 
        This argument supersedes all other character controlling arguments.
 
     lowercase : True
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use lowercase letters in generated random string.
         (see :py:data:`string.ascii_lowercase`)

@@ -114,7 +114,7 @@ def get_str(
         This argument is superseded by chars.
 
     uppercase : True
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use uppercase letters in generated random string.
         (see :py:data:`string.ascii_uppercase`)

@@ -122,7 +122,7 @@ def get_str(
         This argument is superseded by chars.
 
     digits : True
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use digits in generated random string.
         (see :py:data:`string.digits`)

@@ -130,7 +130,7 @@ def get_str(
         This argument is superseded by chars.
 
     printable : False
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use printable characters in generated random string and includes lowercase, uppercase,
         digits, punctuation and whitespace.

@@ -143,7 +143,7 @@ def get_str(
         This argument is superseded by chars.
 
     punctuation : True
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use punctuation characters in generated random string.
         (see :py:data:`string.punctuation`)

@@ -151,7 +151,7 @@ def get_str(
         This argument is superseded by chars.
 
     whitespace : False
-        .. versionadded:: 3004.0
+        .. versionadded:: 3004
 
         Use whitespace characters in generated random string.
         (see :py:data:`string.whitespace`)
@@ -175,7 +175,7 @@ def bridge_exists(br):
 
         salt '*' openvswitch.bridge_exists br0
     """
-    cmd = "ovs-vsctl br-exists {}".format(br)
+    cmd = f"ovs-vsctl br-exists {br}"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     return _retcode_to_bool(retcode)

@@ -193,11 +193,11 @@ def bridge_create(br, may_exist=True, parent=None, vlan=None):
     parent : string
         name of the parent bridge (if the bridge shall be created as a fake
         bridge). If specified, vlan must also be specified.
-        .. versionadded:: 3006
+        .. versionadded:: 3006.0
     vlan : int
         VLAN ID of the bridge (if the bridge shall be created as a fake
         bridge). If specified, parent must also be specified.
-        .. versionadded:: 3006
+        .. versionadded:: 3006.0
 
     Returns:
         True on success, else False.

@@ -215,8 +215,8 @@ def bridge_create(br, may_exist=True, parent=None, vlan=None):
         raise ArgumentValueError("If parent is specified, vlan must also be specified.")
     if vlan is not None and parent is None:
         raise ArgumentValueError("If vlan is specified, parent must also be specified.")
-    param_parent = "" if parent is None else " {}".format(parent)
-    param_vlan = "" if vlan is None else " {}".format(vlan)
+    param_parent = "" if parent is None else f" {parent}"
+    param_vlan = "" if vlan is None else f" {vlan}"
     cmd = "ovs-vsctl {1}add-br {0}{2}{3}".format(
         br, param_may_exist, param_parent, param_vlan
     )

@@ -244,7 +244,7 @@ def bridge_delete(br, if_exists=True):
         salt '*' openvswitch.bridge_delete br0
     """
     param_if_exists = _param_if_exists(if_exists)
-    cmd = "ovs-vsctl {1}del-br {0}".format(br, param_if_exists)
+    cmd = f"ovs-vsctl {param_if_exists}del-br {br}"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     return _retcode_to_bool(retcode)

@@ -252,7 +252,7 @@ def bridge_delete(br, if_exists=True):
 
 def bridge_to_parent(br):
     """
-    .. versionadded:: 3006
+    .. versionadded:: 3006.0
 
     Returns the parent bridge of a bridge.
 

@@ -271,7 +271,7 @@ def bridge_to_parent(br):
 
         salt '*' openvswitch.bridge_to_parent br0
     """
-    cmd = "ovs-vsctl br-to-parent {}".format(br)
+    cmd = f"ovs-vsctl br-to-parent {br}"
     result = __salt__["cmd.run_all"](cmd)
     if result["retcode"] != 0:
         return False

@@ -280,7 +280,7 @@ def bridge_to_vlan(br):
 
 def bridge_to_vlan(br):
     """
-    .. versionadded:: 3006
+    .. versionadded:: 3006.0
 
     Returns the VLAN ID of a bridge.
 

@@ -298,7 +298,7 @@ def bridge_to_vlan(br):
 
         salt '*' openvswitch.bridge_to_parent br0
     """
-    cmd = "ovs-vsctl br-to-vlan {}".format(br)
+    cmd = f"ovs-vsctl br-to-vlan {br}"
     result = __salt__["cmd.run_all"](cmd)
     if result["retcode"] != 0:
         return False

@@ -327,9 +327,9 @@ def port_add(br, port, may_exist=False, internal=False):
         salt '*' openvswitch.port_add br0 8080
     """
     param_may_exist = _param_may_exist(may_exist)
-    cmd = "ovs-vsctl {2}add-port {0} {1}".format(br, port, param_may_exist)
+    cmd = f"ovs-vsctl {param_may_exist}add-port {br} {port}"
     if internal:
-        cmd += " -- set interface {} type=internal".format(port)
+        cmd += f" -- set interface {port} type=internal"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     return _retcode_to_bool(retcode)

@@ -358,9 +358,9 @@ def port_remove(br, port, if_exists=True):
     param_if_exists = _param_if_exists(if_exists)
 
     if port and not br:
-        cmd = "ovs-vsctl {1}del-port {0}".format(port, param_if_exists)
+        cmd = f"ovs-vsctl {param_if_exists}del-port {port}"
     else:
-        cmd = "ovs-vsctl {2}del-port {0} {1}".format(br, port, param_if_exists)
+        cmd = f"ovs-vsctl {param_if_exists}del-port {br} {port}"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     return _retcode_to_bool(retcode)

@@ -384,7 +384,7 @@ def port_list(br):
 
         salt '*' openvswitch.port_list br0
     """
-    cmd = "ovs-vsctl list-ports {}".format(br)
+    cmd = f"ovs-vsctl list-ports {br}"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     stdout = result["stdout"]

@@ -409,7 +409,7 @@ def port_get_tag(port):
 
         salt '*' openvswitch.port_get_tag tap0
     """
-    cmd = "ovs-vsctl get port {} tag".format(port)
+    cmd = f"ovs-vsctl get port {port} tag"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     stdout = result["stdout"]

@@ -434,7 +434,7 @@ def interface_get_options(port):
 
         salt '*' openvswitch.interface_get_options tap0
     """
-    cmd = "ovs-vsctl get interface {} options".format(port)
+    cmd = f"ovs-vsctl get interface {port} options"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     stdout = result["stdout"]

@@ -459,7 +459,7 @@ def interface_get_type(port):
 
         salt '*' openvswitch.interface_get_type tap0
     """
-    cmd = "ovs-vsctl get interface {} type".format(port)
+    cmd = f"ovs-vsctl get interface {port} type"
     result = __salt__["cmd.run_all"](cmd)
     retcode = result["retcode"]
     stdout = result["stdout"]

@@ -495,15 +495,15 @@ def port_create_vlan(br, port, id, internal=False):
     elif not internal and port not in interfaces:
         return False
     elif port in port_list(br):
-        cmd = "ovs-vsctl set port {} tag={}".format(port, id)
+        cmd = f"ovs-vsctl set port {port} tag={id}"
         if internal:
-            cmd += " -- set interface {} type=internal".format(port)
+            cmd += f" -- set interface {port} type=internal"
         result = __salt__["cmd.run_all"](cmd)
         return _retcode_to_bool(result["retcode"])
     else:
-        cmd = "ovs-vsctl add-port {} {} tag={}".format(br, port, id)
+        cmd = f"ovs-vsctl add-port {br} {port} tag={id}"
         if internal:
-            cmd += " -- set interface {} type=internal".format(port)
+            cmd += f" -- set interface {port} type=internal"
         result = __salt__["cmd.run_all"](cmd)
         return _retcode_to_bool(result["retcode"])
 

@@ -599,7 +599,7 @@ def port_create_vxlan(br, port, id, remote, dst_port=None):
 
 def db_get(table, record, column, if_exists=False):
     """
-    .. versionadded:: 3006
+    .. versionadded:: 3006.0
 
     Gets a column's value for a specific record.
 

@@ -622,7 +622,7 @@ def db_get(table, record, column, if_exists=False):
 
         salt '*' openvswitch.db_get Port br0 vlan_mode
     """
-    cmd = ["ovs-vsctl", "--format=json", "--columns={}".format(column)]
+    cmd = ["ovs-vsctl", "--format=json", f"--columns={column}"]
     if if_exists:
         cmd += ["--if-exists"]
     cmd += ["list", table, record]

@@ -638,7 +638,7 @@ def db_set(table, record, column, value, if_exists=False):
 
 def db_set(table, record, column, value, if_exists=False):
     """
-    .. versionadded:: 3006
+    .. versionadded:: 3006.0
 
     Sets a column's value for a specific record.
 

@@ -666,7 +666,7 @@ def db_set(table, record, column, value, if_exists=False):
     cmd = ["ovs-vsctl"]
     if if_exists:
         cmd += ["--if-exists"]
-    cmd += ["set", table, record, "{}={}".format(column, json.dumps(value))]
+    cmd += ["set", table, record, f"{column}={json.dumps(value)}"]
     result = __salt__["cmd.run_all"](cmd)
     if result["retcode"] != 0:
         return result["stderr"]

@@ -32,7 +32,7 @@ def get(
     saltenv=None,
 ):
     """
-    .. versionadded:: 0.14
+    .. versionadded:: 0.14.0
 
     Attempt to retrieve the named value from :ref:`in-memory pillar data
     <pillar-in-memory>`. If the pillar key is not present in the in-memory

@@ -184,7 +184,7 @@ def get(
 
     ret = salt.utils.data.traverse_dict_and_list(pillar_dict, key, default, delimiter)
     if ret is KeyError:
-        raise KeyError("Pillar key not found: {}".format(key))
+        raise KeyError(f"Pillar key not found: {key}")
 
     return ret
 

@@ -264,9 +264,7 @@ def items(*args, **kwargs):
             valid_rend=__opts__["decrypt_pillar_renderers"],
         )
     except Exception as exc:  # pylint: disable=broad-except
-        raise CommandExecutionError(
-            "Failed to decrypt pillar override: {}".format(exc)
-        )
+        raise CommandExecutionError(f"Failed to decrypt pillar override: {exc}")
 
     pillar = salt.pillar.get_pillar(
         __opts__,

@@ -295,7 +293,7 @@ def _obfuscate_inner(var):
     elif isinstance(var, (list, set, tuple)):
         return type(var)(_obfuscate_inner(v) for v in var)
     else:
-        return "<{}>".format(var.__class__.__name__)
+        return f"<{var.__class__.__name__}>"
 
 
 def obfuscate(*args, **kwargs):

@@ -538,10 +536,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
     ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, KeyError, delimiter)
 
     if ret is KeyError:
-        raise KeyError("Pillar key not found: {}".format(key))
+        raise KeyError(f"Pillar key not found: {key}")
 
     if not isinstance(ret, dict):
-        raise ValueError("Pillar value in key {} is not a dict".format(key))
+        raise ValueError(f"Pillar value in key {key} is not a dict")
 
     return list(ret)
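The `ret is KeyError` checks above rely on a sentinel-default pattern: pass a unique object as the default and test identity afterwards, so a stored value of None stays distinguishable from a missing key. A minimal sketch, not the traverse_dict_and_list implementation:

SENTINEL = KeyError  # any object compared by identity works as a marker

def lookup(data, key, default):
    return data.get(key, default)

pillar = {"present": None}
assert lookup(pillar, "present", SENTINEL) is None      # key exists, value is None
assert lookup(pillar, "missing", SENTINEL) is SENTINEL  # key is truly absent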
@@ -113,7 +113,7 @@ def __virtual__():
             "load rh_service.py as virtual 'service'",
         )
         return __virtualname__
-    return (False, "Cannot load rh_service module: OS not in {}".format(enable))
+    return (False, f"Cannot load rh_service module: OS not in {enable}")
 
 
 def _runlevel():

@@ -137,7 +137,7 @@ def _chkconfig_add(name):
     /etc/init.d. The service is initially configured to be disabled at all
     run-levels.
     """
-    cmd = "/sbin/chkconfig --add {}".format(name)
+    cmd = f"/sbin/chkconfig --add {name}"
     if __salt__["cmd.retcode"](cmd, python_shell=False) == 0:
         log.info('Added initscript "%s" to chkconfig', name)
         return True

@@ -150,7 +150,7 @@ def _service_is_upstart(name):
     """
     Return True if the service is an upstart service, otherwise return False.
     """
-    return HAS_UPSTART and os.path.exists("/etc/init/{}.conf".format(name))
+    return HAS_UPSTART and os.path.exists(f"/etc/init/{name}.conf")
 
 
 def _service_is_sysv(name):

@@ -169,7 +169,7 @@ def _service_is_chkconfig(name):
     """
     Return True if the service is managed by chkconfig.
     """
-    cmdline = "/sbin/chkconfig --list {}".format(name)
+    cmdline = f"/sbin/chkconfig --list {name}"
     return (
         __salt__["cmd.retcode"](cmdline, python_shell=False, ignore_retcode=True) == 0
     )

@@ -188,7 +188,7 @@ def _sysv_is_enabled(name, runlevel=None):
 
     if runlevel is None:
         runlevel = _runlevel()
-    return len(glob.glob("/etc/rc.d/rc{}.d/S??{}".format(runlevel, name))) > 0
+    return len(glob.glob(f"/etc/rc.d/rc{runlevel}.d/S??{name}")) > 0
 
 
 def _chkconfig_is_enabled(name, runlevel=None):

@@ -197,14 +197,14 @@ def _chkconfig_is_enabled(name, runlevel=None):
     return ``False``. If ``runlevel`` is ``None``, then use the current
     runlevel.
     """
-    cmdline = "/sbin/chkconfig --list {}".format(name)
+    cmdline = f"/sbin/chkconfig --list {name}"
     result = __salt__["cmd.run_all"](cmdline, python_shell=False)
 
     if runlevel is None:
         runlevel = _runlevel()
     if result["retcode"] == 0:
         for row in result["stdout"].splitlines():
-            if "{}:on".format(runlevel) in row:
+            if f"{runlevel}:on" in row:
                 if row.split()[0] == name:
                     return True
             elif row.split() == [name, "on"]:

@@ -220,7 +220,7 @@ def _sysv_enable(name):
     """
     if not _service_is_chkconfig(name) and not _chkconfig_add(name):
         return False
-    cmd = "/sbin/chkconfig {} on".format(name)
+    cmd = f"/sbin/chkconfig {name} on"
    return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -233,7 +233,7 @@ def _sysv_disable(name):
     """
     if not _service_is_chkconfig(name) and not _chkconfig_add(name):
         return False
-    cmd = "/sbin/chkconfig {} off".format(name)
+    cmd = f"/sbin/chkconfig {name} off"
     return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -244,7 +244,7 @@ def _sysv_delete(name):
     """
     if not _service_is_chkconfig(name):
         return False
-    cmd = "/sbin/chkconfig --del {}".format(name)
+    cmd = f"/sbin/chkconfig --del {name}"
     return not __salt__["cmd.retcode"](cmd)
 
 

@@ -253,10 +253,10 @@ def _upstart_delete(name):
     Delete an upstart service. This will only rename the .conf file
     """
     if HAS_UPSTART:
-        if os.path.exists("/etc/init/{}.conf".format(name)):
+        if os.path.exists(f"/etc/init/{name}.conf"):
             os.rename(
-                "/etc/init/{}.conf".format(name),
-                "/etc/init/{}.conf.removed".format(name),
+                f"/etc/init/{name}.conf",
+                f"/etc/init/{name}.conf.removed",
             )
     return True
 

@@ -435,9 +435,9 @@ def start(name):
         salt '*' service.start <service name>
     """
     if _service_is_upstart(name):
-        cmd = "start {}".format(name)
+        cmd = f"start {name}"
     else:
-        cmd = "/sbin/service {} start".format(name)
+        cmd = f"/sbin/service {name} start"
     return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -452,9 +452,9 @@ def stop(name):
         salt '*' service.stop <service name>
     """
     if _service_is_upstart(name):
-        cmd = "stop {}".format(name)
+        cmd = f"stop {name}"
     else:
-        cmd = "/sbin/service {} stop".format(name)
+        cmd = f"/sbin/service {name} stop"
     return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -469,9 +469,9 @@ def restart(name):
         salt '*' service.restart <service name>
     """
     if _service_is_upstart(name):
-        cmd = "restart {}".format(name)
+        cmd = f"restart {name}"
     else:
-        cmd = "/sbin/service {} restart".format(name)
+        cmd = f"/sbin/service {name} restart"
     return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -486,9 +486,9 @@ def reload_(name):
         salt '*' service.reload <service name>
     """
     if _service_is_upstart(name):
-        cmd = "reload {}".format(name)
+        cmd = f"reload {name}"
     else:
-        cmd = "/sbin/service {} reload".format(name)
+        cmd = f"/sbin/service {name} reload"
     return not __salt__["cmd.retcode"](cmd, python_shell=False)
 
 

@@ -526,12 +526,12 @@ def status(name, sig=None):
     results = {}
     for service in services:
         if _service_is_upstart(service):
-            cmd = "status {}".format(service)
+            cmd = f"status {service}"
             results[service] = "start/running" in __salt__["cmd.run"](
                 cmd, python_shell=False
             )
         else:
-            cmd = "/sbin/service {} status".format(service)
+            cmd = f"/sbin/service {service} status"
             results[service] = (
                 __salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True)
                 == 0

@@ -545,7 +545,7 @@ def delete(name, **kwargs):
     """
     Delete the named service
 
-    .. versionadded:: 2016.3
+    .. versionadded:: 2016.3.0
 
     CLI Example:
 
@ -135,22 +135,22 @@ def setenforce(mode):
|
|||
if isinstance(mode, str):
|
||||
if mode.lower() == "enforcing":
|
||||
mode = "1"
|
||||
modestring = "Enforcing"
|
||||
modestring = "enforcing"
|
||||
elif mode.lower() == "permissive":
|
||||
mode = "0"
|
||||
modestring = "Permissive"
|
||||
modestring = "permissive"
|
||||
elif mode.lower() == "disabled":
|
||||
mode = "0"
|
||||
modestring = "Disabled"
|
||||
modestring = "disabled"
|
||||
else:
|
||||
return "Invalid mode {}".format(mode)
|
||||
return f"Invalid mode {mode}"
|
||||
elif isinstance(mode, int):
|
||||
if mode:
|
||||
mode = "1"
|
||||
else:
|
||||
mode = "0"
|
||||
else:
|
||||
return "Invalid mode {}".format(mode)
|
||||
return f"Invalid mode {mode}"
|
||||
|
||||
# enforce file does not exist if currently disabled. Only for toggling enforcing/permissive
|
||||
if getenforce() != "Disabled":
|
||||
|
@ -204,9 +204,9 @@ def setsebool(boolean, value, persist=False):
|
|||
salt '*' selinux.setsebool virt_use_usb off
|
||||
"""
|
||||
if persist:
|
||||
cmd = "setsebool -P {} {}".format(boolean, value)
|
||||
cmd = f"setsebool -P {boolean} {value}"
|
||||
else:
|
||||
cmd = "setsebool {} {}".format(boolean, value)
|
||||
cmd = f"setsebool {boolean} {value}"
|
||||
return not __salt__["cmd.retcode"](cmd, python_shell=False)
|
||||
|
||||
|
||||
|
@ -227,7 +227,7 @@ def setsebools(pairs, persist=False):
|
|||
else:
|
||||
cmd = "setsebool "
|
||||
for boolean, value in pairs.items():
|
||||
cmd = "{} {}={}".format(cmd, boolean, value)
|
||||
cmd = f"{cmd} {boolean}={value}"
|
||||
return not __salt__["cmd.retcode"](cmd, python_shell=False)
|
||||
|
||||
|
||||
|
@ -284,9 +284,9 @@ def setsemod(module, state):
|
|||
.. versionadded:: 2016.3.0
|
||||
"""
|
||||
if state.lower() == "enabled":
|
||||
cmd = "semodule -e {}".format(module)
|
||||
cmd = f"semodule -e {module}"
|
||||
elif state.lower() == "disabled":
|
||||
cmd = "semodule -d {}".format(module)
|
||||
cmd = f"semodule -d {module}"
|
||||
return not __salt__["cmd.retcode"](cmd)
|
||||
|
||||
|
||||
|
@ -304,7 +304,7 @@ def install_semod(module_path):
|
|||
"""
|
||||
if module_path.find("salt://") == 0:
|
||||
module_path = __salt__["cp.cache_file"](module_path)
|
||||
cmd = "semodule -i {}".format(module_path)
|
||||
cmd = f"semodule -i {module_path}"
|
||||
return not __salt__["cmd.retcode"](cmd)
|
||||
|
||||
|
||||
|
@ -320,7 +320,7 @@ def remove_semod(module):
|
|||
|
||||
.. versionadded:: 2016.11.6
|
||||
"""
|
||||
cmd = "semodule -r {}".format(module)
|
||||
cmd = f"semodule -r {module}"
|
||||
return not __salt__["cmd.retcode"](cmd)
|
||||
|
||||
|
||||
|
@ -376,7 +376,7 @@ def _validate_filetype(filetype):
|
|||
specification. Throws a SaltInvocationError if it isn't.
|
||||
"""
|
||||
if filetype not in _SELINUX_FILETYPES.keys():
|
||||
raise SaltInvocationError("Invalid filetype given: {}".format(filetype))
|
||||
raise SaltInvocationError(f"Invalid filetype given: {filetype}")
|
||||
return True
|
||||
|
||||
|
||||
|
@ -394,7 +394,7 @@ def _parse_protocol_port(name, protocol, port):
|
|||
protocol_port_pattern = r"^(tcp|udp)\/(([\d]+)\-?[\d]+)$"
|
||||
name_parts = re.match(protocol_port_pattern, name)
|
||||
if not name_parts:
|
||||
name_parts = re.match(protocol_port_pattern, "{}/{}".format(protocol, port))
|
||||
name_parts = re.match(protocol_port_pattern, f"{protocol}/{port}")
|
||||
if not name_parts:
|
||||
raise SaltInvocationError(
|
||||
'Invalid name "{}" format and protocol and port not provided or invalid:'
|
||||
|
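The pattern above accepts specs such as ``tcp/8080`` or ``udp/5000-5010`` and rejects anything else; a quick hedged check:

.. code-block:: python

    import re

    protocol_port_pattern = r"^(tcp|udp)\/(([\d]+)\-?[\d]+)$"

    for candidate in ("tcp/8080", "udp/5000-5010", "icmp/1"):
        match = re.match(protocol_port_pattern, candidate)
        if match:
            print(candidate, "-> protocol:", match.group(1), "port:", match.group(2))
        else:
            print(candidate, "-> rejected")
    # tcp/8080 -> protocol: tcp port: 8080
    # udp/5000-5010 -> protocol: udp port: 5000-5010
    # icmp/1 -> rejected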
@ -609,20 +609,20 @@ def _fcontext_add_or_delete_policy(
|
|||
"""
|
||||
if action not in ["add", "delete"]:
|
||||
raise SaltInvocationError(
|
||||
'Actions supported are "add" and "delete", not "{}".'.format(action)
|
||||
f'Actions supported are "add" and "delete", not "{action}".'
|
||||
)
|
||||
cmd = "semanage fcontext --{}".format(action)
|
||||
cmd = f"semanage fcontext --{action}"
|
||||
# "semanage --ftype a" isn't valid on Centos 6,
|
||||
# don't pass --ftype since "a" is the default filetype.
|
||||
if filetype is not None and filetype != "a":
|
||||
_validate_filetype(filetype)
|
||||
cmd += " --ftype {}".format(filetype)
|
||||
cmd += f" --ftype {filetype}"
|
||||
if sel_type is not None:
|
||||
cmd += " --type {}".format(sel_type)
|
||||
cmd += f" --type {sel_type}"
|
||||
if sel_user is not None:
|
||||
cmd += " --seuser {}".format(sel_user)
|
||||
cmd += f" --seuser {sel_user}"
|
||||
if sel_level is not None:
|
||||
cmd += " --range {}".format(sel_level)
|
||||
cmd += f" --range {sel_level}"
|
||||
cmd += " " + re.escape(name)
|
||||
return __salt__["cmd.run_all"](cmd)
|
||||
|
||||
|
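A hedged reconstruction of the flag assembly above (hypothetical helper name), useful for eyeballing the final ``semanage`` invocation without running it:

.. code-block:: python

    import re

    def build_fcontext_cmd(action, name, filetype=None, sel_type=None,
                           sel_user=None, sel_level=None):
        # Mirrors the option handling above, raising on unsupported actions.
        if action not in ("add", "delete"):
            raise ValueError(f'Actions supported are "add" and "delete", not "{action}".')
        cmd = f"semanage fcontext --{action}"
        if filetype is not None and filetype != "a":
            cmd += f" --ftype {filetype}"
        if sel_type is not None:
            cmd += f" --type {sel_type}"
        if sel_user is not None:
            cmd += f" --seuser {sel_user}"
        if sel_level is not None:
            cmd += f" --range {sel_level}"
        return cmd + " " + re.escape(name)

    print(build_fcontext_cmd("add", "/srv/www(/.*)?", sel_type="httpd_sys_content_t"))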
@ -841,15 +841,15 @@ def _port_add_or_delete_policy(
|
|||
"""
|
||||
if action not in ["add", "delete"]:
|
||||
raise SaltInvocationError(
|
||||
'Actions supported are "add" and "delete", not "{}".'.format(action)
|
||||
f'Actions supported are "add" and "delete", not "{action}".'
|
||||
)
|
||||
if action == "add" and not sel_type:
|
||||
raise SaltInvocationError("SELinux Type is required to add a policy")
|
||||
(protocol, port) = _parse_protocol_port(name, protocol, port)
|
||||
cmd = "semanage port --{} --proto {}".format(action, protocol)
|
||||
cmd = f"semanage port --{action} --proto {protocol}"
|
||||
if sel_type:
|
||||
cmd += " --type {}".format(sel_type)
|
||||
cmd += f" --type {sel_type}"
|
||||
if sel_range:
|
||||
cmd += " --range {}".format(sel_range)
|
||||
cmd += " {}".format(port)
|
||||
cmd += f" --range {sel_range}"
|
||||
cmd += f" {port}"
|
||||
return __salt__["cmd.run_all"](cmd)
|
||||
|
|
|
@ -546,7 +546,7 @@ def list_state_functions(*args, **kwargs): # pylint: disable=unused-argument
|
|||
salt '*' sys.list_state_functions 'file.*'
|
||||
salt '*' sys.list_state_functions 'file.s*'
|
||||
|
||||
.. versionadded:: 2016.9
|
||||
.. versionadded:: 2016.9.0
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
|
|
@ -4650,7 +4650,7 @@ class _policy_info:
|
|||
"""
|
||||
add quotes around the string
|
||||
"""
|
||||
return '"{}"'.format(val)
|
||||
return f'"{val}"'
|
||||
|
||||
@classmethod
|
||||
def _binary_enable_zero_disable_one_conversion(cls, val, **kwargs):
|
||||
|
@ -4664,7 +4664,7 @@ class _policy_info:
|
|||
elif ord(val) == 1:
|
||||
return "Enabled"
|
||||
else:
|
||||
return "Invalid Value: {!r}".format(val)
|
||||
return f"Invalid Value: {val!r}"
|
||||
else:
|
||||
return "Not Defined"
|
||||
except TypeError:
|
||||
|
@ -4806,9 +4806,9 @@ class _policy_info:
|
|||
try:
|
||||
userSid = win32security.LookupAccountSid("", _sid)
|
||||
if userSid[1]:
|
||||
userSid = "{1}\\{0}".format(userSid[0], userSid[1])
|
||||
userSid = f"{userSid[1]}\\{userSid[0]}"
|
||||
else:
|
||||
userSid = "{}".format(userSid[0])
|
||||
userSid = f"{userSid[0]}"
|
||||
# TODO: This needs to be more specific
|
||||
except Exception: # pylint: disable=broad-except
|
||||
userSid = win32security.ConvertSidToStringSid(_sid)
|
||||
|
@ -5000,7 +5000,7 @@ def _updateNamespace(item, new_namespace):
|
|||
temp_item = item.tag[i + 1 :]
|
||||
else:
|
||||
temp_item = item.tag
|
||||
item.tag = "{{{0}}}{1}".format(new_namespace, temp_item)
|
||||
item.tag = f"{{{new_namespace}}}{temp_item}"
|
||||
for child in item.getiterator():
|
||||
if isinstance(child.tag, str):
|
||||
temp_item = ""
|
||||
|
@ -5009,7 +5009,7 @@ def _updateNamespace(item, new_namespace):
|
|||
temp_item = child.tag[i + 1 :]
|
||||
else:
|
||||
temp_item = child.tag
|
||||
child.tag = "{{{0}}}{1}".format(new_namespace, temp_item)
|
||||
child.tag = f"{{{new_namespace}}}{temp_item}"
|
||||
return item
|
||||
|
||||
|
||||
|
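A hedged standalone sketch of the tag rewrite above: lxml and ElementTree encode a namespace as a ``{uri}localname`` prefix on each tag, so re-namespacing is string surgery over the whole tree (``iter()`` is used here in place of the deprecated ``getiterator()``).

.. code-block:: python

    import xml.etree.ElementTree as ET

    def update_namespace(item, new_namespace):
        # Strip any existing "{uri}" prefix, then re-wrap with the new URI.
        for node in item.iter():
            if isinstance(node.tag, str):
                local = node.tag.split("}", 1)[-1]
                node.tag = f"{{{new_namespace}}}{local}"
        return item

    root = ET.fromstring("<policy><rule/></policy>")
    update_namespace(root, "urn:example:ns")
    print(root.tag)  # {urn:example:ns}policy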
@ -5077,10 +5077,10 @@ def _parse_xml(adm_file):
|
|||
|
||||
modified_xml = ""
|
||||
with salt.utils.files.fopen(adm_file, "rb") as rfh:
|
||||
file_hash = "{:X}".format(zlib.crc32(rfh.read()) & 0xFFFFFFFF)
|
||||
file_hash = f"{zlib.crc32(rfh.read()) & 0xFFFFFFFF:X}"
|
||||
|
||||
name, ext = os.path.splitext(os.path.basename(adm_file))
|
||||
hashed_filename = "{}-{}{}".format(name, file_hash, ext)
|
||||
hashed_filename = f"{name}-{file_hash}{ext}"
|
||||
|
||||
cache_dir = os.path.join(__opts__["cachedir"], "lgpo", "policy_defs")
|
||||
if not os.path.exists(cache_dir):
|
||||
|
@ -5092,7 +5092,7 @@ def _parse_xml(adm_file):
|
|||
log.debug("LGPO: Generating policy template cache for %s%s", name, ext)
|
||||
|
||||
# Remove old files, keep the cache clean
|
||||
file_list = glob.glob(os.path.join(cache_dir, "{}*{}".format(name, ext)))
|
||||
file_list = glob.glob(os.path.join(cache_dir, f"{name}*{ext}"))
|
||||
for file_path in file_list:
|
||||
os.remove(file_path)
|
||||
|
||||
|
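The cache key above is just the CRC32 of the file bytes rendered as uppercase hex; a minimal sketch of how the hashed cache filename is derived (the file name and output below are made up for illustration):

.. code-block:: python

    import os
    import zlib

    def hashed_cache_name(path):
        # CRC32 of the raw bytes, masked to 32 bits, as uppercase hex.
        with open(path, "rb") as rfh:
            file_hash = f"{zlib.crc32(rfh.read()) & 0xFFFFFFFF:X}"
        name, ext = os.path.splitext(os.path.basename(path))
        return f"{name}-{file_hash}{ext}"

    # e.g. hashed_cache_name("WindowsUpdate.admx") -> "WindowsUpdate-9A2F41C7.admx"
    # (illustrative output only)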
@ -5650,7 +5650,7 @@ def _set_advaudit_value(option, value):
|
|||
"""
|
||||
# Set the values in both audit.csv files
|
||||
if not _set_advaudit_file_data(option=option, value=value):
|
||||
raise CommandExecutionError("Failed to set audit.csv option: {}".format(option))
|
||||
raise CommandExecutionError(f"Failed to set audit.csv option: {option}")
|
||||
# Apply the settings locally
|
||||
if not _set_advaudit_pol_data(option=option, value=value):
|
||||
# Only log this error, it will be in effect the next time the machine
|
||||
|
@ -5695,7 +5695,7 @@ def _get_netsh_value(profile, option):
|
|||
|
||||
def _set_netsh_value(profile, section, option, value):
|
||||
if section not in ("firewallpolicy", "settings", "logging", "state"):
|
||||
raise ValueError("LGPO: Invalid section: {}".format(section))
|
||||
raise ValueError(f"LGPO: Invalid section: {section}")
|
||||
log.trace(
|
||||
"LGPO: Setting the following\nProfile: %s\nSection: %s\nOption: %s\nValue: %s",
|
||||
profile,
|
||||
|
@ -5739,7 +5739,7 @@ def _load_secedit_data():
|
|||
Returns:
|
||||
str: The contents of the file generated by the secedit command
|
||||
"""
|
||||
f_exp = os.path.join(__opts__["cachedir"], "secedit-{}.txt".format(UUID))
|
||||
f_exp = os.path.join(__opts__["cachedir"], f"secedit-{UUID}.txt")
|
||||
try:
|
||||
__salt__["cmd.run"](["secedit", "/export", "/cfg", f_exp])
|
||||
with salt.utils.files.fopen(f_exp, encoding="utf-16") as fp:
|
||||
|
@ -5789,7 +5789,7 @@ def _write_secedit_data(inf_data):
|
|||
# Set file names
|
||||
# The database must persist in order for the settings to remain in effect
|
||||
f_sdb = os.path.join(os.getenv("WINDIR"), "security", "database", "salt.sdb")
|
||||
f_inf = os.path.join(__opts__["cachedir"], "secedit-{}.inf".format(UUID))
|
||||
f_inf = os.path.join(__opts__["cachedir"], f"secedit-{UUID}.inf")
|
||||
|
||||
try:
|
||||
# Write the changes to the inf file
|
||||
|
@ -5949,9 +5949,7 @@ def _getAdmlPresentationRefId(adml_data, ref_id):
|
|||
"""
|
||||
helper function to check for a presentation label for a policy element
|
||||
"""
|
||||
search_results = adml_data.xpath(
|
||||
'//*[@*[local-name() = "refId"] = "{}"]'.format(ref_id)
|
||||
)
|
||||
search_results = adml_data.xpath(f'//*[@*[local-name() = "refId"] = "{ref_id}"]')
|
||||
alternate_label = ""
|
||||
if search_results:
|
||||
for result in search_results:
|
||||
|
@ -6217,7 +6215,7 @@ def _encode_string(value):
|
|||
elif not isinstance(value, str):
|
||||
# Should we raise an error here, or attempt to cast to a string
|
||||
raise TypeError(
|
||||
"Value {} is not a string type\nType: {}".format(repr(value), type(value))
|
||||
f"Value {repr(value)} is not a string type\nType: {type(value)}"
|
||||
)
|
||||
return b"".join([value.encode("utf-16-le"), encoded_null])
|
||||
|
||||
|
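Registry.pol string data is UTF-16LE with an explicit NUL terminator; a hedged sketch of the encoding above (``encoded_null`` is defined elsewhere in the module and is reproduced here as an assumption):

.. code-block:: python

    encoded_null = chr(0).encode("utf-16-le")  # b"\x00\x00", assumed from context

    def encode_string(value):
        # NUL-terminated UTF-16LE, as Registry.pol expects for string data.
        return b"".join([value.encode("utf-16-le"), encoded_null])

    print(encode_string("on"))  # b'o\x00n\x00\x00\x00'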
@ -6258,7 +6256,7 @@ def _buildKnownDataSearchString(
|
|||
encoded_semicolon,
|
||||
chr(registry.vtype[reg_vtype]).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode("utf-32-le"),
|
||||
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
" ".encode("utf-16-le"),
|
||||
encoded_null,
|
||||
|
@ -6438,7 +6436,7 @@ def _processValueItem(
|
|||
encoded_semicolon,
|
||||
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
|
||||
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode(
|
||||
"utf-32-le"
|
||||
),
|
||||
encoded_semicolon,
|
||||
|
@ -6493,7 +6491,7 @@ def _processValueItem(
|
|||
encoded_semicolon,
|
||||
chr(
|
||||
len(
|
||||
"{}{}".format(element_values[i], chr(0)).encode(
|
||||
f"{element_values[i]}{chr(0)}".encode(
|
||||
"utf-16-le"
|
||||
)
|
||||
)
|
||||
|
@ -6524,9 +6522,7 @@ def _processValueItem(
|
|||
encoded_semicolon,
|
||||
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
|
||||
"utf-32-le"
|
||||
),
|
||||
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
" ".encode("utf-16-le"),
|
||||
encoded_null,
|
||||
|
@ -6590,9 +6586,7 @@ def _processValueItem(
|
|||
encoded_semicolon,
|
||||
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
|
||||
"utf-32-le"
|
||||
),
|
||||
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
|
||||
encoded_semicolon,
|
||||
" ".encode("utf-16-le"),
|
||||
encoded_null,
|
||||
|
@ -6644,10 +6638,10 @@ def _checkAllAdmxPolicies(
|
|||
if policy_file_data:
|
||||
log.trace("POLICY CLASS %s has file data", policy_class)
|
||||
policy_filedata_split = re.sub(
|
||||
salt.utils.stringutils.to_bytes(r"\]{}$".format(chr(0))),
|
||||
salt.utils.stringutils.to_bytes(rf"\]{chr(0)}$"),
|
||||
b"",
|
||||
re.sub(
|
||||
salt.utils.stringutils.to_bytes(r"^\[{}".format(chr(0))),
|
||||
salt.utils.stringutils.to_bytes(rf"^\[{chr(0)}"),
|
||||
b"",
|
||||
re.sub(
|
||||
re.escape(REG_POL_HEADER.encode("utf-16-le")),
|
||||
|
@ -6661,7 +6655,7 @@ def _checkAllAdmxPolicies(
|
|||
# Get the policy for each item defined in Registry.pol
|
||||
for policy_item in policy_filedata_split:
|
||||
policy_item_key = (
|
||||
policy_item.split("{};".format(chr(0)).encode("utf-16-le"))[0]
|
||||
policy_item.split(f"{chr(0)};".encode("utf-16-le"))[0]
|
||||
.decode("utf-16-le")
|
||||
.lower()
|
||||
)
|
||||
|
@ -6927,7 +6921,7 @@ def _checkAllAdmxPolicies(
|
|||
|
||||
if etree.QName(child_item).localname == "boolean":
|
||||
# https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx
|
||||
if child_item is not None:
|
||||
if len(child_item) > 0:
|
||||
if (
|
||||
TRUE_VALUE_XPATH(child_item)
|
||||
and this_element_name not in configured_elements
|
||||
|
@ -7424,7 +7418,7 @@ def _build_parent_list(policy_definition, return_full_policy_names, adml_languag
|
|||
parent_list = []
|
||||
policy_namespace = next(iter(policy_definition.nsmap))
|
||||
parent_category = policy_definition.xpath(
|
||||
"{}:parentCategory/@ref".format(policy_namespace),
|
||||
f"{policy_namespace}:parentCategory/@ref",
|
||||
namespaces=policy_definition.nsmap,
|
||||
)
|
||||
admx_policy_definitions = _get_policy_definitions(language=adml_language)
|
||||
|
@ -7495,14 +7489,14 @@ def _admx_policy_parent_walk(
|
|||
)
|
||||
path.append(this_parent_name)
|
||||
if tparent_category.xpath(
|
||||
"{}:parentCategory/@ref".format(policy_namespace), namespaces=policy_nsmap
|
||||
f"{policy_namespace}:parentCategory/@ref", namespaces=policy_nsmap
|
||||
):
|
||||
# parent has a parent
|
||||
path = _admx_policy_parent_walk(
|
||||
path=path,
|
||||
policy_namespace=policy_namespace,
|
||||
parent_category=tparent_category.xpath(
|
||||
"{}:parentCategory/@ref".format(policy_namespace),
|
||||
f"{policy_namespace}:parentCategory/@ref",
|
||||
namespaces=policy_nsmap,
|
||||
)[0],
|
||||
policy_nsmap=policy_nsmap,
|
||||
|
@ -8534,7 +8528,7 @@ def _lookup_admin_template(policy_name, policy_class, adml_language="en-US"):
|
|||
False,
|
||||
None,
|
||||
[],
|
||||
"Unable to find {} policy {}".format(policy_class, policy_name),
|
||||
f"Unable to find {policy_class} policy {policy_name}",
|
||||
)
|
||||
|
||||
|
||||
|
@ -9195,7 +9189,7 @@ def _get_policy_adm_setting(
|
|||
)
|
||||
if etree.QName(child_item).localname == "boolean":
|
||||
# https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx
|
||||
if child_item is not None:
|
||||
if len(child_item) > 0:
|
||||
if (
|
||||
TRUE_VALUE_XPATH(child_item)
|
||||
and this_element_name not in configured_elements
|
||||
|
|
|
@ -341,22 +341,22 @@ def set_value(
|
|||
"REG_SZ",
|
||||
]
|
||||
if v_type not in valid_types:
|
||||
msg = "Invalid type: {}".format(v_type)
|
||||
msg = f"Invalid type: {v_type}"
|
||||
raise SaltInvocationError(msg)
|
||||
|
||||
if v_type in ["REG_SZ", "REG_EXPAND_SZ"]:
|
||||
if not isinstance(v_data, str):
|
||||
msg = "{} data must be a string".format(v_type)
|
||||
msg = f"{v_type} data must be a string"
|
||||
raise SaltInvocationError(msg)
|
||||
elif v_type == "REG_MULTI_SZ":
|
||||
if not isinstance(v_data, list):
|
||||
msg = "{} data must be a list".format(v_type)
|
||||
msg = f"{v_type} data must be a list"
|
||||
raise SaltInvocationError(msg)
|
||||
elif v_type in ["REG_DWORD", "REG_QWORD"]:
|
||||
try:
|
||||
int(v_data)
|
||||
except (TypeError, ValueError):
|
||||
msg = "{} data must be an integer".format(v_type)
|
||||
msg = f"{v_type} data must be an integer"
|
||||
raise SaltInvocationError(msg)
|
||||
|
||||
pol_data = read_reg_pol(policy_class=policy_class)
|
||||
|
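A hedged sketch of the validation ladder above: the string types want ``str``, ``REG_MULTI_SZ`` wants a list, and the DWORD/QWORD types accept anything ``int()`` can digest (plain ``ValueError`` stands in for ``SaltInvocationError``):

.. code-block:: python

    def validate_reg_data(v_type, v_data):
        # Mirrors the per-type checks above.
        if v_type in ("REG_SZ", "REG_EXPAND_SZ"):
            if not isinstance(v_data, str):
                raise ValueError(f"{v_type} data must be a string")
        elif v_type == "REG_MULTI_SZ":
            if not isinstance(v_data, list):
                raise ValueError(f"{v_type} data must be a list")
        elif v_type in ("REG_DWORD", "REG_QWORD"):
            try:
                int(v_data)
            except (TypeError, ValueError):
                raise ValueError(f"{v_type} data must be an integer")

    validate_reg_data("REG_DWORD", "42")      # ok: int("42") succeeds
    validate_reg_data("REG_MULTI_SZ", ["a"])  # ok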
@ -367,29 +367,40 @@ def set_value(
|
|||
if key.lower() == p_key.lower():
|
||||
found_key = p_key
|
||||
for p_name in pol_data[p_key]:
|
||||
if v_name.lower() in p_name.lower():
|
||||
if v_name.lower() == p_name.lower().lstrip("**del."):
|
||||
found_name = p_name
|
||||
|
||||
if found_key:
|
||||
if found_name:
|
||||
if "**del." in found_name:
|
||||
log.debug(f"LGPO_REG Mod: Found disabled name: {found_name}")
|
||||
pol_data[found_key][v_name] = pol_data[found_key].pop(found_name)
|
||||
found_name = v_name
|
||||
log.debug(f"LGPO_REG Mod: Updating value: {found_name}")
|
||||
pol_data[found_key][found_name] = {"data": v_data, "type": v_type}
|
||||
else:
|
||||
log.debug(f"LGPO_REG Mod: Setting new value: {found_name}")
|
||||
pol_data[found_key][v_name] = {"data": v_data, "type": v_type}
|
||||
else:
|
||||
log.debug(f"LGPO_REG Mod: Adding new key and value: {found_name}")
|
||||
pol_data[key] = {v_name: {"data": v_data, "type": v_type}}
|
||||
|
||||
write_reg_pol(pol_data, policy_class=policy_class)
|
||||
success = True
|
||||
if not write_reg_pol(pol_data, policy_class=policy_class):
|
||||
log.error("LGPO_REG Mod: Failed to write registry.pol file")
|
||||
success = False
|
||||
|
||||
return salt.utils.win_reg.set_value(
|
||||
if not salt.utils.win_reg.set_value(
|
||||
hive=hive,
|
||||
key=key,
|
||||
vname=v_name,
|
||||
vdata=v_data,
|
||||
vtype=v_type,
|
||||
)
|
||||
):
|
||||
log.error("LGPO_REG Mod: Failed to set registry entry")
|
||||
success = False
|
||||
|
||||
return success
|
||||
|
||||
|
||||
def disable_value(key, v_name, policy_class="machine"):
|
||||
|
@ -445,28 +456,42 @@ def disable_value(key, v_name, policy_class="machine"):
|
|||
if key.lower() == p_key.lower():
|
||||
found_key = p_key
|
||||
for p_name in pol_data[p_key]:
|
||||
if v_name.lower() in p_name.lower():
|
||||
if v_name.lower() == p_name.lower().lstrip("**del."):
|
||||
found_name = p_name
|
||||
|
||||
if found_key:
|
||||
if found_name:
|
||||
if "**del." in found_name:
|
||||
# Already set to delete... do nothing
|
||||
log.debug(f"LGPO_REG Mod: Already disabled: {v_name}")
|
||||
return None
|
||||
log.debug(f"LGPO_REG Mod: Disabling value name: {v_name}")
|
||||
pol_data[found_key].pop(found_name)
|
||||
found_name = "**del.{}".format(found_name)
|
||||
found_name = f"**del.{found_name}"
|
||||
pol_data[found_key][found_name] = {"data": " ", "type": "REG_SZ"}
|
||||
else:
|
||||
pol_data[found_key]["**del.{}".format(v_name)] = {
|
||||
log.debug(f"LGPO_REG Mod: Setting new disabled value name: {v_name}")
|
||||
pol_data[found_key][f"**del.{v_name}"] = {
|
||||
"data": " ",
|
||||
"type": "REG_SZ",
|
||||
}
|
||||
else:
|
||||
pol_data[key] = {"**del.{}".format(v_name): {"data": " ", "type": "REG_SZ"}}
|
||||
log.debug(f"LGPO_REG Mod: Adding new key and disabled value name: {found_name}")
|
||||
pol_data[key] = {f"**del.{v_name}": {"data": " ", "type": "REG_SZ"}}
|
||||
|
||||
write_reg_pol(pol_data, policy_class=policy_class)
|
||||
success = True
|
||||
if not write_reg_pol(pol_data, policy_class=policy_class):
|
||||
log.error("LGPO_REG Mod: Failed to write registry.pol file")
|
||||
success = False
|
||||
|
||||
return salt.utils.win_reg.delete_value(hive=hive, key=key, vname=v_name)
|
||||
ret = salt.utils.win_reg.delete_value(hive=hive, key=key, vname=v_name)
|
||||
if not ret:
|
||||
if ret is None:
|
||||
log.debug("LGPO_REG Mod: Registry key/value already missing")
|
||||
else:
|
||||
log.error("LGPO_REG Mod: Failed to remove registry entry")
|
||||
success = False
|
||||
|
||||
return success
|
||||
|
||||
|
||||
def delete_value(key, v_name, policy_class="Machine"):
|
||||
|
@ -523,20 +548,37 @@ def delete_value(key, v_name, policy_class="Machine"):
|
|||
if key.lower() == p_key.lower():
|
||||
found_key = p_key
|
||||
for p_name in pol_data[p_key]:
|
||||
if v_name.lower() in p_name.lower():
|
||||
if v_name.lower() == p_name.lower().lstrip("**del."):
|
||||
found_name = p_name
|
||||
|
||||
if found_key:
|
||||
if found_name:
|
||||
log.debug(f"LGPO_REG Mod: Removing value name: {found_name}")
|
||||
pol_data[found_key].pop(found_name)
|
||||
else:
|
||||
log.debug(f"LGPO_REG Mod: Value name not found: {v_name}")
|
||||
return None
|
||||
if len(pol_data[found_key]) == 0:
|
||||
log.debug(f"LGPO_REG Mod: Removing empty key: {found_key}")
|
||||
pol_data.pop(found_key)
|
||||
else:
|
||||
log.debug(f"LGPO_REG Mod: Key not found: {key}")
|
||||
return None
|
||||
|
||||
write_reg_pol(pol_data, policy_class=policy_class)
|
||||
success = True
|
||||
if not write_reg_pol(pol_data, policy_class=policy_class):
|
||||
log.error("LGPO_REG Mod: Failed to write registry.pol file")
|
||||
success = False
|
||||
|
||||
return salt.utils.win_reg.delete_value(hive=hive, key=key, vname=v_name)
|
||||
ret = salt.utils.win_reg.delete_value(hive=hive, key=key, vname=v_name)
|
||||
if not ret:
|
||||
if ret is None:
|
||||
log.debug("LGPO_REG Mod: Registry key/value already missing")
|
||||
else:
|
||||
log.error("LGPO_REG Mod: Failed to remove registry entry")
|
||||
success = False
|
||||
|
||||
return success
|
||||
|
||||
|
||||
# This is for testing different settings and verifying that we are writing the
|
||||
|
|
|
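The exact-match comparison introduced above is the heart of the overlapping-name fix: only a name equal to the stored name, once the ``**del.`` disable marker is stripped, counts as the same value. A hedged sketch of just that lookup:

.. code-block:: python

    def find_existing_name(pol_values, v_name):
        # Exact match after stripping the "**del." disable marker; the old
        # substring test would wrongly match "UpdateNotificationLevel"
        # against "SetUpdateNotificationLevel".
        for p_name in pol_values:
            if v_name.lower() == p_name.lower().lstrip("**del."):
                return p_name
        return None

    values = {"SetUpdateNotificationLevel": {"data": 1, "type": "REG_DWORD"}}
    print(find_existing_name(values, "UpdateNotificationLevel"))     # None
    print(find_existing_name(values, "SetUpdateNotificationLevel"))  # SetUpdateNotificationLevel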
@ -509,15 +509,15 @@ def get_system_info():
|
|||
def byte_calc(val):
|
||||
val = float(val)
|
||||
if val < 2**10:
|
||||
return "{:.3f}B".format(val)
|
||||
return f"{val:.3f}B"
|
||||
elif val < 2**20:
|
||||
return "{:.3f}KB".format(val / 2**10)
|
||||
return f"{val / 2**10:.3f}KB"
|
||||
elif val < 2**30:
|
||||
return "{:.3f}MB".format(val / 2**20)
|
||||
return f"{val / 2**20:.3f}MB"
|
||||
elif val < 2**40:
|
||||
return "{:.3f}GB".format(val / 2**30)
|
||||
return f"{val / 2**30:.3f}GB"
|
||||
else:
|
||||
return "{:.3f}TB".format(val / 2**40)
|
||||
return f"{val / 2**40:.3f}TB"
|
||||
|
||||
# Lookup dicts for Win32_OperatingSystem
|
||||
os_type = {1: "Work Station", 2: "Domain Controller", 3: "Server"}
|
||||
|
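A hedged standalone copy of the converter above, handy for sanity-checking the unit boundaries:

.. code-block:: python

    def byte_calc(val):
        # Pick the largest power-of-two unit that keeps the value under 1024.
        val = float(val)
        if val < 2**10:
            return f"{val:.3f}B"
        elif val < 2**20:
            return f"{val / 2**10:.3f}KB"
        elif val < 2**30:
            return f"{val / 2**20:.3f}MB"
        elif val < 2**40:
            return f"{val / 2**30:.3f}GB"
        else:
            return f"{val / 2**40:.3f}TB"

    print(byte_calc(8 * 2**30))  # 8.000GB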
@ -755,7 +755,7 @@ def join_domain(
|
|||
``True`` will restart the computer after a successful join. Default
|
||||
is ``False``
|
||||
|
||||
.. versionadded:: 2015.8.2/2015.5.7
|
||||
.. versionadded:: 2015.5.7,2015.8.2
|
||||
|
||||
Returns:
|
||||
dict: Returns a dictionary if successful, otherwise ``False``
|
||||
|
@ -772,10 +772,10 @@ def join_domain(
|
|||
status = get_domain_workgroup()
|
||||
if "Domain" in status:
|
||||
if status["Domain"] == domain:
|
||||
return "Already joined to {}".format(domain)
|
||||
return f"Already joined to {domain}"
|
||||
|
||||
if username and "\\" not in username and "@" not in username:
|
||||
username = "{}@{}".format(username, domain)
|
||||
username = f"{username}@{domain}"
|
||||
|
||||
if username and password is None:
|
||||
return "Must specify a password if you pass a username"
|
||||
|
@ -889,7 +889,7 @@ def unjoin_domain(
|
|||
workgroup (str):
|
||||
The workgroup to join the computer to. Default is ``WORKGROUP``
|
||||
|
||||
.. versionadded:: 2015.8.2/2015.5.7
|
||||
.. versionadded:: 2015.5.7,2015.8.2
|
||||
|
||||
disable (bool):
|
||||
``True`` to disable the computer account in Active Directory.
|
||||
|
@ -899,7 +899,7 @@ def unjoin_domain(
|
|||
``True`` will restart the computer after successful unjoin. Default
|
||||
is ``False``
|
||||
|
||||
.. versionadded:: 2015.8.2/2015.5.7
|
||||
.. versionadded:: 2015.5.7,2015.8.2
|
||||
|
||||
Returns:
|
||||
dict: Returns a dictionary if successful, otherwise ``False``
|
||||
|
@ -918,11 +918,11 @@ def unjoin_domain(
|
|||
status = get_domain_workgroup()
|
||||
if "Workgroup" in status:
|
||||
if status["Workgroup"] == workgroup:
|
||||
return "Already joined to {}".format(workgroup)
|
||||
return f"Already joined to {workgroup}"
|
||||
|
||||
if username and "\\" not in username and "@" not in username:
|
||||
if domain:
|
||||
username = "{}@{}".format(username, domain)
|
||||
username = f"{username}@{domain}"
|
||||
else:
|
||||
return "Must specify domain if not supplied in username"
|
||||
|
||||
|
@ -1060,7 +1060,7 @@ def get_system_time():
|
|||
elif hours > 12:
|
||||
hours = hours - 12
|
||||
meridian = "PM"
|
||||
return "{:02d}:{:02d}:{:02d} {}".format(hours, now[5], now[6], meridian)
|
||||
return f"{hours:02d}:{now[5]:02d}:{now[6]:02d} {meridian}"
|
||||
|
||||
|
||||
def set_system_time(newtime):
|
||||
|
@ -1199,7 +1199,7 @@ def get_system_date():
|
|||
salt '*' system.get_system_date
|
||||
"""
|
||||
now = win32api.GetLocalTime()
|
||||
return "{:02d}/{:02d}/{:04d}".format(now[1], now[3], now[0])
|
||||
return f"{now[1]:02d}/{now[3]:02d}/{now[0]:04d}"
|
||||
|
||||
|
||||
def set_system_date(newdate):
|
||||
|
|
|
@ -110,7 +110,7 @@ def _strip_headers(output, *args):
|
|||
def _get_copr_repo(copr):
|
||||
copr = copr.split(":", 1)[1]
|
||||
copr = copr.split("/", 1)
|
||||
return "copr:copr.fedorainfracloud.org:{}:{}".format(copr[0], copr[1])
|
||||
return f"copr:copr.fedorainfracloud.org:{copr[0]}:{copr[1]}"
|
||||
|
||||
|
||||
def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
|
||||
|
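The two-step split above turns a ``copr:owner/project`` spec into the canonical repo ID; a hedged sketch with a made-up project name:

.. code-block:: python

    def get_copr_repo(copr):
        # Drop the "copr:" prefix, then split owner from project.
        copr = copr.split(":", 1)[1]
        owner, project = copr.split("/", 1)
        return f"copr:copr.fedorainfracloud.org:{owner}:{project}"

    print(get_copr_repo("copr:@copr/copr-dev"))
    # copr:copr.fedorainfracloud.org:@copr:copr-dev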
@ -123,14 +123,14 @@ def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
|
|||
"""
|
||||
if full:
|
||||
if _yum() == "dnf":
|
||||
lock_re = r"({}-\S+)".format(pattern)
|
||||
lock_re = rf"({pattern}-\S+)"
|
||||
else:
|
||||
lock_re = r"(\d+:{}-\S+)".format(pattern)
|
||||
lock_re = rf"(\d+:{pattern}-\S+)"
|
||||
else:
|
||||
if _yum() == "dnf":
|
||||
lock_re = r"({}-\S+)".format(pattern)
|
||||
lock_re = rf"({pattern}-\S+)"
|
||||
else:
|
||||
lock_re = r"\d+:({}-\S+)".format(pattern)
|
||||
lock_re = rf"\d+:({pattern}-\S+)"
|
||||
|
||||
match = re.search(lock_re, line)
|
||||
if match:
|
||||
|
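A quick hedged exercise of the lock patterns above: with dnf there is no epoch prefix, while the yum patterns either capture the epoch (``full=True``) or skip past it (``full=False``):

.. code-block:: python

    import re

    line = "3:httpd-2.4.57-5.el9.x86_64"
    pattern = "httpd"

    full_yum = re.search(rf"(\d+:{pattern}-\S+)", line)
    bare_yum = re.search(rf"\d+:({pattern}-\S+)", line)
    print(full_yum.group(1))  # 3:httpd-2.4.57-5.el9.x86_64
    print(bare_yum.group(1))  # httpd-2.4.57-5.el9.x86_64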
@ -271,9 +271,7 @@ def _check_versionlock():
|
|||
"""
|
||||
vl_plugin = _versionlock_pkg()
|
||||
if vl_plugin not in list_pkgs():
|
||||
raise SaltInvocationError(
|
||||
"Cannot proceed, {} is not installed.".format(vl_plugin)
|
||||
)
|
||||
raise SaltInvocationError(f"Cannot proceed, {vl_plugin} is not installed.")
|
||||
|
||||
|
||||
def _get_options(**kwargs):
|
||||
|
@ -303,26 +301,26 @@ def _get_options(**kwargs):
|
|||
|
||||
if fromrepo:
|
||||
log.info("Restricting to repo '%s'", fromrepo)
|
||||
ret.extend(["--disablerepo=*", "--enablerepo={}".format(fromrepo)])
|
||||
ret.extend(["--disablerepo=*", f"--enablerepo={fromrepo}"])
|
||||
else:
|
||||
if disablerepo:
|
||||
targets = (
|
||||
[disablerepo] if not isinstance(disablerepo, list) else disablerepo
|
||||
)
|
||||
log.info("Disabling repo(s): %s", ", ".join(targets))
|
||||
ret.extend(["--disablerepo={}".format(x) for x in targets])
|
||||
ret.extend([f"--disablerepo={x}" for x in targets])
|
||||
if enablerepo:
|
||||
targets = [enablerepo] if not isinstance(enablerepo, list) else enablerepo
|
||||
log.info("Enabling repo(s): %s", ", ".join(targets))
|
||||
ret.extend(["--enablerepo={}".format(x) for x in targets])
|
||||
ret.extend([f"--enablerepo={x}" for x in targets])
|
||||
|
||||
if disableexcludes:
|
||||
log.info("Disabling excludes for '%s'", disableexcludes)
|
||||
ret.append("--disableexcludes={}".format(disableexcludes))
|
||||
ret.append(f"--disableexcludes={disableexcludes}")
|
||||
|
||||
if branch:
|
||||
log.info("Adding branch '%s'", branch)
|
||||
ret.append("--branch={}".format(branch))
|
||||
ret.append(f"--branch={branch}")
|
||||
|
||||
for item in setopt:
|
||||
ret.extend(["--setopt", str(item)])
|
||||
|
@ -335,10 +333,10 @@ def _get_options(**kwargs):
|
|||
value = kwargs[key]
|
||||
if isinstance(value, str):
|
||||
log.info("Found extra option --%s=%s", key, value)
|
||||
ret.append("--{}={}".format(key, value))
|
||||
ret.append(f"--{key}={value}")
|
||||
elif value is True:
|
||||
log.info("Found extra option --%s", key)
|
||||
ret.append("--{}".format(key))
|
||||
ret.append(f"--{key}")
|
||||
if ret:
|
||||
log.info("Adding extra options: %s", ret)
|
||||
|
||||
|
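The repo-option expansion above, mocked up as a hedged sketch (hypothetical inputs) to show which flags it emits and that ``fromrepo`` takes precedence:

.. code-block:: python

    def build_repo_options(fromrepo=None, disablerepo=None, enablerepo=None):
        # fromrepo wins over the enable/disable lists, as above.
        ret = []
        if fromrepo:
            ret.extend(["--disablerepo=*", f"--enablerepo={fromrepo}"])
        else:
            if disablerepo:
                targets = [disablerepo] if not isinstance(disablerepo, list) else disablerepo
                ret.extend([f"--disablerepo={x}" for x in targets])
            if enablerepo:
                targets = [enablerepo] if not isinstance(enablerepo, list) else enablerepo
                ret.extend([f"--enablerepo={x}" for x in targets])
        return ret

    print(build_repo_options(fromrepo="epel"))
    # ['--disablerepo=*', '--enablerepo=epel']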
@ -372,10 +370,10 @@ def _get_yum_config(strict_parser=True):
|
|||
for name, value in yb.conf.items():
|
||||
conf[name] = value
|
||||
except (AttributeError, yum.Errors.ConfigError) as exc:
|
||||
raise CommandExecutionError("Could not query yum config: {}".format(exc))
|
||||
raise CommandExecutionError(f"Could not query yum config: {exc}")
|
||||
except yum.Errors.YumBaseError as yum_base_error:
|
||||
raise CommandExecutionError(
|
||||
"Error accessing yum or rpmdb: {}".format(yum_base_error)
|
||||
f"Error accessing yum or rpmdb: {yum_base_error}"
|
||||
)
|
||||
else:
|
||||
# fall back to parsing the config ourselves
|
||||
|
@ -394,14 +392,14 @@ def _get_yum_config(strict_parser=True):
|
|||
|
||||
if not fn:
|
||||
raise CommandExecutionError(
|
||||
"No suitable yum config file found in: {}".format(paths)
|
||||
f"No suitable yum config file found in: {paths}"
|
||||
)
|
||||
|
||||
cp = configparser.ConfigParser(strict=strict_parser)
|
||||
try:
|
||||
cp.read(fn)
|
||||
except OSError as exc:
|
||||
raise CommandExecutionError("Unable to read from {}: {}".format(fn, exc))
|
||||
raise CommandExecutionError(f"Unable to read from {fn}: {exc}")
|
||||
|
||||
if cp.has_section("main"):
|
||||
for opt in cp.options("main"):
|
||||
|
@ -995,7 +993,7 @@ def list_repo_pkgs(*args, **kwargs):
|
|||
else:
|
||||
for repo in repos:
|
||||
if _yum() == "tdnf":
|
||||
cmd = ["--quiet", "--enablerepo={}".format(repo), "list"]
|
||||
cmd = ["--quiet", f"--enablerepo={repo}", "list"]
|
||||
else:
|
||||
cmd = [
|
||||
"--quiet",
|
||||
|
@ -1254,7 +1252,7 @@ def install(
|
|||
update_holds=False,
|
||||
saltenv="base",
|
||||
ignore_epoch=False,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
|
@ -1468,7 +1466,7 @@ def install(
|
|||
sources,
|
||||
saltenv=saltenv,
|
||||
normalize=normalize and kwargs.get("split_arch", True),
|
||||
**kwargs
|
||||
**kwargs,
|
||||
)
|
||||
except MinionError as exc:
|
||||
raise CommandExecutionError(exc)
|
||||
|
@ -1527,9 +1525,7 @@ def install(
|
|||
cur_patches = list_patches()
|
||||
for advisory_id in pkg_params:
|
||||
if advisory_id not in cur_patches:
|
||||
raise CommandExecutionError(
|
||||
'Advisory id "{}" not found'.format(advisory_id)
|
||||
)
|
||||
raise CommandExecutionError(f'Advisory id "{advisory_id}" not found')
|
||||
else:
|
||||
pkg_params_items.append(advisory_id)
|
||||
else:
|
||||
|
@ -1647,7 +1643,7 @@ def install(
|
|||
continue
|
||||
|
||||
if ignore_epoch is True:
|
||||
pkgstr = "{}-{}{}".format(pkgname, version_num, arch)
|
||||
pkgstr = f"{pkgname}-{version_num}{arch}"
|
||||
else:
|
||||
pkgstr = "{}-{}{}".format(
|
||||
pkgname, version_num.split(":", 1)[-1], arch
|
||||
|
@ -1771,7 +1767,7 @@ def install(
|
|||
with _temporarily_unhold(to_install, targets):
|
||||
if targets:
|
||||
if pkg_type == "advisory":
|
||||
targets = ["--advisory={}".format(t) for t in targets]
|
||||
targets = [f"--advisory={t}" for t in targets]
|
||||
cmd = ["-y"]
|
||||
if _yum() == "dnf":
|
||||
cmd.extend(["--best", "--allowerasing"])
|
||||
|
@ -1848,7 +1844,7 @@ def upgrade(
|
|||
minimal=False,
|
||||
obsoletes=True,
|
||||
diff_attr=None,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Run a full system upgrade (a ``yum upgrade`` or ``dnf upgrade``), or
|
||||
|
@ -1905,7 +1901,7 @@ def upgrade(
|
|||
Disable exclude from main, for a repo or for everything.
|
||||
(e.g., ``yum --disableexcludes='main'``)
|
||||
|
||||
.. versionadded:: 2014.7
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
name
|
||||
The name of the package to be upgraded. Note that this parameter is
|
||||
|
@ -2076,7 +2072,7 @@ def update(
|
|||
normalize=True,
|
||||
minimal=False,
|
||||
obsoletes=False,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
.. versionadded:: 2019.2.0
|
||||
|
@ -2322,7 +2318,7 @@ def hold(
|
|||
if target not in current_locks:
|
||||
if "test" in __opts__ and __opts__["test"]:
|
||||
ret[target].update(result=None)
|
||||
ret[target]["comment"] = "Package {} is set to be held.".format(target)
|
||||
ret[target]["comment"] = f"Package {target} is set to be held."
|
||||
else:
|
||||
out = _call_yum(["versionlock", target])
|
||||
if out["retcode"] == 0:
|
||||
|
@ -2415,7 +2411,7 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
|
|||
search_locks = [
|
||||
x
|
||||
for x in current_locks
|
||||
if fnmatch.fnmatch(x, "*{}*".format(target))
|
||||
if fnmatch.fnmatch(x, f"*{target}*")
|
||||
and target == _get_hold(x, full=False)
|
||||
]
|
||||
|
||||
|
@ -2437,16 +2433,16 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
|
|||
else:
|
||||
ret[target][
|
||||
"comment"
|
||||
] = "Package {} was unable to be unheld.".format(target)
|
||||
] = f"Package {target} was unable to be unheld."
|
||||
else:
|
||||
ret[target].update(result=True)
|
||||
ret[target]["comment"] = "Package {} is not being held.".format(target)
|
||||
ret[target]["comment"] = f"Package {target} is not being held."
|
||||
return ret
|
||||
|
||||
|
||||
def list_holds(pattern=__HOLD_PATTERN, full=True):
|
||||
r"""
|
||||
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
|
||||
.. versionchanged:: 2015.5.10,2015.8.4,2016.3.0
|
||||
Function renamed from ``pkg.get_locked_pkgs`` to ``pkg.list_holds``.
|
||||
|
||||
List information on locked packages
|
||||
|
@ -2573,7 +2569,7 @@ def group_list():
|
|||
def group_info(name, expand=False, ignore_groups=None):
|
||||
"""
|
||||
.. versionadded:: 2014.1.0
|
||||
.. versionchanged:: 3001,2016.3.0,2015.8.4,2015.5.10
|
||||
.. versionchanged:: 2015.5.10,2015.8.4,2016.3.0,3001
|
||||
The return data has changed. A new key ``type`` has been added to
|
||||
distinguish environment groups from package groups. Also, keys for the
|
||||
group name and group ID have been added. The ``mandatory packages``,
|
||||
|
@ -2631,7 +2627,7 @@ def group_info(name, expand=False, ignore_groups=None):
|
|||
ret["group"] = g_info.get("environment group") or g_info.get("group")
|
||||
ret["id"] = g_info.get("environment-id") or g_info.get("group-id")
|
||||
if not ret["group"] and not ret["id"]:
|
||||
raise CommandExecutionError("Group '{}' not found".format(name))
|
||||
raise CommandExecutionError(f"Group '{name}' not found")
|
||||
|
||||
ret["description"] = g_info.get("description", "")
|
||||
|
||||
|
@ -2683,7 +2679,7 @@ def group_info(name, expand=False, ignore_groups=None):
|
|||
def group_diff(name):
|
||||
"""
|
||||
.. versionadded:: 2014.1.0
|
||||
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
|
||||
.. versionchanged:: 2015.5.10,2015.8.4,2016.3.0
|
||||
Environment groups are now supported. The key names have been renamed,
|
||||
similar to the changes made in :py:func:`pkg.group_info
|
||||
<salt.modules.yumpkg.group_info>`.
|
||||
|
@ -2830,7 +2826,7 @@ def list_repos(basedir=None, **kwargs):
|
|||
if not os.path.exists(bdir):
|
||||
continue
|
||||
for repofile in os.listdir(bdir):
|
||||
repopath = "{}/{}".format(bdir, repofile)
|
||||
repopath = f"{bdir}/{repofile}"
|
||||
if not repofile.endswith(".repo"):
|
||||
continue
|
||||
filerepos = _parse_repo_file(repopath, strict_parser)[1]
|
||||
|
@ -2902,7 +2898,7 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
|
|||
repos = list_repos(basedirs, **kwargs)
|
||||
|
||||
if repo not in repos:
|
||||
return "Error: the {} repo does not exist in {}".format(repo, basedirs)
|
||||
return f"Error: the {repo} repo does not exist in {basedirs}"
|
||||
|
||||
# Find out what file the repo lives in
|
||||
repofile = ""
|
||||
|
@ -2921,7 +2917,7 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
|
|||
# If this is the only repo in the file, delete the file itself
|
||||
if onlyrepo:
|
||||
os.remove(repofile)
|
||||
return "File {} containing repo {} has been removed".format(repofile, repo)
|
||||
return f"File {repofile} containing repo {repo} has been removed"
|
||||
|
||||
# There must be other repos in this file, write the file with them
|
||||
header, filerepos = _parse_repo_file(repofile, strict_parser)
|
||||
|
@ -2935,20 +2931,20 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
|
|||
filerepos[stanza]["comments"]
|
||||
)
|
||||
del filerepos[stanza]["comments"]
|
||||
content += "\n[{}]".format(stanza)
|
||||
content += f"\n[{stanza}]"
|
||||
for line in filerepos[stanza]:
|
||||
# A whitespace is needed at the beginning of the new line in order
|
||||
# to avoid breaking multiple line values allowed on repo files.
|
||||
value = filerepos[stanza][line]
|
||||
if isinstance(value, str) and "\n" in value:
|
||||
value = "\n ".join(value.split("\n"))
|
||||
content += "\n{}={}".format(line, value)
|
||||
content += "\n{}\n".format(comments)
|
||||
content += f"\n{line}={value}"
|
||||
content += f"\n{comments}\n"
|
||||
|
||||
with salt.utils.files.fopen(repofile, "w") as fileout:
|
||||
fileout.write(salt.utils.stringutils.to_str(content))
|
||||
|
||||
return "Repo {} has been removed from {}".format(repo, repofile)
|
||||
return f"Repo {repo} has been removed from {repofile}"
|
||||
|
||||
|
||||
def mod_repo(repo, basedir=None, **kwargs):
|
||||
|
@ -3036,7 +3032,7 @@ def mod_repo(repo, basedir=None, **kwargs):
|
|||
"The repo does not exist and needs to be created, but none "
|
||||
"of the following basedir directories exist: {}".format(basedirs)
|
||||
)
|
||||
repofile = "{}/{}.repo".format(newdir, repo)
|
||||
repofile = f"{newdir}/{repo}.repo"
|
||||
if use_copr:
|
||||
# Is copr plugin installed?
|
||||
copr_plugin_name = ""
|
||||
|
@ -3047,7 +3043,7 @@ def mod_repo(repo, basedir=None, **kwargs):
|
|||
|
||||
if not __salt__["pkg_resource.version"](copr_plugin_name):
|
||||
raise SaltInvocationError(
|
||||
"{} must be installed to use COPR".format(copr_plugin_name)
|
||||
f"{copr_plugin_name} must be installed to use COPR"
|
||||
)
|
||||
|
||||
# Enable COPR
|
||||
|
@ -3064,7 +3060,7 @@ def mod_repo(repo, basedir=None, **kwargs):
|
|||
repofile = repos[repo]["file"]
|
||||
header, filerepos = _parse_repo_file(repofile, strict_parser)
|
||||
else:
|
||||
repofile = "{}/{}.repo".format(newdir, repo)
|
||||
repofile = f"{newdir}/{repo}.repo"
|
||||
|
||||
if "name" not in repo_opts:
|
||||
raise SaltInvocationError(
|
||||
|
@ -3108,7 +3104,7 @@ def mod_repo(repo, basedir=None, **kwargs):
|
|||
comments = salt.utils.pkg.rpm.combine_comments(
|
||||
filerepos[stanza].pop("comments", [])
|
||||
)
|
||||
content += "[{}]\n".format(stanza)
|
||||
content += f"[{stanza}]\n"
|
||||
for line in filerepos[stanza].keys():
|
||||
# A whitespace is needed at the beginning of the new line in order
|
||||
# to avoid breaking multiple line values allowed on repo files.
|
||||
|
@ -3327,11 +3323,7 @@ def download(*packages, **kwargs):
|
|||
to_purge = []
|
||||
for pkg in packages:
|
||||
to_purge.extend(
|
||||
[
|
||||
os.path.join(CACHE_DIR, x)
|
||||
for x in cached_pkgs
|
||||
if x.startswith("{}-".format(pkg))
|
||||
]
|
||||
[os.path.join(CACHE_DIR, x) for x in cached_pkgs if x.startswith(f"{pkg}-")]
|
||||
)
|
||||
for purge_target in set(to_purge):
|
||||
log.debug("Removing cached package %s", purge_target)
|
||||
|
@ -3340,7 +3332,7 @@ def download(*packages, **kwargs):
|
|||
except OSError as exc:
|
||||
log.error("Unable to remove %s: %s", purge_target, exc)
|
||||
|
||||
cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)]
|
||||
cmd = ["yumdownloader", "-q", f"--destdir={CACHE_DIR}"]
|
||||
cmd.extend(packages)
|
||||
__salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
|
||||
ret = {}
|
||||
|
@ -3350,7 +3342,7 @@ def download(*packages, **kwargs):
|
|||
pkg_name = None
|
||||
pkg_file = None
|
||||
for query_pkg in packages:
|
||||
if dld_result.startswith("{}-".format(query_pkg)):
|
||||
if dld_result.startswith(f"{query_pkg}-"):
|
||||
pkg_name = query_pkg
|
||||
pkg_file = dld_result
|
||||
break
|
||||
|
|
|
@ -146,7 +146,7 @@ def _query(method, params, url, auth=None):
|
|||
|
||||
:return: Response from API with desired data in JSON format. In case of an error, returns a more specific description.
|
||||
|
||||
.. versionchanged:: 2017.7
|
||||
.. versionchanged:: 2017.7.0
|
||||
"""
|
||||
|
||||
unauthenticated_methods = [
|
||||
|
@ -189,11 +189,9 @@ def _query(method, params, url, auth=None):
|
|||
)
|
||||
return ret
|
||||
except ValueError as err:
|
||||
raise SaltException(
|
||||
"URL or HTTP headers are probably not correct! ({})".format(err)
|
||||
)
|
||||
raise SaltException(f"URL or HTTP headers are probably not correct! ({err})")
|
||||
except OSError as err:
|
||||
raise SaltException("Check hostname in URL! ({})".format(err))
|
||||
raise SaltException(f"Check hostname in URL! ({err})")
|
||||
|
||||
|
||||
def _login(**kwargs):
|
||||
|
@ -232,9 +230,9 @@ def _login(**kwargs):
|
|||
name = name[len(prefix) :]
|
||||
except IndexError:
|
||||
return
|
||||
val = __salt__["config.get"]("zabbix.{}".format(name), None) or __salt__[
|
||||
val = __salt__["config.get"](f"zabbix.{name}", None) or __salt__[
|
||||
"config.get"
|
||||
]("zabbix:{}".format(name), None)
|
||||
](f"zabbix:{name}", None)
|
||||
if val is not None:
|
||||
connargs[key] = val
|
||||
|
||||
|
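Connection arguments fall back from keyword arguments to minion config, trying both the ``zabbix.<name>`` and ``zabbix:<name>`` key styles; a hedged sketch with a stand-in for ``config.get``:

.. code-block:: python

    config = {"zabbix.url": "https://zabbix.example.com/api_jsonrpc.php"}

    def config_get(key, default=None):
        # Stand-in for __salt__["config.get"].
        return config.get(key, default)

    def lookup(name):
        return config_get(f"zabbix.{name}") or config_get(f"zabbix:{name}")

    print(lookup("url"))   # https://zabbix.example.com/api_jsonrpc.php
    print(lookup("user"))  # None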
@ -258,7 +256,7 @@ def _login(**kwargs):
|
|||
else:
|
||||
raise KeyError
|
||||
except KeyError as err:
|
||||
raise SaltException("URL is probably not correct! ({})".format(err))
|
||||
raise SaltException(f"URL is probably not correct! ({err})")
|
||||
|
||||
|
||||
def _params_extend(params, _ignore_name=False, **kwargs):
|
||||
|
@ -311,7 +309,7 @@ def _map_to_list_of_dicts(source, key):
|
|||
|
||||
def get_zabbix_id_mapper():
|
||||
"""
|
||||
.. versionadded:: 2017.7
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Make ZABBIX_ID_MAPPER constant available to state modules.
|
||||
|
||||
|
@ -328,7 +326,7 @@ def get_zabbix_id_mapper():
|
|||
|
||||
def substitute_params(input_object, extend_params=None, filter_key="name", **kwargs):
|
||||
"""
|
||||
.. versionadded:: 2017.7
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Go through Zabbix object params specification and if needed get given object ID from Zabbix API and put it back
|
||||
as a value. Definition of the object is done via dict with keys "query_object" and "query_name".
|
||||
|
@ -385,7 +383,7 @@ def substitute_params(input_object, extend_params=None, filter_key="name", **kwa
|
|||
# pylint: disable=too-many-return-statements,too-many-nested-blocks
|
||||
def compare_params(defined, existing, return_old_value=False):
|
||||
"""
|
||||
.. versionadded:: 2017.7
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Compares Zabbix object definition against existing Zabbix object.
|
||||
|
||||
|
@ -471,7 +469,7 @@ def compare_params(defined, existing, return_old_value=False):
|
|||
|
||||
def get_object_id_by_params(obj, params=None, **connection_args):
|
||||
"""
|
||||
.. versionadded:: 2017.7
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Get ID of single Zabbix object specified by its name.
|
||||
|
||||
|
@ -2143,7 +2141,7 @@ def usermacro_get(
|
|||
hostmacroids=None,
|
||||
globalmacroids=None,
|
||||
globalmacro=False,
|
||||
**connection_args
|
||||
**connection_args,
|
||||
):
|
||||
"""
|
||||
Retrieve user macros according to the given parameters.
|
||||
|
@ -2703,7 +2701,7 @@ def run_query(method, params, **connection_args):
|
|||
|
||||
def configuration_import(config_file, rules=None, file_format="xml", **connection_args):
|
||||
"""
|
||||
.. versionadded:: 2017.7
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Imports Zabbix configuration specified in file to Zabbix server.
|
||||
|
||||
|
|
|
@ -56,28 +56,28 @@ site_prefixes: ``True``
|
|||
Whether to retrieve the prefixes of the site the device belongs to.
|
||||
|
||||
devices: ``True``
|
||||
.. versionadded:: 3004.0
|
||||
.. versionadded:: 3004
|
||||
|
||||
Whether to retrieve physical devices.
|
||||
|
||||
virtual_machines: ``False``
|
||||
.. versionadded:: 3004.0
|
||||
.. versionadded:: 3004
|
||||
|
||||
Whether to retrieve virtual machines.
|
||||
|
||||
interfaces: ``False``
|
||||
.. versionadded:: 3004.0
|
||||
.. versionadded:: 3004
|
||||
|
||||
Whether to retrieve the interfaces of the device.
|
||||
|
||||
interface_ips: ``False``
|
||||
.. versionadded:: 3004.0
|
||||
.. versionadded:: 3004
|
||||
|
||||
Whether to retrieve the IP addresses for interfaces of the device.
|
||||
(interfaces must be set to True as well)
|
||||
|
||||
api_query_result_limit: ``Use NetBox default``
|
||||
.. versionadded:: 3004.0
|
||||
.. versionadded:: 3004
|
||||
|
||||
An integer specifying how many results should be returned for each query
|
||||
to the NetBox API. Leaving this unset will use NetBox's default value.
|
||||
|
@ -1109,7 +1109,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs):
|
|||
# Fetch device from API
|
||||
headers = {}
|
||||
if api_token:
|
||||
headers = {"Authorization": "Token {}".format(api_token)}
|
||||
headers = {"Authorization": f"Token {api_token}"}
|
||||
else:
|
||||
log.error("The value for api_token is not set")
|
||||
return ret
|
||||
|
|
|
@ -7,16 +7,16 @@ Flat inventory files should be in the regular ansible inventory format.
|
|||
|
||||
# /tmp/example_roster
|
||||
[servers]
|
||||
salt.gtmanfred.com ansible_ssh_user=gtmanfred ansible_ssh_host=127.0.0.1 ansible_ssh_port=22 ansible_ssh_pass='password'
|
||||
salt.gtmanfred.com ansible_ssh_user=gtmanfred ansible_ssh_host=127.0.0.1 ansible_ssh_port=22 ansible_ssh_pass='password' ansible_sudo_pass='password'
|
||||
|
||||
[desktop]
|
||||
home ansible_ssh_user=gtmanfred ansible_ssh_host=12.34.56.78 ansible_ssh_port=23 ansible_ssh_pass='password'
|
||||
home ansible_ssh_user=gtmanfred ansible_ssh_host=12.34.56.78 ansible_ssh_port=23 ansible_ssh_pass='password' ansible_sudo_pass='password'
|
||||
|
||||
[computers:children]
|
||||
desktop
|
||||
servers
|
||||
|
||||
[names:vars]
|
||||
[computers:vars]
|
||||
http_port=80
|
||||
|
||||
then salt-ssh can be used to hit any of them
|
||||
|
@ -47,35 +47,40 @@ There is also the option of specifying a dynamic inventory, and generating it on
|
|||
#!/bin/bash
|
||||
# filename: /etc/salt/hosts
|
||||
echo '{
|
||||
"servers": [
|
||||
"salt.gtmanfred.com"
|
||||
],
|
||||
"desktop": [
|
||||
"home"
|
||||
],
|
||||
"computers": {
|
||||
"hosts": [],
|
||||
"children": [
|
||||
"desktop",
|
||||
"servers"
|
||||
]
|
||||
},
|
||||
"_meta": {
|
||||
"hostvars": {
|
||||
"salt.gtmanfred.com": {
|
||||
"ansible_ssh_user": "gtmanfred",
|
||||
"ansible_ssh_host": "127.0.0.1",
|
||||
"ansible_sudo_pass": "password",
|
||||
"ansible_ssh_port": 22
|
||||
},
|
||||
"home": {
|
||||
"ansible_ssh_user": "gtmanfred",
|
||||
"ansible_ssh_host": "12.34.56.78",
|
||||
"ansible_sudo_pass": "password",
|
||||
"ansible_ssh_port": 23
|
||||
}
|
||||
"servers": [
|
||||
"salt.gtmanfred.com"
|
||||
],
|
||||
"desktop": [
|
||||
"home"
|
||||
],
|
||||
"computers": {
|
||||
"hosts": [],
|
||||
"children": [
|
||||
"desktop",
|
||||
"servers"
|
||||
],
|
||||
"vars": {
|
||||
"http_port": 80
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"hostvars": {
|
||||
"salt.gtmanfred.com": {
|
||||
"ansible_ssh_user": "gtmanfred",
|
||||
"ansible_ssh_host": "127.0.0.1",
|
||||
"ansible_sudo_pass": "password",
|
||||
"ansible_ssh_pass": "password",
|
||||
"ansible_ssh_port": 22
|
||||
},
|
||||
"home": {
|
||||
"ansible_ssh_user": "gtmanfred",
|
||||
"ansible_ssh_host": "12.34.56.78",
|
||||
"ansible_sudo_pass": "password",
|
||||
"ansible_ssh_pass": "password",
|
||||
"ansible_ssh_port": 23
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
|
||||
This is the format that an inventory script needs to output in order to work with ansible, and therefore with this roster module as well.
|
||||
|
|
|
@ -86,7 +86,7 @@ def orchestrate(
|
|||
|
||||
Runner uses the pillar variable
|
||||
|
||||
.. versionchanged:: 2017.5
|
||||
.. versionchanged:: 2017.5.0
|
||||
|
||||
Runner uses the pillar_enc variable that allows renderers to render the pillar.
|
||||
This is usable when supplying the contents of a file as pillar, and the file contains
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
ACME / Let's Encrypt certificate management state
|
||||
=================================================
|
||||
|
||||
.. versionadded:: 2016.3
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
See also the module documentation
|
||||
|
||||
|
@ -109,15 +109,13 @@ def cert(
|
|||
else:
|
||||
ret["result"] = True
|
||||
ret["comment"].append(
|
||||
"Certificate {} exists and does not need renewal.".format(certname)
|
||||
f"Certificate {certname} exists and does not need renewal."
|
||||
)
|
||||
|
||||
if action:
|
||||
if __opts__["test"]:
|
||||
ret["result"] = None
|
||||
ret["comment"].append(
|
||||
"Certificate {} would have been {}ed.".format(certname, action)
|
||||
)
|
||||
ret["comment"].append(f"Certificate {certname} would have been {action}ed.")
|
||||
ret["changes"] = {"old": "current certificate", "new": "new certificate"}
|
||||
else:
|
||||
res = __salt__["acme.cert"](
|
||||
|
|
|
@ -36,7 +36,7 @@ the above word between angle brackets (<>).
|
|||
- FollowSymlinks
|
||||
AllowOverride: All
|
||||
|
||||
.. versionchanged:: 2018.3
|
||||
.. versionchanged:: 2018.3.0
|
||||
|
||||
Allows having the same section container multiple times (e.g. <Directory /path/to/dir>).
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ def _convert_to_mb(size):
|
|||
if str_size[-1:].isdigit():
|
||||
size = int(str_size)
|
||||
else:
|
||||
raise salt.exceptions.ArgumentValueError("Size {} is invalid.".format(size))
|
||||
raise salt.exceptions.ArgumentValueError(f"Size {size} is invalid.")
|
||||
|
||||
if unit == "s":
|
||||
target_size = size / 2048
|
||||
|
@ -63,7 +63,7 @@ def _convert_to_mb(size):
|
|||
elif unit == "p":
|
||||
target_size = size * 1024 * 1024 * 1024
|
||||
else:
|
||||
raise salt.exceptions.ArgumentValueError("Unit {} is invalid.".format(unit))
|
||||
raise salt.exceptions.ArgumentValueError(f"Unit {unit} is invalid.")
|
||||
return target_size
|
||||
|
||||
|
||||
|
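The unit ladder above converges on megabytes: ``s`` is 512-byte sectors (2048 per MB) and ``p`` is petabytes (1024**3 MB per PB). A hedged reconstruction; the k/m/g/t branches are inferred from the two ends shown above:

.. code-block:: python

    def convert_to_mb(size):
        # "4g" -> 4096; bare numbers are taken as megabytes.
        str_size = str(size).lower()
        unit = str_size[-1:]
        if unit.isdigit():
            unit, value = "m", int(str_size)
        else:
            value = int(str_size[:-1])
        factors = {"s": 1 / 2048, "k": 1 / 1024, "m": 1, "g": 1024,
                   "t": 1024 * 1024, "p": 1024 * 1024 * 1024}
        if unit not in factors:
            raise ValueError(f"Unit {unit} is invalid.")
        return value * factors[unit]

    print(convert_to_mb("4g"))  # 4096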
@ -81,19 +81,19 @@ def pv_present(name, **kwargs):
|
|||
ret = {"changes": {}, "comment": "", "name": name, "result": True}
|
||||
|
||||
if __salt__["lvm.pvdisplay"](name, quiet=True):
|
||||
ret["comment"] = "Physical Volume {} already present".format(name)
|
||||
ret["comment"] = f"Physical Volume {name} already present"
|
||||
elif __opts__["test"]:
|
||||
ret["comment"] = "Physical Volume {} is set to be created".format(name)
|
||||
ret["comment"] = f"Physical Volume {name} is set to be created"
|
||||
ret["result"] = None
|
||||
return ret
|
||||
else:
|
||||
changes = __salt__["lvm.pvcreate"](name, **kwargs)
|
||||
|
||||
if __salt__["lvm.pvdisplay"](name):
|
||||
ret["comment"] = "Created Physical Volume {}".format(name)
|
||||
ret["comment"] = f"Created Physical Volume {name}"
|
||||
ret["changes"]["created"] = changes
|
||||
else:
|
||||
ret["comment"] = "Failed to create Physical Volume {}".format(name)
|
||||
ret["comment"] = f"Failed to create Physical Volume {name}"
|
||||
ret["result"] = False
|
||||
return ret
|
||||
|
||||
|
@ -108,19 +108,19 @@ def pv_absent(name):
|
|||
ret = {"changes": {}, "comment": "", "name": name, "result": True}
|
||||
|
||||
if not __salt__["lvm.pvdisplay"](name, quiet=True):
|
||||
ret["comment"] = "Physical Volume {} does not exist".format(name)
|
||||
ret["comment"] = f"Physical Volume {name} does not exist"
|
||||
elif __opts__["test"]:
|
||||
ret["comment"] = "Physical Volume {} is set to be removed".format(name)
|
||||
ret["comment"] = f"Physical Volume {name} is set to be removed"
|
||||
ret["result"] = None
|
||||
return ret
|
||||
else:
|
||||
changes = __salt__["lvm.pvremove"](name)
|
||||
|
||||
if __salt__["lvm.pvdisplay"](name, quiet=True):
|
||||
ret["comment"] = "Failed to remove Physical Volume {}".format(name)
|
||||
ret["comment"] = f"Failed to remove Physical Volume {name}"
|
||||
ret["result"] = False
|
||||
else:
|
||||
ret["comment"] = "Removed Physical Volume {}".format(name)
|
||||
ret["comment"] = f"Removed Physical Volume {name}"
|
||||
ret["changes"]["removed"] = changes
|
||||
return ret
|
||||
|
||||
|
@ -144,23 +144,23 @@ def vg_present(name, devices=None, **kwargs):
|
|||
devices = devices.split(",")
|
||||
|
||||
if __salt__["lvm.vgdisplay"](name, quiet=True):
|
||||
ret["comment"] = "Volume Group {} already present".format(name)
|
||||
ret["comment"] = f"Volume Group {name} already present"
|
||||
for device in devices:
|
||||
realdev = os.path.realpath(device)
|
||||
pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
|
||||
if pvs and pvs.get(realdev, None):
|
||||
if pvs[realdev]["Volume Group Name"] == name:
|
||||
ret["comment"] = "{}\n{}".format(
|
||||
ret["comment"], "{} is part of Volume Group".format(device)
|
||||
ret["comment"], f"{device} is part of Volume Group"
|
||||
)
|
||||
elif pvs[realdev]["Volume Group Name"] in ["", "#orphans_lvm2"]:
|
||||
__salt__["lvm.vgextend"](name, device)
|
||||
pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
|
||||
if pvs[realdev]["Volume Group Name"] == name:
|
||||
ret["changes"].update({device: "added to {}".format(name)})
|
||||
ret["changes"].update({device: f"added to {name}"})
|
||||
else:
|
||||
ret["comment"] = "{}\n{}".format(
|
||||
ret["comment"], "{} could not be added".format(device)
|
||||
ret["comment"], f"{device} could not be added"
|
||||
)
|
||||
ret["result"] = False
|
||||
else:
|
||||
|
@ -173,21 +173,21 @@ def vg_present(name, devices=None, **kwargs):
|
|||
ret["result"] = False
|
||||
else:
|
||||
ret["comment"] = "{}\n{}".format(
|
||||
ret["comment"], "pv {} is not present".format(device)
|
||||
ret["comment"], f"pv {device} is not present"
|
||||
)
|
||||
ret["result"] = False
|
||||
elif __opts__["test"]:
|
||||
ret["comment"] = "Volume Group {} is set to be created".format(name)
|
||||
ret["comment"] = f"Volume Group {name} is set to be created"
|
||||
ret["result"] = None
|
||||
return ret
|
||||
else:
|
||||
changes = __salt__["lvm.vgcreate"](name, devices, **kwargs)
|
||||
|
||||
if __salt__["lvm.vgdisplay"](name):
|
||||
ret["comment"] = "Created Volume Group {}".format(name)
|
||||
ret["comment"] = f"Created Volume Group {name}"
|
||||
ret["changes"]["created"] = changes
|
||||
else:
|
||||
ret["comment"] = "Failed to create Volume Group {}".format(name)
|
||||
ret["comment"] = f"Failed to create Volume Group {name}"
|
||||
ret["result"] = False
|
||||
return ret
|
||||
|
||||
|
@ -202,19 +202,19 @@ def vg_absent(name):
|
|||
ret = {"changes": {}, "comment": "", "name": name, "result": True}
|
||||
|
||||
if not __salt__["lvm.vgdisplay"](name, quiet=True):
ret["comment"] = "Volume Group {} already absent".format(name)
ret["comment"] = f"Volume Group {name} already absent"
elif __opts__["test"]:
ret["comment"] = "Volume Group {} is set to be removed".format(name)
ret["comment"] = f"Volume Group {name} is set to be removed"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.vgremove"](name)

if not __salt__["lvm.vgdisplay"](name, quiet=True):
ret["comment"] = "Removed Volume Group {}".format(name)
ret["comment"] = f"Removed Volume Group {name}"
ret["changes"]["removed"] = changes
else:
ret["comment"] = "Failed to remove Volume Group {}".format(name)
ret["comment"] = f"Failed to remove Volume Group {name}"
ret["result"] = False
return ret

@ -230,7 +230,7 @@ def lv_present(
thinpool=False,
force=False,
resizefs=False,
**kwargs
**kwargs,
):
"""
Ensure that a Logical Volume is present, creating it if absent.

@ -274,7 +274,7 @@ def lv_present(
force
Assume yes to all prompts

.. versionadded:: 3002.0
.. versionadded:: 3002

resizefs
Use fsadm to resize the logical volume filesystem if needed

@ -299,14 +299,14 @@ def lv_present(
if thinvolume:
lvpath = "/dev/{}/{}".format(vgname.split("/")[0], name)
else:
lvpath = "/dev/{}/{}".format(vgname, name)
lvpath = f"/dev/{vgname}/{name}"

lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)
lv_info = lv_info.get(lvpath)

if not lv_info:
if __opts__["test"]:
ret["comment"] = "Logical Volume {} is set to be created".format(name)
ret["comment"] = f"Logical Volume {name} is set to be created"
ret["result"] = None
return ret
else:

@ -320,11 +320,11 @@ def lv_present(
thinvolume=thinvolume,
thinpool=thinpool,
force=force,
**kwargs
**kwargs,
)

if __salt__["lvm.lvdisplay"](lvpath):
ret["comment"] = "Created Logical Volume {}".format(name)
ret["comment"] = f"Created Logical Volume {name}"
ret["changes"]["created"] = changes
else:
ret["comment"] = "Failed to create Logical Volume {}. Error: {}".format(

@ -332,7 +332,7 @@ def lv_present(
)
ret["result"] = False
else:
ret["comment"] = "Logical Volume {} already present".format(name)
ret["comment"] = f"Logical Volume {name} already present"

if size or extents:
old_extents = int(lv_info["Current Logical Extents Associated"])

@ -386,7 +386,7 @@ def lv_present(
lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)[lvpath]
new_size_mb = _convert_to_mb(lv_info["Logical Volume Size"] + "s")
if new_size_mb != old_size_mb:
ret["comment"] = "Resized Logical Volume {}".format(name)
ret["comment"] = f"Resized Logical Volume {name}"
ret["changes"]["resized"] = changes
else:
ret[

@ -410,20 +410,20 @@ def lv_absent(name, vgname=None):
"""
ret = {"changes": {}, "comment": "", "name": name, "result": True}

lvpath = "/dev/{}/{}".format(vgname, name)
lvpath = f"/dev/{vgname}/{name}"
if not __salt__["lvm.lvdisplay"](lvpath, quiet=True):
ret["comment"] = "Logical Volume {} already absent".format(name)
ret["comment"] = f"Logical Volume {name} already absent"
elif __opts__["test"]:
ret["comment"] = "Logical Volume {} is set to be removed".format(name)
ret["comment"] = f"Logical Volume {name} is set to be removed"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.lvremove"](name, vgname)

if not __salt__["lvm.lvdisplay"](lvpath, quiet=True):
ret["comment"] = "Removed Logical Volume {}".format(name)
ret["comment"] = f"Removed Logical Volume {name}"
ret["changes"]["removed"] = changes
else:
ret["comment"] = "Failed to remove Logical Volume {}".format(name)
ret["comment"] = f"Failed to remove Logical Volume {name}"
ret["result"] = False
return ret
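Most hunks in these state modules are mechanical str.format() to f-string conversions of the kind the salt-rewrite pre-commit hook automates. A minimal sketch of the equivalence, using made-up values rather than anything from the diff:

name = "data"

# Before: positional str.format() call
old_comment = "Volume Group {} already absent".format(name)

# After: the f-string interpolates the same value at runtime
new_comment = f"Volume Group {name} already absent"

assert old_comment == new_comment

f-strings are available from Python 3.6 onward, which is why the rewrite can be applied wholesale across the code base.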
@ -22,26 +22,26 @@ def present(name, parent=None, vlan=None):
parent : string
name of the parent bridge (if the bridge shall be created as a fake
bridge). If specified, vlan must also be specified.
.. versionadded:: 3006
.. versionadded:: 3006.0
vlan: int
VLAN ID of the bridge (if the bridge shall be created as a fake
bridge). If specified, parent must also be specified.
.. versionadded:: 3006
.. versionadded:: 3006.0

"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}

# Comment and change messages
comment_bridge_created = "Bridge {} created.".format(name)
comment_bridge_notcreated = "Unable to create bridge: {}.".format(name)
comment_bridge_exists = "Bridge {} already exists.".format(name)
comment_bridge_created = f"Bridge {name} created."
comment_bridge_notcreated = f"Unable to create bridge: {name}."
comment_bridge_exists = f"Bridge {name} already exists."
comment_bridge_mismatch = (
"Bridge {} already exists, but has a different" " parent or VLAN ID."
).format(name)
changes_bridge_created = {
name: {
"old": "Bridge {} does not exist.".format(name),
"new": "Bridge {} created".format(name),
"old": f"Bridge {name} does not exist.",
"new": f"Bridge {name} created",
}
}

@ -103,13 +103,13 @@ def absent(name):
ret = {"name": name, "changes": {}, "result": False, "comment": ""}

# Comment and change messages
comment_bridge_deleted = "Bridge {} deleted.".format(name)
comment_bridge_notdeleted = "Unable to delete bridge: {}.".format(name)
comment_bridge_notexists = "Bridge {} does not exist.".format(name)
comment_bridge_deleted = f"Bridge {name} deleted."
comment_bridge_notdeleted = f"Unable to delete bridge: {name}."
comment_bridge_notexists = f"Bridge {name} does not exist."
changes_bridge_deleted = {
name: {
"old": "Bridge {} exists.".format(name),
"new": "Bridge {} deleted.".format(name),
"old": f"Bridge {name} exists.",
"new": f"Bridge {name} deleted.",
}
}

@ -1,7 +1,7 @@
"""
Management of Open vSwitch database records.

.. versionadded:: 3006
.. versionadded:: 3006.0
"""

@ -5,7 +5,7 @@ Management of Pacemaker/Corosync clusters with PCS
A state module to manage Pacemaker/Corosync clusters
with the Pacemaker/Corosync configuration system (PCS)

.. versionadded:: 2016.110
.. versionadded:: 2016.11.0

:depends: pcs

@ -229,7 +229,7 @@ def _get_cibfile_tmp(cibname):
"""
Get the full path of a temporary CIB-file with the name of the CIB
"""
cibfile_tmp = "{}.tmp".format(_get_cibfile(cibname))
cibfile_tmp = f"{_get_cibfile(cibname)}.tmp"
log.trace("cibfile_tmp: %s", cibfile_tmp)
return cibfile_tmp

@ -238,7 +238,7 @@ def _get_cibfile_cksum(cibname):
"""
Get the full path of the file containing a checksum of a CIB-file with the name of the CIB
"""
cibfile_cksum = "{}.cksum".format(_get_cibfile(cibname))
cibfile_cksum = f"{_get_cibfile(cibname)}.cksum"
log.trace("cibfile_cksum: %s", cibfile_cksum)
return cibfile_cksum

@ -336,7 +336,7 @@ def _item_present(
# constraints match on '(id:<id>)'
elif item in ["constraint"]:
for line in is_existing["stdout"].splitlines():
if "(id:{})".format(item_id) in line:
if f"(id:{item_id})" in line:
item_create_required = False

# item_id was provided,

@ -370,7 +370,7 @@ def _item_present(
log.trace("Output of pcs.item_create: %s", item_create)

if item_create["retcode"] in [0]:
ret["comment"] += "Created {} {} ({})\n".format(item, item_id, item_type)
ret["comment"] += f"Created {item} {item_id} ({item_type})\n"
ret["changes"].update({item_id: {"old": "", "new": str(item_id)}})
else:
ret["result"] = False

@ -435,11 +435,11 @@ def auth(name, nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=Non
authorized_dict[node] == "Already authorized"
or authorized_dict[node] == "Authorized"
):
ret["comment"] += "Node {} is already authorized\n".format(node)
ret["comment"] += f"Node {node} is already authorized\n"
else:
auth_required = True
if __opts__["test"]:
ret["comment"] += "Node is set to authorize: {}\n".format(node)
ret["comment"] += f"Node is set to authorize: {node}\n"

if not auth_required:
return ret

@ -463,7 +463,7 @@ def auth(name, nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=Non

for node in nodes:
if node in authorize_dict and authorize_dict[node] == "Authorized":
ret["comment"] += "Authorized {}\n".format(node)
ret["comment"] += f"Authorized {node}\n"
ret["changes"].update({node: {"old": "", "new": "Authorized"}})
else:
ret["result"] = False

@ -604,13 +604,13 @@ def cluster_setup(
"Success",
"Cluster enabled",
]:
ret["comment"] += "Set up {}\n".format(node)
ret["comment"] += f"Set up {node}\n"
ret["changes"].update({node: {"old": "", "new": "Setup"}})
else:
ret["result"] = False
ret["comment"] += "Failed to setup {}\n".format(node)
ret["comment"] += f"Failed to setup {node}\n"
if node in setup_dict:
ret["comment"] += "{}: setup_dict: {}\n".format(node, setup_dict[node])
ret["comment"] += f"{node}: setup_dict: {setup_dict[node]}\n"
ret["comment"] += str(setup)

log.trace("ret: %s", ret)

@ -664,7 +664,7 @@ def cluster_node_present(name, node, extra_args=None):
node_add_required = False
ret[
"comment"
] += "Node {} is already member of the cluster\n".format(node)
] += f"Node {node} is already member of the cluster\n"
else:
current_nodes += value.split()

@ -673,7 +673,7 @@ def cluster_node_present(name, node, extra_args=None):

if __opts__["test"]:
ret["result"] = None
ret["comment"] += "Node {} is set to be added to the cluster\n".format(node)
ret["comment"] += f"Node {node} is set to be added to the cluster\n"
return ret

if not isinstance(extra_args, (list, tuple)):

@ -710,11 +710,11 @@ def cluster_node_present(name, node, extra_args=None):
)

if node in node_add_dict and node_add_dict[node] in ["Succeeded", "Success"]:
ret["comment"] += "Added node {}\n".format(node)
ret["comment"] += f"Added node {node}\n"
ret["changes"].update({node: {"old": "", "new": "Added"}})
else:
ret["result"] = False
ret["comment"] += "Failed to add node{}\n".format(node)
ret["comment"] += f"Failed to add node{node}\n"
if node in node_add_dict:
ret["comment"] += "{}: node_add_dict: {}\n".format(
node, node_add_dict[node]

@ -806,10 +806,10 @@ def cib_present(name, cibname, scope=None, extra_args=None):

if not cib_create_required:
__salt__["file.remove"](cibfile_tmp)
ret["comment"] += "CIB {} is already equal to the live CIB\n".format(cibname)
ret["comment"] += f"CIB {cibname} is already equal to the live CIB\n"

if not cib_cksum_required:
ret["comment"] += "CIB {} checksum is correct\n".format(cibname)
ret["comment"] += f"CIB {cibname} checksum is correct\n"

if not cib_required:
return ret

@ -818,7 +818,7 @@ def cib_present(name, cibname, scope=None, extra_args=None):
__salt__["file.remove"](cibfile_tmp)
ret["result"] = None
if cib_create_required:
ret["comment"] += "CIB {} is set to be created/updated\n".format(cibname)
ret["comment"] += f"CIB {cibname} is set to be created/updated\n"
if cib_cksum_required:
ret["comment"] += "CIB {} checksum is set to be created/updated\n".format(
cibname

@ -829,11 +829,11 @@ def cib_present(name, cibname, scope=None, extra_args=None):
__salt__["file.move"](cibfile_tmp, cibfile)

if __salt__["file.check_hash"](path=cibfile, file_hash=cib_hash_live):
ret["comment"] += "Created/updated CIB {}\n".format(cibname)
ret["comment"] += f"Created/updated CIB {cibname}\n"
ret["changes"].update({"cibfile": cibfile})
else:
ret["result"] = False
ret["comment"] += "Failed to create/update CIB {}\n".format(cibname)
ret["comment"] += f"Failed to create/update CIB {cibname}\n"

if cib_cksum_required:
_file_write(cibfile_cksum, cib_hash_live)

@ -894,7 +894,7 @@ def cib_pushed(name, cibname, scope=None, extra_args=None):

if not os.path.exists(cibfile):
ret["result"] = False
ret["comment"] += "CIB-file {} does not exist\n".format(cibfile)
ret["comment"] += f"CIB-file {cibfile} does not exist\n"
return ret

cib_hash_cibfile = "{}:{}".format(

@ -926,11 +926,11 @@ def cib_pushed(name, cibname, scope=None, extra_args=None):
log.trace("Output of pcs.cib_push: %s", cib_push)

if cib_push["retcode"] in [0]:
ret["comment"] += "Pushed CIB {}\n".format(cibname)
ret["comment"] += f"Pushed CIB {cibname}\n"
ret["changes"].update({"cibfile_pushed": cibfile})
else:
ret["result"] = False
ret["comment"] += "Failed to push CIB {}\n".format(cibname)
ret["comment"] += f"Failed to push CIB {cibname}\n"

log.trace("ret: %s", ret)

@ -968,7 +968,7 @@ def prop_has_value(name, prop, value, extra_args=None, cibname=None):
return _item_present(
name=name,
item="property",
item_id="{}={}".format(prop, value),
item_id=f"{prop}={value}",
item_type=None,
create="set",
extra_args=extra_args,

@ -1008,7 +1008,7 @@ def resource_defaults_to(name, default, value, extra_args=None, cibname=None):
return _item_present(
name=name,
item="resource",
item_id="{}={}".format(default, value),
item_id=f"{default}={value}",
item_type=None,
show="defaults",
create="defaults",

@ -1049,7 +1049,7 @@ def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=No
return _item_present(
name=name,
item="resource",
item_id="{}={}".format(op_default, value),
item_id=f"{op_default}={value}",
item_type=None,
show=["op", "defaults"],
create=["op", "defaults"],
@ -174,7 +174,7 @@ def _check_pkg_version_format(pkg):

if not HAS_PIP:
ret["comment"] = (
"An importable Python 2 pip module is required but could not be "
"An importable Python pip module is required but could not be "
"found on your system. This usually means that the system's pip "
"package is not installed properly."
)

@ -198,7 +198,7 @@ def _check_pkg_version_format(pkg):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = _from_line(pkg.split("{}+".format(vcs))[-1])
install_req = _from_line(pkg.split(f"{vcs}+")[-1])
break
else:
install_req = _from_line(pkg)

@ -767,7 +767,7 @@ def installed(
cur_version = __salt__["pip.version"](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret["result"] = False
ret["comment"] = "Error installing '{}': {}".format(name, err)
ret["comment"] = f"Error installing '{name}': {err}"
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:

@ -853,7 +853,7 @@ def installed(
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append(
"Requirements file '{}' will be processed.".format(requirements)
f"Requirements file '{requirements}' will be processed."
)
if editable:
comments.append(

@ -956,7 +956,7 @@ def installed(

# Call to install the package. Actual installation takes place here
pip_install_call = __salt__["pip.install"](
pkgs="{}".format(pkgs_str) if pkgs_str else "",
pkgs=f"{pkgs_str}" if pkgs_str else "",
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,

@ -1081,10 +1081,10 @@ def installed(
and prefix.lower() not in already_installed_packages
):
ver = pipsearch[prefix]
ret["changes"]["{}=={}".format(prefix, ver)] = "Installed"
ret["changes"][f"{prefix}=={ver}"] = "Installed"
# Case for packages that are an URL
else:
ret["changes"]["{}==???".format(state_name)] = "Installed"
ret["changes"][f"{state_name}==???"] = "Installed"

# Set comments
aicomms = "\n".join(already_installed_comments)

@ -1109,19 +1109,15 @@ def installed(
if requirements or editable:
comments = []
if requirements:
comments.append(
'Unable to process requirements file "{}"'.format(requirements)
)
comments.append(f'Unable to process requirements file "{requirements}"')
if editable:
comments.append(
"Unable to install from VCS checkout {}.".format(editable)
)
comments.append(f"Unable to install from VCS checkout {editable}.")
comments.append(error)
ret["comment"] = " ".join(comments)
else:
pkgs_str = ", ".join([state_name for _, state_name in target_pkgs])
aicomms = "\n".join(already_installed_comments)
error_comm = "Failed to install packages: {}. {}".format(pkgs_str, error)
error_comm = f"Failed to install packages: {pkgs_str}. {error}"
ret["comment"] = aicomms + ("\n" if aicomms else "") + error_comm
else:
ret["result"] = False

@ -1159,7 +1155,7 @@ def removed(
pip_list = __salt__["pip.list"](bin_env=bin_env, user=user, cwd=cwd)
except (CommandExecutionError, CommandNotFoundError) as err:
ret["result"] = False
ret["comment"] = "Error uninstalling '{}': {}".format(name, err)
ret["comment"] = f"Error uninstalling '{name}': {err}"
return ret

if name not in pip_list:

@ -1169,7 +1165,7 @@ def removed(

if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Package {} is set to be removed".format(name)
ret["comment"] = f"Package {name} is set to be removed"
return ret

if __salt__["pip.uninstall"](
@ -40,11 +40,29 @@ def _group_changes(cur, wanted, remove=False):
"""
Determine if the groups need to be changed
"""
old = set(cur)
new = set(wanted)
if (remove and old != new) or (not remove and not new.issubset(old)):
return True
return False
cur = set(cur)
wanted = set(wanted)

if cur == wanted or (not remove and wanted.issubset(cur)):
return False

all_grps = {name: __salt__["group.info"](name) for name in cur.union(wanted)}

if remove:
diff = wanted.symmetric_difference(cur)
else:
diff = wanted.difference(cur)

remain = list(diff)
for diff_grp in diff:
for grp, info in all_grps.items():
if grp == diff_grp:
continue
if all_grps[diff_grp]["gid"] == info["gid"]:
# dupe detected
remain.remove(diff_grp)

return bool(remain)


def _get_root_args(local):
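The rewritten _group_changes above stops reporting a change when the only differing group names resolve to a GID that is already covered, which is what made user.present misbehave on duplicate GIDs. A rough standalone sketch of that dedup idea, with a plain gid_of callable standing in for the __salt__["group.info"] lookup and invented group data:

def group_changes(cur, wanted, gid_of, remove=False):
    """Return True only if a non-duplicate-GID difference remains."""
    cur, wanted = set(cur), set(wanted)
    if cur == wanted or (not remove and wanted.issubset(cur)):
        return False
    diff = wanted.symmetric_difference(cur) if remove else wanted.difference(cur)
    # Drop names whose GID is already satisfied by some other group in play
    remain = [
        g
        for g in diff
        if not any(gid_of(g) == gid_of(o) for o in cur | wanted if o != g)
    ]
    return bool(remain)

# "adm" and "administrators" share GID 4, so no change is needed
gids = {"adm": 4, "administrators": 4, "wheel": 10}
assert group_changes(["adm"], ["administrators"], gids.get) is False
assert group_changes(["adm"], ["wheel"], gids.get) is True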
@ -110,6 +128,15 @@ def _changes(

change = {}
wanted_groups = sorted(set((groups or []) + (optional_groups or [])))
lusr_groups_gids = [
__salt__["file.group_to_gid"](gname) for gname in lusr["groups"]
]
dupe_groups = {}
for idx, _gid in enumerate(lusr_groups_gids):
if lusr_groups_gids.count(_gid) > 1:
if _gid not in dupe_groups:
dupe_groups[_gid] = []
dupe_groups[_gid].append(lusr["groups"][idx])
if not remove_groups:
wanted_groups = sorted(set(wanted_groups + lusr["groups"]))
if uid and lusr["uid"] != uid:

@ -119,24 +146,44 @@ def _changes(
default_grp = __salt__["file.gid_to_group"](gid if gid is not None else lusr["gid"])
old_default_grp = __salt__["file.gid_to_group"](lusr["gid"])
# Remove the default group from the list for comparison purposes.
if default_grp in lusr["groups"]:
lusr["groups"].remove(default_grp)
# Remove default group from wanted_groups, as this requirement is
# already met
if default_grp in lusr["groups"] or default_grp in wanted_groups:
if default_grp in salt.utils.data.flatten(dupe_groups.values()):
dupe_gid = __salt__["file.group_to_gid"](default_grp)
for gname in dupe_groups[dupe_gid]:
if gname in lusr["groups"]:
lusr["groups"].remove(gname)
if gname in wanted_groups:
wanted_groups.remove(gname)
else:
if default_grp in lusr["groups"]:
lusr["groups"].remove(default_grp)
if default_grp in wanted_groups:
wanted_groups.remove(default_grp)
# If the group is being changed, make sure that the old primary group is
# also removed from the list. Otherwise, if a user's gid is being changed
# and their old primary group is reassigned as an additional group, Salt
# will not properly detect the need for the change.
if old_default_grp != default_grp and old_default_grp in lusr["groups"]:
lusr["groups"].remove(old_default_grp)
if old_default_grp in salt.utils.data.flatten(dupe_groups.values()):
dupe_gid = __salt__["file.group_to_gid"](old_default_grp)
for gname in dupe_groups[dupe_gid]:
lusr["groups"].remove(gname)
else:
lusr["groups"].remove(old_default_grp)
# If there's a group by the same name as the user, remove it from the list
# for comparison purposes.
if name in lusr["groups"] and name not in wanted_groups:
lusr["groups"].remove(name)
# Remove default group from wanted_groups, as this requirement is
# already met
if default_grp in wanted_groups:
wanted_groups.remove(default_grp)
if name in salt.utils.data.flatten(dupe_groups.values()):
dupe_gid = __salt__["file.group_to_gid"](name)
for gname in dupe_groups[dupe_gid]:
lusr["groups"].remove(gname)
else:
lusr["groups"].remove(name)
if _group_changes(lusr["groups"], wanted_groups, remove_groups):
change["groups"] = wanted_groups
if wanted_groups or remove_groups:
change["groups"] = wanted_groups
if home and lusr["home"] != home:
change["home"] = home
if createhome:
@ -1,7 +1,7 @@
"""
Management of Zabbix Action object over Zabbix API.

.. versionadded:: 2017.7
.. versionadded:: 2017.7.0

:codeauthor: Jakub Sliva <jakub.sliva@ultimum.io>
"""

@ -127,7 +127,7 @@ def present(name, params, **kwargs):

if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be fixed.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be fixed.'
ret["changes"] = {
name: {
"old": (

@ -151,14 +151,14 @@ def present(name, params, **kwargs):
)
if action_update:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" updated.'
ret["changes"] = {
name: {
"old": (
'Zabbix Action "{}" differed '
"in following parameters: {}".format(name, diff_params)
),
"new": 'Zabbix Action "{}" fixed.'.format(name),
"new": f'Zabbix Action "{name}" fixed.',
}
}

@ -173,10 +173,10 @@ def present(name, params, **kwargs):
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" does not exist.'.format(name),
"old": f'Zabbix Action "{name}" does not exist.',
"new": (
'Zabbix Action "{}" would be created according definition.'.format(
name

@ -193,10 +193,10 @@ def present(name, params, **kwargs):

if action_create:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" created.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" did not exist.'.format(name),
"old": f'Zabbix Action "{name}" did not exist.',
"new": (
'Zabbix Action "{}" created according definition.'.format(
name

@ -235,15 +235,15 @@ def absent(name, **kwargs):

if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" exists.'.format(name),
"new": 'Zabbix Action "{}" would be deleted.'.format(name),
"old": f'Zabbix Action "{name}" exists.',
"new": f'Zabbix Action "{name}" would be deleted.',
}
}
else:

@ -253,11 +253,11 @@ def absent(name, **kwargs):

if action_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" existed.'.format(name),
"new": 'Zabbix Action "{}" deleted.'.format(name),
"old": f'Zabbix Action "{name}" existed.',
"new": f'Zabbix Action "{name}" deleted.',
}
}

@ -1,5 +1,5 @@
"""
.. versionadded:: 2017.7
.. versionadded:: 2017.7.0

Management of Zabbix Template object over Zabbix API.

@ -487,10 +487,10 @@ def is_present(name, **kwargs):

if not object_id:
ret["result"] = False
ret["comment"] = 'Zabbix Template "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" does not exist.'
else:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" exists.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" exists.'

return ret

@ -690,7 +690,7 @@ def present(name, params, static_host_list=True, **kwargs):
"selectMacros": "extend",
"filter": {"host": name},
},
**kwargs
**kwargs,
)
log.info("TEMPLATE get result: %s", str(json.dumps(tmpl_get, indent=4)))

@ -797,7 +797,7 @@ def present(name, params, static_host_list=True, **kwargs):
TEMPLATE_COMPONENT_DEF[component]["qselectpid"]: template_id
},
filter_key=TEMPLATE_COMPONENT_DEF[component]["filter"],
**kwargs
**kwargs,
)
else:
defined_c_list_subs = []

@ -807,7 +807,7 @@ def present(name, params, static_host_list=True, **kwargs):
template_id,
defined_c_list_subs,
existing_c_list_subs,
**kwargs
**kwargs,
)

log.info(

@ -846,7 +846,7 @@ def present(name, params, static_host_list=True, **kwargs):
defined_p_list_subs = __salt__["zabbix.substitute_params"](
d_rule_component[proto_name],
extend_params={c_def["qselectpid"]: template_id},
**kwargs
**kwargs,
)
else:
defined_p_list_subs = []

@ -857,7 +857,7 @@ def present(name, params, static_host_list=True, **kwargs):
defined_p_list_subs,
existing_p_list_subs,
template_id=template_id,
**kwargs
**kwargs,
)

log.info(

@ -884,10 +884,10 @@ def present(name, params, static_host_list=True, **kwargs):
if tmpl_action:
ret["result"] = True
if dry_run:
ret["comment"] = 'Zabbix Template "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" does not exist.'.format(name),
"old": f'Zabbix Template "{name}" does not exist.',
"new": (
'Zabbix Template "{}" would be created '
"according definition.".format(name)

@ -895,10 +895,10 @@ def present(name, params, static_host_list=True, **kwargs):
}
}
else:
ret["comment"] = 'Zabbix Template "{}" created.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" did not exist.'.format(name),
"old": f'Zabbix Template "{name}" did not exist.',
"new": (
'Zabbix Template "{}" created according definition.'.format(
name

@ -909,10 +909,10 @@ def present(name, params, static_host_list=True, **kwargs):
else:
ret["result"] = True
if dry_run:
ret["comment"] = 'Zabbix Template "{}" would be updated.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be updated.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" differs.'.format(name),
"old": f'Zabbix Template "{name}" differs.',
"new": (
'Zabbix Template "{}" would be updated '
"according definition.".format(name)

@ -920,10 +920,10 @@ def present(name, params, static_host_list=True, **kwargs):
}
}
else:
ret["comment"] = 'Zabbix Template "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" updated.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" differed.'.format(name),
"old": f'Zabbix Template "{name}" differed.',
"new": (
'Zabbix Template "{}" updated according definition.'.format(
name

@ -962,15 +962,15 @@ def absent(name, **kwargs):

if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" exists.'.format(name),
"new": 'Zabbix Template "{}" would be deleted.'.format(name),
"old": f'Zabbix Template "{name}" exists.',
"new": f'Zabbix Template "{name}" would be deleted.',
}
}
else:

@ -979,11 +979,11 @@ def absent(name, **kwargs):
)
if tmpl_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" existed.'.format(name),
"new": 'Zabbix Template "{}" deleted.'.format(name),
"old": f'Zabbix Template "{name}" existed.',
"new": f'Zabbix Template "{name}" deleted.',
}
}

@ -1,7 +1,7 @@
"""
Management of Zabbix Valuemap object over Zabbix API.

.. versionadded:: 2017.7
.. versionadded:: 2017.7.0

:codeauthor: Jakub Sliva <jakub.sliva@ultimum.io>
"""

@ -95,7 +95,7 @@ def present(name, params, **kwargs):

if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be fixed.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be fixed.'
ret["changes"] = {
name: {
"old": (

@ -119,14 +119,14 @@ def present(name, params, **kwargs):
)
if valuemap_update:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" updated.'
ret["changes"] = {
name: {
"old": (
'Zabbix Value map "{}" differed '
"in following parameters: {}".format(name, diff_params)
),
"new": 'Zabbix Value map "{}" fixed.'.format(name),
"new": f'Zabbix Value map "{name}" fixed.',
}
}

@ -141,10 +141,10 @@ def present(name, params, **kwargs):
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" does not exist.'.format(name),
"old": f'Zabbix Value map "{name}" does not exist.',
"new": (
'Zabbix Value map "{}" would be created '
"according definition.".format(name)

@ -163,10 +163,10 @@ def present(name, params, **kwargs):

if valuemap_create:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" created.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" did not exist.'.format(name),
"old": f'Zabbix Value map "{name}" did not exist.',
"new": (
'Zabbix Value map "{}" created according definition.'.format(
name

@ -205,15 +205,15 @@ def absent(name, **kwargs):

if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" exists.'.format(name),
"new": 'Zabbix Value map "{}" would be deleted.'.format(name),
"old": f'Zabbix Value map "{name}" exists.',
"new": f'Zabbix Value map "{name}" would be deleted.',
}
}
else:

@ -223,11 +223,11 @@ def absent(name, **kwargs):

if valuemap_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" existed.'.format(name),
"new": 'Zabbix Value map "{}" deleted.'.format(name),
"old": f'Zabbix Value map "{name}" existed.',
"new": f'Zabbix Value map "{name}" deleted.',
}
}

@ -8,6 +8,7 @@ This is a base library used by a number of AWS services.
:depends: requests
"""

import copy
import hashlib
import hmac
import logging

@ -106,7 +107,7 @@ def get_metadata(path, refresh_token_if_needed=True):

# Connections to instance meta-data must fail fast and never be proxied
result = requests.get(
"http://169.254.169.254/latest/{}".format(path),
f"http://169.254.169.254/latest/{path}",
proxies={"http": ""},
headers=headers,
timeout=AWS_METADATA_TIMEOUT,

@ -159,7 +160,7 @@ def creds(provider):
return provider["id"], provider["key"], ""

try:
result = get_metadata("meta-data/iam/security-credentials/{}".format(role))
result = get_metadata(f"meta-data/iam/security-credentials/{role}")
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider["id"], provider["key"], ""

@ -201,7 +202,7 @@ def sig2(method, endpoint, params, provider, aws_api_version):
params_with_headers["AWSAccessKeyId"] = access_key_id
params_with_headers["SignatureVersion"] = "2"
params_with_headers["SignatureMethod"] = "HmacSHA256"
params_with_headers["Timestamp"] = "{}".format(timestamp)
params_with_headers["Timestamp"] = f"{timestamp}"
params_with_headers["Version"] = aws_api_version
keys = sorted(params_with_headers.keys())
values = list(list(map(params_with_headers.get, keys)))

@ -230,9 +231,9 @@ def assumed_creds(prov_dict, role_arn, location=None):
# current time in epoch seconds
now = time.mktime(datetime.utcnow().timetuple())

for key, creds in __AssumeCache__.items():
for key, creds in copy.deepcopy(__AssumeCache__).items():
if (creds["Expiration"] - now) <= 120:
__AssumeCache__[key].delete()
del __AssumeCache__[key]

if role_arn in __AssumeCache__:
c = __AssumeCache__[role_arn]
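The assumed_creds hunk iterates over a deep copy of the credential cache so expired entries can be removed from the real dict without raising "RuntimeError: dictionary changed size during iteration", and it switches to del, since plain dict values have no .delete() method. A minimal reproduction of the pattern with toy data (not real AWS credentials):

import copy

cache = {"role-a": {"Expiration": 100}, "role-b": {"Expiration": 999}}
now = 200

# Deleting inside `for key in cache:` would raise RuntimeError in CPython.
for key, creds in copy.deepcopy(cache).items():  # iterate a snapshot
    if (creds["Expiration"] - now) <= 120:
        del cache[key]  # mutate the live dict safely

assert list(cache) == ["role-b"]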
@ -345,9 +346,7 @@ def sig4(

for header in sorted(new_headers.keys(), key=str.lower):
lower_header = header.lower()
a_canonical_headers.append(
"{}:{}".format(lower_header, new_headers[header].strip())
)
a_canonical_headers.append(f"{lower_header}:{new_headers[header].strip()}")
a_signed_headers.append(lower_header)
canonical_headers = "\n".join(a_canonical_headers) + "\n"
signed_headers = ";".join(a_signed_headers)

@ -389,7 +388,7 @@ def sig4(

new_headers["Authorization"] = authorization_header

requesturl = "{}?{}".format(requesturl, querystring)
requesturl = f"{requesturl}?{querystring}"
return new_headers, requesturl


@ -484,11 +483,9 @@ def query(

if endpoint is None:
if not requesturl:
endpoint = prov_dict.get(
"endpoint", "{}.{}.{}".format(product, location, service_url)
)
endpoint = prov_dict.get("endpoint", f"{product}.{location}.{service_url}")

requesturl = "https://{}/".format(endpoint)
requesturl = f"https://{endpoint}/"
else:
endpoint = urllib.parse.urlparse(requesturl).netloc
if endpoint == "":

@ -507,7 +504,7 @@ def query(

aws_api_version = prov_dict.get(
"aws_api_version",
prov_dict.get("{}_api_version".format(product), DEFAULT_AWS_API_VERSION),
prov_dict.get(f"{product}_api_version", DEFAULT_AWS_API_VERSION),
)

# Fallback to ec2's id & key if none is found, for this component
|
|||
if scheme in ("salt", "file"):
|
||||
return uri
|
||||
elif scheme:
|
||||
raise ValueError("Unsupported URL scheme({}) in {}".format(scheme, uri))
|
||||
raise ValueError(f"Unsupported URL scheme({scheme}) in {uri}")
|
||||
return self.lookup.adjust_uri(uri, filename)
|
||||
|
||||
def get_template(self, uri, relativeto=None):
|
||||
|
@ -99,8 +99,10 @@ if HAS_MAKO:
|
|||
)
|
||||
|
||||
def destroy(self):
|
||||
if self.client:
|
||||
if self._file_client:
|
||||
file_client = self._file_client
|
||||
self._file_client = None
|
||||
try:
|
||||
self.client.destroy()
|
||||
file_client.destroy()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
|
|
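The destroy fix above swaps the stale self.client reference for the _file_client attribute the class actually stores, and clears it before releasing the client, so repeated destroy() calls are harmless. A sketch of that clear-then-release pattern; the attribute name mirrors the hunk, everything else is illustrative:

class TemplateLookup:
    def __init__(self, file_client):
        self._file_client = file_client

    def destroy(self):
        if self._file_client:
            file_client = self._file_client
            self._file_client = None  # clear first so a second call is a no-op
            try:
                file_client.destroy()
            except AttributeError:
                pass  # tolerate clients that lack a destroy() method


class FakeClient:
    def destroy(self):
        self.closed = True


lookup = TemplateLookup(FakeClient())
lookup.destroy()
lookup.destroy()  # safe: the reference was already released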
@ -96,7 +96,7 @@ def read_reg_pol_file(reg_pol_path):
"""
return_data = None
if os.path.exists(reg_pol_path):
log.debug("LGPO_REG Utils: Reading from %s", reg_pol_path)
log.debug("LGPO_REG Util: Reading from %s", reg_pol_path)
with salt.utils.files.fopen(reg_pol_path, "rb") as pol_file:
return_data = pol_file.read()
return return_data

@ -173,16 +173,16 @@ def write_reg_pol_data(
if not search_reg_pol(r"\[General\]\r\n", gpt_ini_data):
log.debug("LGPO_REG Util: Adding [General] section to gpt.ini")
gpt_ini_data = "[General]\r\n" + gpt_ini_data
if search_reg_pol(r"{}=".format(re.escape(gpt_extension)), gpt_ini_data):
if search_reg_pol(rf"{re.escape(gpt_extension)}=", gpt_ini_data):
# ensure the line contains the ADM guid
gpt_ext_loc = re.search(
r"^{}=.*\r\n".format(re.escape(gpt_extension)),
rf"^{re.escape(gpt_extension)}=.*\r\n",
gpt_ini_data,
re.IGNORECASE | re.MULTILINE,
)
gpt_ext_str = gpt_ini_data[gpt_ext_loc.start() : gpt_ext_loc.end()]
if not search_reg_pol(
search_string=r"{}".format(re.escape(gpt_extension_guid)),
search_string=rf"{re.escape(gpt_extension_guid)}",
policy_data=gpt_ext_str,
):
log.debug("LGPO_REG Util: Inserting gpt extension GUID")

@ -339,7 +339,7 @@ def reg_pol_to_dict(policy_data):
# REG_QWORD : 64-bit little endian
v_data = struct.unpack("<q", v_data)[0]
else:
msg = "LGPO_REG Util: Found unknown registry type: {}".format(v_type)
msg = f"LGPO_REG Util: Found unknown registry type: {v_type}"
raise CommandExecutionError(msg)

# Lookup the REG Type from the number

@ -392,9 +392,9 @@ def dict_to_reg_pol(data):
# The first three items are pretty straight forward
policy = [
# Key followed by null byte
"{}".format(key).encode("utf-16-le") + pol_section_term,
f"{key}".encode("utf-16-le") + pol_section_term,
# Value name followed by null byte
"{}".format(v_name).encode("utf-16-le") + pol_section_term,
f"{v_name}".encode("utf-16-le") + pol_section_term,
# Type in 32-bit little-endian
struct.pack("<i", v_type),
]
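The write_reg_pol_data hunks replace r"...".format(...) with rf-strings: the r prefix keeps regex backslashes literal, the f prefix interpolates, and re.escape still neutralizes any regex metacharacters in the interpolated name. A small demonstration with an invented extension string:

import re

gpt_extension = "gPCMachineExtensionNames"  # hypothetical value for illustration
pattern = rf"^{re.escape(gpt_extension)}=.*\r\n"

data = "[General]\r\ngPCMachineExtensionNames=[{guid}]\r\n"
assert re.search(pattern, data, re.IGNORECASE | re.MULTILINE) is not None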
@ -2,7 +2,7 @@ import pytest

import salt.modules.selinux as selinux
from salt.exceptions import SaltInvocationError
from tests.support.mock import MagicMock, patch
from tests.support.mock import MagicMock, mock_open, patch


@pytest.fixture

@ -293,3 +293,86 @@ def test_fcontext_policy_parsing_fail():
"retcode": 1,
"error": "Unrecognized response from restorecon command.",
}


def test_selinux_config_enforcing():
"""
Test values written to /etc/selinux/config are lowercase
"""
mock_file = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
## SELINUX=disabled
SELINUX=permissive
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted

"""
with patch("salt.utils.files.fopen", mock_open(read_data=mock_file)) as m_open:
selinux.setenforce("Enforcing")
writes = m_open.write_calls()
assert writes
for line in writes:
if line.startswith("SELINUX="):
assert line == "SELINUX=enforcing"


def test_selinux_config_permissive():
"""
Test values written to /etc/selinux/config are lowercase
"""
mock_file = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted

"""
with patch("salt.utils.files.fopen", mock_open(read_data=mock_file)) as m_open:
selinux.setenforce("Permissive")
writes = m_open.write_calls()
assert writes
for line in writes:
if line.startswith("SELINUX="):
assert line == "SELINUX=permissive"


def test_selinux_config_disabled():
"""
Test values written to /etc/selinux/config are lowercase
"""
mock_file = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
## SELINUX=disabled
SELINUX=permissive
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted

"""
with patch("salt.utils.files.fopen", mock_open(read_data=mock_file)) as m_open:
selinux.setenforce("Disabled")
writes = m_open.write_calls()
assert writes
for line in writes:
if line.startswith("SELINUX="):
assert line == "SELINUX=disabled"
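The three tests above assert the same invariant: whatever casing the caller passes ("Enforcing", "Permissive", "Disabled"), the value written back to /etc/selinux/config is lowercase. A minimal sketch of the normalization they imply; this is an illustration, not the module's actual implementation:

def rewrite_selinux_config(config_text, mode):
    """Rewrite the SELINUX= line with the requested mode lowercased."""
    lines = []
    for line in config_text.splitlines(keepends=True):
        if line.startswith("SELINUX="):
            line = f"SELINUX={mode.lower()}\n"
        lines.append(line)
    return "".join(lines)


assert rewrite_selinux_config("SELINUX=permissive\n", "Enforcing") == "SELINUX=enforcing\n"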
@ -107,154 +107,6 @@ def lgpo_bin():
yield str(sys_dir / "lgpo.exe")


def test_get_policy_name(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=True,
return_full_policy_names=True,
hierarchical_return=False,
)
expected = "Not Configured"
assert result == expected


def test_get_policy_id():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=True,
return_full_policy_names=True,
hierarchical_return=False,
)
expected = "Not Configured"
assert result == expected


def test_get_policy_name_full_return_full_names(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=False,
)
key = "Windows Components\\Data Collection and Preview Builds\\{}"
expected = {key.format(policy_name): "Not Configured"}
assert result == expected


def test_get_policy_id_full_return_full_names(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=False,
)
key = "Windows Components\\Data Collection and Preview Builds\\{}"
expected = {key.format(policy_name): "Not Configured"}
assert result == expected


def test_get_policy_name_full_return_ids(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=False,
)
expected = {"AllowTelemetry": "Not Configured"}
assert result == expected


def test_get_policy_id_full_return_ids():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=False,
)
expected = {"AllowTelemetry": "Not Configured"}
assert result == expected


def test_get_policy_id_full_return_ids_hierarchical():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=True,
)
expected = {
"Computer Configuration": {
"Administrative Templates": {
"WindowsComponents": {
"DataCollectionAndPreviewBuilds": {
"AllowTelemetry": "Not Configured"
},
},
},
},
}
assert result == expected


def test_get_policy_name_return_full_names_hierarchical(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=True,
)
expected = {
"Computer Configuration": {
"Administrative Templates": {
"Windows Components": {
"Data Collection and Preview Builds": {
policy_name: "Not Configured"
}
}
}
}
}
assert result == expected


@pytest.mark.destructive_test
def test__load_policy_definitions():
"""

@ -280,7 +132,7 @@ def test__load_policy_definitions():
# Remove source file
os.remove(bogus_fle)
# Remove cached file
search_string = "{}\\_bogus*.adml".format(cache_dir)
search_string = f"{cache_dir}\\_bogus*.adml"
for file_name in glob.glob(search_string):
os.remove(file_name)

229
tests/pytests/unit/modules/win_lgpo/test_get_policy.py
Normal file

@ -0,0 +1,229 @@
import copy
import logging
import os
import pathlib

import pytest

import salt.grains.core
import salt.modules.win_file as win_file
import salt.modules.win_lgpo as win_lgpo
import salt.utils.files
import salt.utils.win_dacl as win_dacl

log = logging.getLogger(__name__)

pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_unless_on_windows,
pytest.mark.slow_test,
]


@pytest.fixture
def configure_loader_modules(minion_opts):
return {
win_lgpo: {
"__opts__": minion_opts,
"__salt__": {
"file.file_exists": win_file.file_exists,
"file.makedirs": win_file.makedirs_,
},
},
win_file: {
"__utils__": {
"dacl.set_perms": win_dacl.set_perms,
},
},
}


@pytest.fixture(scope="module")
def osrelease():
grains = salt.grains.core.os_data()
yield grains.get("osrelease", None)


@pytest.fixture
def clean_comp():
reg_pol = pathlib.Path(
os.getenv("SystemRoot"), "System32", "GroupPolicy", "Machine", "Registry.pol"
)
reg_pol.unlink(missing_ok=True)
try:
yield reg_pol
finally:
reg_pol.unlink(missing_ok=True)


@pytest.fixture
def checkbox_policy():
policy_name = "Configure Corporate Windows Error Reporting"
policy_settings = {
"Connect using SSL": False,
"Corporate server name": "fakeserver.com",
"Only upload on free networks": False,
"Server port": 1273,
}
win_lgpo.set_computer_policy(name=policy_name, setting=copy.copy(policy_settings))
try:
yield policy_name, policy_settings
finally:
win_lgpo.set_computer_policy(name=policy_name, setting="Not Configured")


def test_name(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=True,
return_full_policy_names=True,
hierarchical_return=False,
)
expected = "Not Configured"
assert result == expected


def test_id():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=True,
return_full_policy_names=True,
hierarchical_return=False,
)
expected = "Not Configured"
assert result == expected


def test_name_full_return_full_names(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=False,
)
key = "Windows Components\\Data Collection and Preview Builds\\{}"
expected = {key.format(policy_name): "Not Configured"}
assert result == expected


def test_id_full_return_full_names(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=False,
)
key = "Windows Components\\Data Collection and Preview Builds\\{}"
expected = {key.format(policy_name): "Not Configured"}
assert result == expected


def test_name_full_return_ids(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=False,
)
expected = {"AllowTelemetry": "Not Configured"}
assert result == expected


def test_id_full_return_ids():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=False,
)
expected = {"AllowTelemetry": "Not Configured"}
assert result == expected


def test_id_full_return_ids_hierarchical():
result = win_lgpo.get_policy(
policy_name="AllowTelemetry",
policy_class="machine",
return_value_only=False,
return_full_policy_names=False,
hierarchical_return=True,
)
expected = {
"Computer Configuration": {
"Administrative Templates": {
"WindowsComponents": {
"DataCollectionAndPreviewBuilds": {
"AllowTelemetry": "Not Configured"
},
},
},
},
}
assert result == expected


def test_name_return_full_names_hierarchical(osrelease):
if osrelease == "2022Server":
pytest.skip(f"Test is failing on {osrelease}")
if osrelease == "11":
policy_name = "Allow Diagnostic Data"
else:
policy_name = "Allow Telemetry"
result = win_lgpo.get_policy(
policy_name=policy_name,
policy_class="machine",
return_value_only=False,
return_full_policy_names=True,
hierarchical_return=True,
)
expected = {
"Computer Configuration": {
"Administrative Templates": {
"Windows Components": {
"Data Collection and Preview Builds": {
policy_name: "Not Configured"
}
}
}
}
}
assert result == expected


def test_checkboxes(checkbox_policy):
"""
Test scenario where sometimes checkboxes aren't returned in the results
"""
policy_name, expected = checkbox_policy
result = win_lgpo.get_policy(policy_name=policy_name, policy_class="Machine")
assert result == expected
@ -62,6 +62,28 @@ def expected_targets_return():
|
|||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def expected_docs_targets_return():
|
||||
return {
|
||||
"home": {
|
||||
"passwd": "password",
|
||||
"sudo": "password",
|
||||
"host": "12.34.56.78",
|
||||
"port": 23,
|
||||
"user": "gtmanfred",
|
||||
"minion_opts": {"http_port": 80},
|
||||
},
|
||||
"salt.gtmanfred.com": {
|
||||
"passwd": "password",
|
||||
"sudo": "password",
|
||||
"host": "127.0.0.1",
|
||||
"port": 22,
|
||||
"user": "gtmanfred",
|
||||
"minion_opts": {"http_port": 80},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def roster_dir(tmp_path_factory):
|
||||
dpath = tmp_path_factory.mktemp("roster")
|
||||
|
@ -136,6 +158,59 @@ def roster_dir(tmp_path_factory):
              children:
                southeast:
    """
    docs_ini_contents = """
    [servers]
    salt.gtmanfred.com ansible_ssh_user=gtmanfred ansible_ssh_host=127.0.0.1 ansible_ssh_port=22 ansible_ssh_pass='password' ansible_sudo_pass='password'

    [desktop]
    home ansible_ssh_user=gtmanfred ansible_ssh_host=12.34.56.78 ansible_ssh_port=23 ansible_ssh_pass='password' ansible_sudo_pass='password'

    [computers:children]
    desktop
    servers

    [computers:vars]
    http_port=80
    """
    docs_script_contents = """
    #!/bin/bash
    echo '{
        "servers": [
            "salt.gtmanfred.com"
        ],
        "desktop": [
            "home"
        ],
        "computers": {
            "hosts": [],
            "children": [
                "desktop",
                "servers"
            ],
            "vars": {
                "http_port": 80
            }
        },
        "_meta": {
            "hostvars": {
                "salt.gtmanfred.com": {
                    "ansible_ssh_user": "gtmanfred",
                    "ansible_ssh_host": "127.0.0.1",
                    "ansible_sudo_pass": "password",
                    "ansible_ssh_pass": "password",
                    "ansible_ssh_port": 22
                },
                "home": {
                    "ansible_ssh_user": "gtmanfred",
                    "ansible_ssh_host": "12.34.56.78",
                    "ansible_sudo_pass": "password",
                    "ansible_ssh_pass": "password",
                    "ansible_ssh_port": 23
                }
            }
        }
    }'
    """
    with pytest.helpers.temp_file(
        "roster.py", roster_py_contents, directory=dpath
    ) as py_roster:

@ -144,11 +219,17 @@ def roster_dir(tmp_path_factory):
        "roster.ini", roster_ini_contents, directory=dpath
    ), pytest.helpers.temp_file(
        "roster.yml", roster_yaml_contents, directory=dpath
    ), pytest.helpers.temp_file(
        "roster-docs.ini", docs_ini_contents, directory=dpath
    ):
        try:
            yield dpath
        finally:
            shutil.rmtree(str(dpath), ignore_errors=True)
        with pytest.helpers.temp_file(
            "roster-docs.sh", docs_script_contents, directory=dpath
        ) as script_roster:
            script_roster.chmod(0o755)
            try:
                yield dpath
            finally:
                shutil.rmtree(str(dpath), ignore_errors=True)


@pytest.mark.parametrize(

@ -179,3 +260,17 @@ def test_script(roster_opts, roster_dir, expected_targets_return):
    with patch.dict(ansible.__opts__, roster_opts):
        ret = ansible.targets("*")
        assert ret == expected_targets_return


def test_docs_ini(roster_opts, roster_dir, expected_docs_targets_return):
    roster_opts["roster_file"] = str(roster_dir / "roster-docs.ini")
    with patch.dict(ansible.__opts__, roster_opts):
        ret = ansible.targets("*")
        assert ret == expected_docs_targets_return


def test_docs_script(roster_opts, roster_dir, expected_docs_targets_return):
    roster_opts["roster_file"] = str(roster_dir / "roster-docs.sh")
    with patch.dict(ansible.__opts__, roster_opts):
        ret = ansible.targets("*")
        assert ret == expected_docs_targets_return
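The fixtures above encode the translation the ansible roster performs:
`ansible_ssh_user`/`ansible_ssh_host`/`ansible_ssh_port`/`ansible_ssh_pass`
and `ansible_sudo_pass` become the roster's `user`/`host`/`port`/`passwd`/
`sudo` fields, and group-level vars land in `minion_opts`. A rough sketch of
that mapping (the helper name is illustrative; only the field names come from
the fixtures):

def inventory_host_to_target(hostvars, group_vars):
    """Translate ansible-style host variables into a salt-ssh roster entry."""
    return {
        "user": hostvars.get("ansible_ssh_user"),
        "host": hostvars.get("ansible_ssh_host"),
        "port": hostvars.get("ansible_ssh_port"),
        "passwd": hostvars.get("ansible_ssh_pass"),
        "sudo": hostvars.get("ansible_sudo_pass"),
        "minion_opts": dict(group_vars),
    }

inventory_host_to_target(
    {
        "ansible_ssh_user": "gtmanfred",
        "ansible_ssh_host": "12.34.56.78",
        "ansible_ssh_port": 23,
        "ansible_ssh_pass": "password",
        "ansible_sudo_pass": "password",
    },
    {"http_port": 80},
)
# -> matches the "home" entry in expected_docs_targets_return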
752
tests/pytests/unit/states/test_jboss7.py
Normal file
@ -0,0 +1,752 @@
# pylint: disable=unused-argument


import pytest

import salt.states.jboss7 as jboss7
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch


@pytest.fixture
def configure_loader_modules():
    return {
        jboss7: {
            "__salt__": {
                "jboss7.read_datasource": MagicMock(),
                "jboss7.create_datasource": MagicMock(),
                "jboss7.update_datasource": MagicMock(),
                "jboss7.remove_datasource": MagicMock(),
                "jboss7.read_simple_binding": MagicMock(),
                "jboss7.create_simple_binding": MagicMock(),
                "jboss7.update_simple_binding": MagicMock(),
                "jboss7.undeploy": MagicMock(),
                "jboss7.deploy": MagicMock,
                "file.get_managed": MagicMock,
                "file.manage_file": MagicMock,
                "jboss7.list_deployments": MagicMock,
            },
            "__env__": "base",
        }
    }


def test_should_not_redeploy_unchanged():
    # given
    parameters = {
        "target_file": "some_artifact",
        "undeploy_force": False,
        "undeploy": "some_artifact",
        "source": "some_artifact_on_master",
    }
    jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

    def list_deployments(jboss_config):
        return ["some_artifact"]

    def file_get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify,
        kwargs,
    ):
        return "sfn", "hash", ""

    def file_manage_file(
        name,
        sfn,
        ret,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        backup,
        makedirs,
        template,
        show_diff,
        contents,
        dir_mode,
    ):
        return {"result": True, "changes": False}

    jboss7_undeploy_mock = MagicMock()
    jboss7_deploy_mock = MagicMock()
    file_get_managed = MagicMock(side_effect=file_get_managed)
    file_manage_file = MagicMock(side_effect=file_manage_file)
    list_deployments_mock = MagicMock(side_effect=list_deployments)
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.undeploy": jboss7_undeploy_mock,
            "jboss7.deploy": jboss7_deploy_mock,
            "file.get_managed": file_get_managed,
            "file.manage_file": file_manage_file,
            "jboss7.list_deployments": list_deployments_mock,
        },
    ):
        # when
        result = jboss7.deployed(
            name="unchanged", jboss_config=jboss_conf, salt_source=parameters
        )

        # then
        assert not jboss7_undeploy_mock.called
        assert not jboss7_deploy_mock.called


def test_should_redeploy_changed():
    # given
    parameters = {
        "target_file": "some_artifact",
        "undeploy_force": False,
        "undeploy": "some_artifact",
        "source": "some_artifact_on_master",
    }
    jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

    def list_deployments(jboss_config):
        return ["some_artifact"]

    def file_get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify,
        kwargs,
    ):
        return "sfn", "hash", ""

    def file_manage_file(
        name,
        sfn,
        ret,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        backup,
        makedirs,
        template,
        show_diff,
        contents,
        dir_mode,
    ):
        return {"result": True, "changes": True}

    jboss7_undeploy_mock = MagicMock()
    jboss7_deploy_mock = MagicMock()
    file_get_managed = MagicMock(side_effect=file_get_managed)
    file_manage_file = MagicMock(side_effect=file_manage_file)
    list_deployments_mock = MagicMock(side_effect=list_deployments)
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.undeploy": jboss7_undeploy_mock,
            "jboss7.deploy": jboss7_deploy_mock,
            "file.get_managed": file_get_managed,
            "file.manage_file": file_manage_file,
            "jboss7.list_deployments": list_deployments_mock,
        },
    ):
        # when
        result = jboss7.deployed(
            name="unchanged", jboss_config=jboss_conf, salt_source=parameters
        )

        # then
        assert jboss7_undeploy_mock.called
        assert jboss7_deploy_mock.called


def test_should_deploy_different_artifact():
    # given
    parameters = {
        "target_file": "some_artifact",
        "undeploy_force": False,
        "undeploy": "some_artifact",
        "source": "some_artifact_on_master",
    }
    jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

    def list_deployments(jboss_config):
        return ["some_other_artifact"]

    def file_get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify,
        kwargs,
    ):
        return "sfn", "hash", ""

    def file_manage_file(
        name,
        sfn,
        ret,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        backup,
        makedirs,
        template,
        show_diff,
        contents,
        dir_mode,
    ):
        return {"result": True, "changes": False}

    jboss7_undeploy_mock = MagicMock()
    jboss7_deploy_mock = MagicMock()
    file_get_managed = MagicMock(side_effect=file_get_managed)
    file_manage_file = MagicMock(side_effect=file_manage_file)
    list_deployments_mock = MagicMock(side_effect=list_deployments)
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.undeploy": jboss7_undeploy_mock,
            "jboss7.deploy": jboss7_deploy_mock,
            "file.get_managed": file_get_managed,
            "file.manage_file": file_manage_file,
            "jboss7.list_deployments": list_deployments_mock,
        },
    ):
        # when
        result = jboss7.deployed(
            name="unchanged", jboss_config=jboss_conf, salt_source=parameters
        )

        # then
        assert not jboss7_undeploy_mock.called
        assert jboss7_deploy_mock.called


def test_should_redeploy_undeploy_force():
    # given
    parameters = {
        "target_file": "some_artifact",
        "undeploy_force": True,
        "undeploy": "some_artifact",
        "source": "some_artifact_on_master",
    }
    jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

    def list_deployments(jboss_config):
        return ["some_artifact"]

    def file_get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify,
        kwargs,
    ):
        return "sfn", "hash", ""

    def file_manage_file(
        name,
        sfn,
        ret,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        backup,
        makedirs,
        template,
        show_diff,
        contents,
        dir_mode,
    ):
        return {"result": True, "changes": False}

    jboss7_undeploy_mock = MagicMock()
    jboss7_deploy_mock = MagicMock()
    file_get_managed = MagicMock(side_effect=file_get_managed)
    file_manage_file = MagicMock(side_effect=file_manage_file)
    list_deployments_mock = MagicMock(side_effect=list_deployments)
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.undeploy": jboss7_undeploy_mock,
            "jboss7.deploy": jboss7_deploy_mock,
            "file.get_managed": file_get_managed,
            "file.manage_file": file_manage_file,
            "jboss7.list_deployments": list_deployments_mock,
        },
    ):
        # when
        result = jboss7.deployed(
            name="unchanged", jboss_config=jboss_conf, salt_source=parameters
        )

        # then
        assert jboss7_undeploy_mock.called
        assert jboss7_deploy_mock.called


def test_should_create_new_datasource_if_not_exists():
    # given
    datasource_properties = {"connection-url": "jdbc:/old-connection-url"}
    ds_status = {"created": False}

    def read_func(jboss_config, name, profile):
        if ds_status["created"]:
            return {"success": True, "result": datasource_properties}
        else:
            return {"success": False, "err_code": "JBAS014807"}

    def create_func(jboss_config, name, datasource_properties, profile):
        ds_status["created"] = True
        return {"success": True}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock(side_effect=create_func)
    update_mock = MagicMock()
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_datasource": read_mock,
            "jboss7.create_datasource": create_mock,
            "jboss7.update_datasource": update_mock,
        },
    ):

        # when
        result = jboss7.datasource_exists(
            name="appDS",
            jboss_config={},
            datasource_properties=datasource_properties,
            profile=None,
        )

        # then
        create_mock.assert_called_with(
            name="appDS",
            jboss_config={},
            datasource_properties=datasource_properties,
            profile=None,
        )

        assert not update_mock.called
        assert result["comment"] == "Datasource created."


def test_should_update_the_datasource_if_exists():
    ds_status = {"updated": False}

    def read_func(jboss_config, name, profile):
        if ds_status["updated"]:
            return {
                "success": True,
                "result": {"connection-url": "jdbc:/new-connection-url"},
            }
        else:
            return {
                "success": True,
                "result": {"connection-url": "jdbc:/old-connection-url"},
            }

    def update_func(jboss_config, name, new_properties, profile):
        ds_status["updated"] = True
        return {"success": True}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock()
    update_mock = MagicMock(side_effect=update_func)
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_datasource": read_mock,
            "jboss7.create_datasource": create_mock,
            "jboss7.update_datasource": update_mock,
        },
    ):
        result = jboss7.datasource_exists(
            name="appDS",
            jboss_config={},
            datasource_properties={"connection-url": "jdbc:/new-connection-url"},
            profile=None,
        )

        update_mock.assert_called_with(
            name="appDS",
            jboss_config={},
            new_properties={"connection-url": "jdbc:/new-connection-url"},
            profile=None,
        )
        assert read_mock.called
        assert result["comment"] == "Datasource updated."


def test_should_recreate_the_datasource_if_specified():
    read_mock = MagicMock(
        return_value={
            "success": True,
            "result": {"connection-url": "jdbc:/same-connection-url"},
        }
    )
    create_mock = MagicMock(return_value={"success": True})
    remove_mock = MagicMock(return_value={"success": True})
    update_mock = MagicMock()
    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_datasource": read_mock,
            "jboss7.create_datasource": create_mock,
            "jboss7.remove_datasource": remove_mock,
            "jboss7.update_datasource": update_mock,
        },
    ):
        result = jboss7.datasource_exists(
            name="appDS",
            jboss_config={},
            datasource_properties={"connection-url": "jdbc:/same-connection-url"},
            recreate=True,
        )

        remove_mock.assert_called_with(name="appDS", jboss_config={}, profile=None)
        create_mock.assert_called_with(
            name="appDS",
            jboss_config={},
            datasource_properties={"connection-url": "jdbc:/same-connection-url"},
            profile=None,
        )
        assert result["changes"]["removed"] == "appDS"
        assert result["changes"]["created"] == "appDS"


def test_should_inform_if_the_datasource_has_not_changed():
    read_mock = MagicMock(
        return_value={
            "success": True,
            "result": {"connection-url": "jdbc:/same-connection-url"},
        }
    )
    create_mock = MagicMock()
    remove_mock = MagicMock()
    update_mock = MagicMock(return_value={"success": True})

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_datasource": read_mock,
            "jboss7.create_datasource": create_mock,
            "jboss7.remove_datasource": remove_mock,
            "jboss7.update_datasource": update_mock,
        },
    ):
        result = jboss7.datasource_exists(
            name="appDS",
            jboss_config={},
            datasource_properties={"connection-url": "jdbc:/old-connection-url"},
        )

        update_mock.assert_called_with(
            name="appDS",
            jboss_config={},
            new_properties={"connection-url": "jdbc:/old-connection-url"},
            profile=None,
        )
        assert not create_mock.called
        assert result["comment"] == "Datasource not changed."


def test_should_create_binding_if_not_exists():
    # given
    binding_status = {"created": False}

    def read_func(jboss_config, binding_name, profile):
        if binding_status["created"]:
            return {"success": True, "result": {"value": "DEV"}}
        else:
            return {"success": False, "err_code": "JBAS014807"}

    def create_func(jboss_config, binding_name, value, profile):
        binding_status["created"] = True
        return {"success": True}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock(side_effect=create_func)
    update_mock = MagicMock()

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_simple_binding": read_mock,
            "jboss7.create_simple_binding": create_mock,
            "jboss7.update_simple_binding": update_mock,
        },
    ):

        # when
        result = jboss7.bindings_exist(
            name="bindings", jboss_config={}, bindings={"env": "DEV"}, profile=None
        )

        # then
        create_mock.assert_called_with(
            jboss_config={}, binding_name="env", value="DEV", profile=None
        )
        assert update_mock.call_count == 0
        assert result["changes"] == {"added": "env:DEV\n"}
        assert result["comment"] == "Bindings changed."


def test_should_update_bindings_if_exists_and_different():
    # given
    binding_status = {"updated": False}

    def read_func(jboss_config, binding_name, profile):
        if binding_status["updated"]:
            return {"success": True, "result": {"value": "DEV2"}}
        else:
            return {"success": True, "result": {"value": "DEV"}}

    def update_func(jboss_config, binding_name, value, profile):
        binding_status["updated"] = True
        return {"success": True}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock()
    update_mock = MagicMock(side_effect=update_func)

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_simple_binding": read_mock,
            "jboss7.create_simple_binding": create_mock,
            "jboss7.update_simple_binding": update_mock,
        },
    ):
        # when
        result = jboss7.bindings_exist(
            name="bindings", jboss_config={}, bindings={"env": "DEV2"}, profile=None
        )

        # then
        update_mock.assert_called_with(
            jboss_config={}, binding_name="env", value="DEV2", profile=None
        )
        assert create_mock.call_count == 0
        assert result["changes"] == {"changed": "env:DEV->DEV2\n"}
        assert result["comment"] == "Bindings changed."


def test_should_not_update_bindings_if_same():
    # given
    read_mock = MagicMock(return_value={"success": True, "result": {"value": "DEV2"}})
    create_mock = MagicMock()
    update_mock = MagicMock()

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_simple_binding": read_mock,
            "jboss7.create_simple_binding": create_mock,
            "jboss7.update_simple_binding": update_mock,
        },
    ):
        # when
        result = jboss7.bindings_exist(
            name="bindings", jboss_config={}, bindings={"env": "DEV2"}
        )

        # then
        assert create_mock.call_count == 0
        assert update_mock.call_count == 0
        assert result["changes"] == {}
        assert result["comment"] == "Bindings not changed."


def test_should_raise_exception_if_cannot_create_binding():
    def read_func(jboss_config, binding_name, profile):
        return {"success": False, "err_code": "JBAS014807"}

    def create_func(jboss_config, binding_name, value, profile):
        return {"success": False, "failure-description": "Incorrect binding name."}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock(side_effect=create_func)
    update_mock = MagicMock()

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_simple_binding": read_mock,
            "jboss7.create_simple_binding": create_mock,
            "jboss7.update_simple_binding": update_mock,
        },
    ):
        # when
        with pytest.raises(CommandExecutionError) as exc:
            jboss7.bindings_exist(
                name="bindings",
                jboss_config={},
                bindings={"env": "DEV2"},
                profile=None,
            )
        assert str(exc.value) == "Incorrect binding name."


def test_should_raise_exception_if_cannot_update_binding():
    def read_func(jboss_config, binding_name, profile):
        return {"success": True, "result": {"value": "DEV"}}

    def update_func(jboss_config, binding_name, value, profile):
        return {"success": False, "failure-description": "Incorrect binding name."}

    read_mock = MagicMock(side_effect=read_func)
    create_mock = MagicMock()
    update_mock = MagicMock(side_effect=update_func)

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_simple_binding": read_mock,
            "jboss7.create_simple_binding": create_mock,
            "jboss7.update_simple_binding": update_mock,
        },
    ):

        # when
        with pytest.raises(CommandExecutionError) as exc:
            jboss7.bindings_exist(
                name="bindings",
                jboss_config={},
                bindings={"env": "DEV2"},
                profile=None,
            )
        assert str(exc.value) == "Incorrect binding name."


def test_datasource_exist_create_datasource_good_code():
    jboss_config = {
        "cli_path": "/home/ch44d/Desktop/wildfly-18.0.0.Final/bin/jboss-cli.sh",
        "controller": "127.0.0.1: 9990",
        "cli_user": "user",
        "cli_password": "user",
    }

    datasource_properties = {
        "driver - name": "h2",
        "connection - url": "jdbc:sqlserver://127.0.0.1:1433;DatabaseName=test_s2",
        "jndi - name": (
            "java:/home/ch44d/Desktop/sqljdbc_7.4/enu/mssql-jdbc-7.4.1.jre8.jar"
        ),
        "user - name": "user",
        "password": "user",
        "use - java - context": True,
    }

    read_datasource = MagicMock(
        return_value={"success": False, "err_code": "WFLYCTL0216"}
    )

    error_msg = "Error: -1"
    create_datasource = MagicMock(return_value={"success": False, "stdout": error_msg})

    with patch.dict(
        jboss7.__salt__,
        {
            "jboss7.read_datasource": read_datasource,
            "jboss7.create_datasource": create_datasource,
        },
    ):
        ret = jboss7.datasource_exists("SQL", jboss_config, datasource_properties)

    assert "result" in ret
    assert not ret["result"]
    assert "comment" in ret
    assert error_msg in ret["comment"]

    read_datasource.assert_called_once()
    create_datasource.assert_called_once()


def test_datasource_exist_create_datasource_bad_code():
    jboss_config = {
        "cli_path": "/home/ch44d/Desktop/wildfly-18.0.0.Final/bin/jboss-cli.sh",
        "controller": "127.0.0.1: 9990",
        "cli_user": "user",
        "cli_password": "user",
    }

    datasource_properties = {
        "driver - name": "h2",
        "connection - url": "jdbc:sqlserver://127.0.0.1:1433;DatabaseName=test_s2",
        "jndi - name": (
            "java:/home/ch44d/Desktop/sqljdbc_7.4/enu/mssql-jdbc-7.4.1.jre8.jar"
        ),
        "user - name": "user",
        "password": "user",
        "use - java - context": True,
    }

    read_datasource = MagicMock(
        return_value={
            "success": False,
            "err_code": "WFLYCTL0217",
            "failure-description": "Something happened",
        }
    )

    with patch.dict(jboss7.__salt__, {"jboss7.read_datasource": read_datasource}):
        pytest.raises(
            CommandExecutionError,
            jboss7.datasource_exists,
            "SQL",
            jboss_config,
            datasource_properties,
        )
        read_datasource.assert_called_once()
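Read together, the datasource tests above pin down the branching inside
`jboss7.datasource_exists`. A condensed, illustrative sketch of that flow
(a reconstruction from the assertions, not the state module's actual source):

from salt.exceptions import CommandExecutionError


def datasource_exists_flow(read, create, update, remove,
                           name, jboss_config, properties, recreate=False):
    """Illustrative reconstruction of the branching the tests exercise."""
    current = read(jboss_config=jboss_config, name=name, profile=None)
    if not current["success"]:
        if current.get("err_code") == "JBAS014807":  # resource not found
            create(name=name, jboss_config=jboss_config,
                   datasource_properties=properties, profile=None)
            return "Datasource created."
        # Unexpected error codes (e.g. WFLYCTL0217 in the "bad code" test)
        # surface as a CommandExecutionError.
        raise CommandExecutionError(current.get("failure-description"))
    if recreate:
        remove(name=name, jboss_config=jboss_config, profile=None)
        create(name=name, jboss_config=jboss_config,
               datasource_properties=properties, profile=None)
        return "Datasource recreated."
    # Update is issued unconditionally; "changed" is judged by re-reading,
    # which is why the mocks flip their return value after update_func runs.
    before = current["result"]
    update(name=name, jboss_config=jboss_config,
           new_properties=properties, profile=None)
    after = read(jboss_config=jboss_config, name=name, profile=None)["result"]
    return "Datasource updated." if after != before else "Datasource not changed."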
811
tests/pytests/unit/states/test_kubernetes.py
Normal file
@ -0,0 +1,811 @@
"""
:codeauthor: :email:`Jeff Schroeder <jeffschroeder@computer.org>`

Test cases for salt.states.kubernetes
"""

import base64
from contextlib import contextmanager

import pytest

import salt.modules.kubernetesmod as kubernetesmod
import salt.states.kubernetes as kubernetes
import salt.utils.stringutils
from tests.support.mock import MagicMock, patch

pytestmark = [
    pytest.mark.skipif(
        kubernetesmod.HAS_LIBS is False,
        reason="Kubernetes client lib is not installed.",
    )
]


@pytest.fixture
def configure_loader_modules():
    return {kubernetes: {"__env__": "base"}}


@contextmanager
def mock_func(func_name, return_value, test=False):
    """
    Mock any of the kubernetes state function return values and set
    the test options.
    """
    name = f"kubernetes.{func_name}"
    mocked = {name: MagicMock(return_value=return_value)}
    with patch.dict(kubernetes.__salt__, mocked) as patched:
        with patch.dict(kubernetes.__opts__, {"test": test}):
            yield patched


def make_configmap(name, namespace="default", data=None):
    return make_ret_dict(
        kind="ConfigMap",
        name=name,
        namespace=namespace,
        data=data,
    )


def make_secret(name, namespace="default", data=None):
    secret_data = make_ret_dict(
        kind="Secret",
        name=name,
        namespace=namespace,
        data=data,
    )
    # Base64 all of the values just like kubectl does
    for key, value in secret_data["data"].items():
        secret_data["data"][key] = base64.b64encode(
            salt.utils.stringutils.to_bytes(value)
        )

    return secret_data


def make_node_labels(name="minikube"):
    return {
        "kubernetes.io/hostname": name,
        "beta.kubernetes.io/os": "linux",
        "beta.kubernetes.io/arch": "amd64",
        "failure-domain.beta.kubernetes.io/region": "us-west-1",
    }


def make_node(name="minikube"):
    node_data = make_ret_dict(kind="Node", name="minikube")
    node_data.update(
        {
            "api_version": "v1",
            "kind": "Node",
            "metadata": {
                "annotations": {"node.alpha.kubernetes.io/ttl": "0"},
                "labels": make_node_labels(name=name),
                "name": name,
                "namespace": None,
                "link": f"/api/v1/nodes/{name}",
                "uid": "7811b8ae-c1a1-11e7-a55a-0800279fb61e",
            },
            "spec": {"external_id": name},
            "status": {},
        }
    )
    return node_data


def make_namespace(name="default"):
    namespace_data = make_ret_dict(kind="Namespace", name=name)
    del namespace_data["data"]
    namespace_data.update(
        {
            "status": {"phase": "Active"},
            "spec": {"finalizers": ["kubernetes"]},
            "metadata": {
                "name": name,
                "namespace": None,
                "labels": None,
                "link": "/api/v1/namespaces/{namespace}".format(
                    namespace=name,
                ),
                "annotations": None,
                "uid": "752fceeb-c1a1-11e7-a55a-0800279fb61e",
            },
        }
    )
    return namespace_data


def make_ret_dict(kind, name, namespace=None, data=None):
    """
    Make a minimal example configmap or secret for using in mocks
    """

    assert kind in ("Secret", "ConfigMap", "Namespace", "Node")

    if data is None:
        data = {}

    link = "/api/v1/namespaces/{namespace}/{kind}s/{name}".format(
        namespace=namespace,
        kind=kind.lower(),
        name=name,
    )

    return_data = {
        "kind": kind,
        "data": data,
        "api_version": "v1",
        "metadata": {
            "name": name,
            "labels": None,
            "namespace": namespace,
            "link": link,
            "annotations": {"kubernetes.io/change-cause": "salt-call state.apply"},
        },
    }
    return return_data


def test_configmap_present__fail():
    error = kubernetes.configmap_present(
        name="testme",
        data={1: 1},
        source="salt://beyond/oblivion.jinja",
    )
    assert error == {
        "changes": {},
        "result": False,
        "name": "testme",
        "comment": "'source' cannot be used in combination with 'data'",
    }


def test_configmap_present__create_test_true():
    # Create a new configmap with test=True
    with mock_func("show_configmap", return_value=None, test=True):
        ret = kubernetes.configmap_present(
            name="example",
            data={"example.conf": "# empty config file"},
        )
        assert ret == {
            "comment": "The configmap is going to be created",
            "changes": {},
            "name": "example",
            "result": None,
        }


def test_configmap_present__create():
    # Create a new configmap
    with mock_func("show_configmap", return_value=None):
        cm = make_configmap(
            name="test",
            namespace="default",
            data={"foo": "bar"},
        )
        with mock_func("create_configmap", return_value=cm):
            actual = kubernetes.configmap_present(
                name="test",
                data={"foo": "bar"},
            )
            assert actual == {
                "comment": "",
                "changes": {"data": {"foo": "bar"}},
                "name": "test",
                "result": True,
            }


def test_configmap_present__create_no_data():
    # Create a new configmap with no 'data' attribute
    with mock_func("show_configmap", return_value=None):
        cm = make_configmap(
            name="test",
            namespace="default",
        )
        with mock_func("create_configmap", return_value=cm):
            actual = kubernetes.configmap_present(name="test")
            assert actual == {
                "comment": "",
                "changes": {"data": {}},
                "name": "test",
                "result": True,
            }


def test_configmap_present__replace_test_true():
    cm = make_configmap(
        name="settings",
        namespace="saltstack",
        data={"foobar.conf": "# Example configuration"},
    )
    with mock_func("show_configmap", return_value=cm, test=True):
        ret = kubernetes.configmap_present(
            name="settings",
            namespace="saltstack",
            data={"foobar.conf": "# Example configuration"},
        )
        assert ret == {
            "comment": "The configmap is going to be replaced",
            "changes": {},
            "name": "settings",
            "result": None,
        }


def test_configmap_present__replace():
    cm = make_configmap(name="settings", data={"action": "make=war"})
    # Replace an existing configmap
    with mock_func("show_configmap", return_value=cm):
        new_cm = cm.copy()
        new_cm.update({"data": {"action": "make=peace"}})
        with mock_func("replace_configmap", return_value=new_cm):
            actual = kubernetes.configmap_present(
                name="settings",
                data={"action": "make=peace"},
            )
            assert actual == {
                "comment": ("The configmap is already present. Forcing recreation"),
                "changes": {"data": {"action": "make=peace"}},
                "name": "settings",
                "result": True,
            }


def test_configmap_absent__noop_test_true():
    # Nothing to delete with test=True
    with mock_func("show_configmap", return_value=None, test=True):
        actual = kubernetes.configmap_absent(name="NOT_FOUND")
        assert actual == {
            "comment": "The configmap does not exist",
            "changes": {},
            "name": "NOT_FOUND",
            "result": None,
        }


def test_configmap_absent__test_true():
    # Configmap exists with test=True
    cm = make_configmap(name="deleteme", namespace="default")
    with mock_func("show_configmap", return_value=cm, test=True):
        actual = kubernetes.configmap_absent(name="deleteme")
        assert actual == {
            "comment": "The configmap is going to be deleted",
            "changes": {},
            "name": "deleteme",
            "result": None,
        }


def test_configmap_absent__noop():
    # Nothing to delete
    with mock_func("show_configmap", return_value=None):
        actual = kubernetes.configmap_absent(name="NOT_FOUND")
        assert actual == {
            "comment": "The configmap does not exist",
            "changes": {},
            "name": "NOT_FOUND",
            "result": True,
        }


def test_configmap_absent():
    # Configmap exists, delete it!
    cm = make_configmap(name="deleteme", namespace="default")
    with mock_func("show_configmap", return_value=cm):
        # The return from this module isn't used in the state
        with mock_func("delete_configmap", return_value={}):
            actual = kubernetes.configmap_absent(name="deleteme")
            assert actual == {
                "comment": "ConfigMap deleted",
                "changes": {
                    "kubernetes.configmap": {
                        "new": "absent",
                        "old": "present",
                    },
                },
                "name": "deleteme",
                "result": True,
            }


def test_secret_present__fail():
    actual = kubernetes.secret_present(
        name="sekret",
        data={"password": "monk3y"},
        source="salt://nope.jinja",
    )
    assert actual == {
        "changes": {},
        "result": False,
        "name": "sekret",
        "comment": "'source' cannot be used in combination with 'data'",
    }


def test_secret_present__exists_test_true():
    secret = make_secret(name="sekret")
    new_secret = secret.copy()
    new_secret.update({"data": {"password": "uncle"}})
    # Secret exists already and needs replacing with test=True
    with mock_func("show_secret", return_value=secret):
        with mock_func("replace_secret", return_value=new_secret, test=True):
            actual = kubernetes.secret_present(
                name="sekret",
                data={"password": "uncle"},
            )
            assert actual == {
                "changes": {},
                "result": None,
                "name": "sekret",
                "comment": "The secret is going to be replaced",
            }


def test_secret_present__exists():
    # Secret exists and gets replaced
    secret = make_secret(name="sekret", data={"password": "booyah"})
    with mock_func("show_secret", return_value=secret):
        with mock_func("replace_secret", return_value=secret):
            actual = kubernetes.secret_present(
                name="sekret",
                data={"password": "booyah"},
            )
            assert actual == {
                "changes": {"data": ["password"]},
                "result": True,
                "name": "sekret",
                "comment": "The secret is already present. Forcing recreation",
            }


def test_secret_present__create():
    # Secret exists and gets replaced
    secret = make_secret(name="sekret", data={"password": "booyah"})
    with mock_func("show_secret", return_value=None):
        with mock_func("create_secret", return_value=secret):
            actual = kubernetes.secret_present(
                name="sekret",
                data={"password": "booyah"},
            )
            assert actual == {
                "changes": {"data": ["password"]},
                "result": True,
                "name": "sekret",
                "comment": "",
            }


def test_secret_present__create_no_data():
    # Secret exists and gets replaced
    secret = make_secret(name="sekret")
    with mock_func("show_secret", return_value=None):
        with mock_func("create_secret", return_value=secret):
            actual = kubernetes.secret_present(name="sekret")
            assert actual == {
                "changes": {"data": []},
                "result": True,
                "name": "sekret",
                "comment": "",
            }


def test_secret_present__create_test_true():
    # Secret exists and gets replaced with test=True
    secret = make_secret(name="sekret")
    with mock_func("show_secret", return_value=None):
        with mock_func("create_secret", return_value=secret, test=True):
            actual = kubernetes.secret_present(name="sekret")
            assert actual == {
                "changes": {},
                "result": None,
                "name": "sekret",
                "comment": "The secret is going to be created",
            }


def test_secret_absent__noop_test_true():
    with mock_func("show_secret", return_value=None, test=True):
        actual = kubernetes.secret_absent(name="sekret")
        assert actual == {
            "changes": {},
            "result": None,
            "name": "sekret",
            "comment": "The secret does not exist",
        }


def test_secret_absent__noop():
    with mock_func("show_secret", return_value=None):
        actual = kubernetes.secret_absent(name="passwords")
        assert actual == {
            "changes": {},
            "result": True,
            "name": "passwords",
            "comment": "The secret does not exist",
        }


def test_secret_absent__delete_test_true():
    secret = make_secret(name="credentials", data={"redis": "letmein"})
    with mock_func("show_secret", return_value=secret):
        with mock_func("delete_secret", return_value=secret, test=True):
            actual = kubernetes.secret_absent(name="credentials")
            assert actual == {
                "changes": {},
                "result": None,
                "name": "credentials",
                "comment": "The secret is going to be deleted",
            }


def test_secret_absent__delete():
    secret = make_secret(name="foobar", data={"redis": "letmein"})
    deleted = {
        "status": None,
        "kind": "Secret",
        "code": None,
        "reason": None,
        "details": None,
        "message": None,
        "api_version": "v1",
        "metadata": {
            "link": "/api/v1/namespaces/default/secrets/foobar",
            "resource_version": "30292",
        },
    }
    with mock_func("show_secret", return_value=secret):
        with mock_func("delete_secret", return_value=deleted):
            actual = kubernetes.secret_absent(name="foobar")
            assert actual == {
                "changes": {
                    "kubernetes.secret": {"new": "absent", "old": "present"},
                },
                "result": True,
                "name": "foobar",
                "comment": "Secret deleted",
            }


def test_node_label_present__add_test_true():
    labels = make_node_labels()
    with mock_func("node_labels", return_value=labels, test=True):
        actual = kubernetes.node_label_present(
            name="com.zoo-animal",
            node="minikube",
            value="monkey",
        )
        assert actual == {
            "changes": {},
            "result": None,
            "name": "com.zoo-animal",
            "comment": "The label is going to be set",
        }


def test_node_label_present__add():
    node_data = make_node()
    # Remove some of the defaults to make it simpler
    node_data["metadata"]["labels"] = {
        "beta.kubernetes.io/os": "linux",
    }
    labels = node_data["metadata"]["labels"]

    with mock_func("node_labels", return_value=labels):
        with mock_func("node_add_label", return_value=node_data):
            actual = kubernetes.node_label_present(
                name="failure-domain.beta.kubernetes.io/zone",
                node="minikube",
                value="us-central1-a",
            )
            assert actual == {
                "comment": "",
                "changes": {
                    "minikube.failure-domain.beta.kubernetes.io/zone": {
                        "new": {
                            "failure-domain.beta.kubernetes.io/zone": ("us-central1-a"),
                            "beta.kubernetes.io/os": "linux",
                        },
                        "old": {"beta.kubernetes.io/os": "linux"},
                    },
                },
                "name": "failure-domain.beta.kubernetes.io/zone",
                "result": True,
            }


def test_node_label_present__already_set():
    node_data = make_node()
    labels = node_data["metadata"]["labels"]
    with mock_func("node_labels", return_value=labels):
        with mock_func("node_add_label", return_value=node_data):
            actual = kubernetes.node_label_present(
                name="failure-domain.beta.kubernetes.io/region",
                node="minikube",
                value="us-west-1",
            )
            assert actual == {
                "changes": {},
                "result": True,
                "name": "failure-domain.beta.kubernetes.io/region",
                "comment": ("The label is already set and has the specified value"),
            }


def test_node_label_present__update_test_true():
    node_data = make_node()
    labels = node_data["metadata"]["labels"]
    with mock_func("node_labels", return_value=labels):
        with mock_func("node_add_label", return_value=node_data, test=True):
            actual = kubernetes.node_label_present(
                name="failure-domain.beta.kubernetes.io/region",
                node="minikube",
                value="us-east-1",
            )
            assert actual == {
                "changes": {},
                "result": None,
                "name": "failure-domain.beta.kubernetes.io/region",
                "comment": "The label is going to be updated",
            }


def test_node_label_present__update():
    node_data = make_node()
    # Remove some of the defaults to make it simpler
    node_data["metadata"]["labels"] = {
        "failure-domain.beta.kubernetes.io/region": "us-west-1",
    }
    labels = node_data["metadata"]["labels"]
    with mock_func("node_labels", return_value=labels):
        with mock_func("node_add_label", return_value=node_data):
            actual = kubernetes.node_label_present(
                name="failure-domain.beta.kubernetes.io/region",
                node="minikube",
                value="us-east-1",
            )
            assert actual == {
                "changes": {
                    "minikube.failure-domain.beta.kubernetes.io/region": {
                        "new": {
                            "failure-domain.beta.kubernetes.io/region": ("us-east-1")
                        },
                        "old": {
                            "failure-domain.beta.kubernetes.io/region": ("us-west-1")
                        },
                    }
                },
                "result": True,
                "name": "failure-domain.beta.kubernetes.io/region",
                "comment": "The label is already set, changing the value",
            }


def test_node_label_absent__noop_test_true():
    labels = make_node_labels()
    with mock_func("node_labels", return_value=labels, test=True):
        actual = kubernetes.node_label_absent(
            name="non-existent-label",
            node="minikube",
        )
        assert actual == {
            "changes": {},
            "result": None,
            "name": "non-existent-label",
            "comment": "The label does not exist",
        }


def test_node_label_absent__noop():
    labels = make_node_labels()
    with mock_func("node_labels", return_value=labels):
        actual = kubernetes.node_label_absent(
            name="non-existent-label",
            node="minikube",
        )
        assert actual == {
            "changes": {},
            "result": True,
            "name": "non-existent-label",
            "comment": "The label does not exist",
        }


def test_node_label_absent__delete_test_true():
    labels = make_node_labels()
    with mock_func("node_labels", return_value=labels, test=True):
        actual = kubernetes.node_label_absent(
            name="failure-domain.beta.kubernetes.io/region",
            node="minikube",
        )
        assert actual == {
            "changes": {},
            "result": None,
            "name": "failure-domain.beta.kubernetes.io/region",
            "comment": "The label is going to be deleted",
        }


def test_node_label_absent__delete():
    node_data = make_node()
    labels = node_data["metadata"]["labels"].copy()

    node_data["metadata"]["labels"].pop("failure-domain.beta.kubernetes.io/region")

    with mock_func("node_labels", return_value=labels):
        with mock_func("node_remove_label", return_value=node_data):
            actual = kubernetes.node_label_absent(
                name="failure-domain.beta.kubernetes.io/region",
                node="minikube",
            )
            assert actual == {
                "result": True,
                "changes": {
                    "kubernetes.node_label": {
                        "new": "absent",
                        "old": "present",
                    }
                },
                "comment": "Label removed from node",
                "name": "failure-domain.beta.kubernetes.io/region",
            }


def test_namespace_present__create_test_true():
    with mock_func("show_namespace", return_value=None, test=True):
        actual = kubernetes.namespace_present(name="saltstack")
        assert actual == {
            "changes": {},
            "result": None,
            "name": "saltstack",
            "comment": "The namespace is going to be created",
        }


def test_namespace_present__create():
    namespace_data = make_namespace(name="saltstack")
    with mock_func("show_namespace", return_value=None):
        with mock_func("create_namespace", return_value=namespace_data):
            actual = kubernetes.namespace_present(name="saltstack")
            assert actual == {
                "changes": {"namespace": {"new": namespace_data, "old": {}}},
                "result": True,
                "name": "saltstack",
                "comment": "",
            }


def test_namespace_present__noop_test_true():
    namespace_data = make_namespace(name="saltstack")
    with mock_func("show_namespace", return_value=namespace_data, test=True):
        actual = kubernetes.namespace_present(name="saltstack")
        assert actual == {
            "changes": {},
            "result": None,
            "name": "saltstack",
            "comment": "The namespace already exists",
        }


def test_namespace_present__noop():
    namespace_data = make_namespace(name="saltstack")
    with mock_func("show_namespace", return_value=namespace_data):
        actual = kubernetes.namespace_present(name="saltstack")
        assert actual == {
            "changes": {},
            "result": True,
            "name": "saltstack",
            "comment": "The namespace already exists",
        }


def test_namespace_absent__noop_test_true():
    with mock_func("show_namespace", return_value=None, test=True):
        actual = kubernetes.namespace_absent(name="salt")
        assert actual == {
            "changes": {},
            "result": None,
            "name": "salt",
            "comment": "The namespace does not exist",
        }


def test_namespace_absent__noop():
    with mock_func("show_namespace", return_value=None):
        actual = kubernetes.namespace_absent(name="salt")
        assert actual == {
            "changes": {},
            "result": True,
            "name": "salt",
            "comment": "The namespace does not exist",
        }


def test_namespace_absent__delete_test_true():
    namespace_data = make_namespace(name="salt")
    with mock_func("show_namespace", return_value=namespace_data, test=True):
        actual = kubernetes.namespace_absent(name="salt")
        assert actual == {
            "changes": {},
            "result": None,
            "name": "salt",
            "comment": "The namespace is going to be deleted",
        }


def test_namespace_absent__delete_code_200():
    namespace_data = make_namespace(name="salt")
    deleted = namespace_data.copy()
    deleted["code"] = 200
    deleted.update({"code": 200, "message": None})
    with mock_func("show_namespace", return_value=namespace_data):
        with mock_func("delete_namespace", return_value=deleted):
            actual = kubernetes.namespace_absent(name="salt")
            assert actual == {
                "changes": {
                    "kubernetes.namespace": {"new": "absent", "old": "present"}
                },
                "result": True,
                "name": "salt",
                "comment": "Terminating",
            }


def test_namespace_absent__delete_status_terminating():
    namespace_data = make_namespace(name="salt")
    deleted = namespace_data.copy()
    deleted.update(
        {
            "code": None,
            "status": "Terminating namespace",
            "message": "Terminating this shizzzle yo",
        }
    )
    with mock_func("show_namespace", return_value=namespace_data):
        with mock_func("delete_namespace", return_value=deleted):
            actual = kubernetes.namespace_absent(name="salt")
            assert actual == {
                "changes": {
                    "kubernetes.namespace": {"new": "absent", "old": "present"}
                },
                "result": True,
                "name": "salt",
                "comment": "Terminating this shizzzle yo",
            }


def test_namespace_absent__delete_status_phase_terminating():
    # This is what kubernetes 1.8.0 looks like when deleting namespaces
    namespace_data = make_namespace(name="salt")
    deleted = namespace_data.copy()
    deleted.update({"code": None, "message": None, "status": {"phase": "Terminating"}})
    with mock_func("show_namespace", return_value=namespace_data):
        with mock_func("delete_namespace", return_value=deleted):
            actual = kubernetes.namespace_absent(name="salt")
            assert actual == {
                "changes": {
                    "kubernetes.namespace": {"new": "absent", "old": "present"}
                },
                "result": True,
                "name": "salt",
                "comment": "Terminating",
            }


def test_namespace_absent__delete_error():
    namespace_data = make_namespace(name="salt")
    deleted = namespace_data.copy()
    deleted.update({"code": 418, "message": "I'm a teapot!", "status": None})
    with mock_func("show_namespace", return_value=namespace_data):
        with mock_func("delete_namespace", return_value=deleted):
            actual = kubernetes.namespace_absent(name="salt")
            assert actual == {
                "changes": {},
                "result": False,
                "name": "salt",
                "comment": "Something went wrong, response: {}".format(
                    deleted,
                ),
            }
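The three delete variants above pin down how `namespace_absent` classifies
the response from `delete_namespace` across kubernetes client versions. A
compact sketch of that classification (an illustrative reading of the
assertions, not the state module's source):

def classify_namespace_delete(deleted):
    """Map a delete_namespace response to (succeeded, comment)."""
    status = deleted.get("status")
    if deleted.get("code") == 200:
        return True, "Terminating"
    if isinstance(status, dict) and status.get("phase") == "Terminating":
        # kubernetes 1.8.0-style response
        return True, "Terminating"
    if isinstance(status, str) and "Terminating" in status:
        return True, deleted.get("message") or "Terminating"
    return False, f"Something went wrong, response: {deleted}"

assert classify_namespace_delete({"code": 200}) == (True, "Terminating")
assert classify_namespace_delete(
    {"code": None, "status": {"phase": "Terminating"}}
) == (True, "Terminating")
assert classify_namespace_delete({"code": 418, "status": None})[0] is False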
@ -7,6 +7,8 @@ import pytest
import salt.states.selinux as selinux
from tests.support.mock import MagicMock, patch

pytestmark = [pytest.mark.skip_unless_on_linux]


@pytest.fixture
def configure_loader_modules():

@ -77,7 +79,7 @@ def test_boolean():

    mock_en = MagicMock(return_value=[])
    with patch.dict(selinux.__salt__, {"selinux.list_sebool": mock_en}):
        comt = "Boolean {} is not available".format(name)
        comt = f"Boolean {name} is not available"
        ret.update({"comment": comt})
        assert selinux.boolean(name, value) == ret
@ -123,8 +123,8 @@ def test_present_invalid_gid_change():
    )
    dunder_salt = {
        "user.info": mock_info,
        "file.group_to_gid": MagicMock(side_effect=["foo"]),
        "file.gid_to_group": MagicMock(side_effect=[5000, 5000]),
        "file.group_to_gid": MagicMock(return_value="foo"),
        "file.gid_to_group": MagicMock(return_value=5000),
    }
    with patch.dict(user.__grains__, {"kernel": "Linux"}), patch.dict(
        user.__salt__, dunder_salt

@ -148,8 +148,8 @@ def test_present_invalid_uid_gid_change():
    )
    dunder_salt = {
        "user.info": mock_info,
        "file.group_to_gid": MagicMock(side_effect=["foo"]),
        "file.gid_to_group": MagicMock(side_effect=[5000, 5000]),
        "file.group_to_gid": MagicMock(return_value="foo"),
        "file.gid_to_group": MagicMock(return_value=5000),
    }
    with patch.dict(user.__grains__, {"kernel": "Linux"}), patch.dict(
        user.__salt__, dunder_salt

@ -179,7 +179,7 @@ def test_present_uid_gid_change():
    # get the before/after for the changes dict, and one last time to
    # confirm that no changes still need to be made.
    mock_info = MagicMock(side_effect=[before, before, after, after])
    mock_group_to_gid = MagicMock(side_effect=[5000, 5001])
    mock_group_to_gid = MagicMock(side_effect=[5000, 5000, 5001, 5001])
    mock_gid_to_group = MagicMock(
        side_effect=["othergroup", "foo", "othergroup", "othergroup"]
    )

@ -254,12 +254,11 @@ def test_changes():
        "file.gid_to_group": MagicMock(side_effect=[5000, 5000]),
    }

    def mock_exists(*args):
        return True

    with patch.dict(user.__grains__, {"kernel": "Linux"}), patch.dict(
        user.__salt__, dunder_salt
    ), patch.dict(user.__opts__, {"test": False}), patch("os.path.isdir", mock_exists):
    ), patch.dict(user.__opts__, {"test": False}), patch(
        "os.path.isdir", MagicMock(return_value=True)
    ):
        ret = user._changes("foo", maxdays=999999, inactdays=0, warndays=7)
        assert ret == {
            "maxdays": 999999,

@ -459,3 +458,43 @@ def test_present_password_unlock():
    else:
        unlock_password.assert_called_once()
        unlock_account.assert_not_called()


@pytest.mark.parametrize(
    "current,wanted,remove,return_value,expected",
    [
        (["grp1"], ["grp1"], False, MagicMock(return_value={"gid": 100}), False),
        (
            ["grp1"],
            ["grp1", "grp2"],
            False,
            MagicMock(side_effect=[{"gid": 100}, {"gid": 200}]),
            True,
        ),
        (
            ["grp1"],
            ["grp1", "grp2"],
            False,
            MagicMock(side_effect=[{"gid": 100}, {"gid": 100}]),
            False,
        ),
        (
            ["grp1", "grp2"],
            ["grp1"],
            True,
            MagicMock(side_effect=[{"gid": 100}, {"gid": 200}]),
            True,
        ),
        (
            ["grp1", "grp2"],
            ["grp1"],
            True,
            MagicMock(side_effect=[{"gid": 100}, {"gid": 100}]),
            False,
        ),
    ],
)
def test__group_changes(current, wanted, remove, return_value, expected):
    with patch.dict(user.__salt__, {"group.info": return_value}):
        ret = user._group_changes(current, wanted, remove)
        assert ret == expected
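The parametrized cases above capture the dupe-GID rule: adding or removing a
supplemental group only counts as a pending change when the group's GID
differs from one the user already carries. A standalone sketch of a check
with those semantics (illustrative only; salt's `_group_changes` has its own
implementation):

def group_changes_sketch(current, wanted, remove, group_info):
    """Return True when membership changes are still needed.

    Groups whose GIDs collide are treated as equivalent, matching the
    expectations in the parametrized cases above.
    """

    def gid(name):
        return group_info(name)["gid"]

    current_gids = {gid(name) for name in current}
    wanted_gids = {gid(name) for name in wanted}
    if remove:
        # Extra groups must go away unless they share a GID with a wanted one.
        return current_gids != wanted_gids
    # Without remove, only genuinely new GIDs require a change.
    return not wanted_gids <= current_gids

gids = {"grp1": 100, "grp2": 200}
dupes = {"grp1": 100, "grp2": 100}
assert group_changes_sketch(["grp1"], ["grp1", "grp2"], False, lambda n: {"gid": gids[n]})
assert not group_changes_sketch(["grp1"], ["grp1", "grp2"], False, lambda n: {"gid": dupes[n]})
assert group_changes_sketch(["grp1", "grp2"], ["grp1"], True, lambda n: {"gid": gids[n]})
assert not group_changes_sketch(["grp1", "grp2"], ["grp1"], True, lambda n: {"gid": dupes[n]})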
@ -217,6 +217,38 @@ def test_machine_value_present(empty_reg_pol_mach):
    assert result == expected


def test_machine_value_present_similar_names(empty_reg_pol_mach):
    """
    Test value.present in Machine policy when one value name is a substring
    of another value name under the same key
    """
    lgpo_reg.value_present(
        name="MyValueTwo",
        key="SOFTWARE\\MyKey1",
        v_data="1",
        v_type="REG_DWORD",
    )
    lgpo_reg.value_present(
        name="MyValue",
        key="SOFTWARE\\MyKey1",
        v_data="1",
        v_type="REG_DWORD",
    )
    expected = {
        "SOFTWARE\\MyKey1": {
            "MyValue": {
                "type": "REG_DWORD",
                "data": 1,
            },
            "MyValueTwo": {
                "type": "REG_DWORD",
                "data": 1,
            },
        },
    }
    result = win_lgpo_reg.read_reg_pol(policy_class="Machine")
    assert result == expected


def test_machine_value_present_enforce(reg_pol_mach):
    """
    Issue #64222

@ -614,6 +646,40 @@ def test_user_value_present(empty_reg_pol_user):
    assert result == expected


def test_user_value_present_similar_names(empty_reg_pol_user):
    """
    Test value.present in User policy when one value name is a substring of
    another value name under the same key
    """
    lgpo_reg.value_present(
        name="MyValueTwo",
        key="SOFTWARE\\MyKey1",
        v_data="1",
        v_type="REG_DWORD",
        policy_class="User",
    )
    lgpo_reg.value_present(
        name="MyValue",
        key="SOFTWARE\\MyKey1",
        v_data="1",
        v_type="REG_DWORD",
        policy_class="User",
    )
    expected = {
        "SOFTWARE\\MyKey1": {
            "MyValue": {
                "type": "REG_DWORD",
                "data": 1,
            },
            "MyValueTwo": {
                "type": "REG_DWORD",
                "data": 1,
            },
        },
    }
    result = win_lgpo_reg.read_reg_pol(policy_class="User")
    assert result == expected


def test_user_value_present_existing_change(reg_pol_user):
    """
    Test value.present with existing incorrect value in User policy
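The similar-names tests above guard against substring matching on value names
when existing Registry.pol entries are rewritten: a naive check would let
"MyValue" match inside "MyValueTwo" and clobber it. The safe comparison is an
exact match (case-insensitive, as registry value names are), roughly:

def same_value_name(existing, wanted):
    # An exact comparison; a substring test such as `wanted in existing`
    # would wrongly treat "MyValue" as a match for "MyValueTwo".
    return existing.casefold() == wanted.casefold()

assert same_value_name("MyValue", "myvalue")
assert not same_value_name("MyValueTwo", "MyValue")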
@ -1,19 +1,122 @@
import io

import pytest

import salt.state
from salt import template
from salt.config import minion_config
from tests.support.mock import MagicMock, patch


def test_compile_template_str_mkstemp_cleanup():
@pytest.fixture
def render_dict():
    return {
        "jinja": "fake_jinja_func",
        "json": "fake_json_func",
        "mako": "fake_make_func",
    }


def test_compile_template_str_mkstemp_cleanup(minion_opts):
    minion_opts["file_client"] = "local"
    with patch("os.unlink", MagicMock()) as unlinked:
        _config = minion_config(None)
        _config["file_client"] = "local"
        _state = salt.state.State(_config)
        assert template.compile_template_str(
        _state = salt.state.State(minion_opts)
        ret = template.compile_template_str(
            "{'val':'test'}",
            _state.rend,
            _state.opts["renderer"],
            _state.opts["renderer_blacklist"],
            _state.opts["renderer_whitelist"],
        ) == {"val": "test"}
        )
        assert ret == {"val": "test"}
        unlinked.assert_called_once()


def test_compile_template_bad_type():
    """
    Test to ensure that unsupported types cannot be passed to the template compiler
    """
    ret = template.compile_template(["1", "2", "3"], None, None, None, None)
    assert ret == {}


def test_compile_template_preserves_windows_newlines():
    """
    Test to ensure that a file with Windows newlines, when rendered by a
    template renderer, does not eat the CR character.
    """

    def _get_rend(renderer, value):
        """
        We need a new MagicMock each time since we're dealing with StringIO
        objects which are read like files.
        """
        return {renderer: MagicMock(return_value=io.StringIO(value))}

    input_data_windows = "foo\r\nbar\r\nbaz\r\n"
    input_data_non_windows = input_data_windows.replace("\r\n", "\n")
    renderer = "test"
    blacklist = whitelist = []

    ret = template.compile_template(
        ":string:",
        _get_rend(renderer, input_data_non_windows),
        renderer,
        blacklist,
        whitelist,
        input_data=input_data_windows,
    ).read()
    # Even though the mocked renderer returned a string without the windows
    # newlines, the compiled template should still have them.
    assert ret == input_data_windows

    # Now test that we aren't adding them in unnecessarily.
    ret = template.compile_template(
        ":string:",
        _get_rend(renderer, input_data_non_windows),
        renderer,
        blacklist,
        whitelist,
        input_data=input_data_non_windows,
    ).read()
    assert ret == input_data_non_windows

    # Finally, ensure that we're not unnecessarily replacing the \n with
    # \r\n in the event that the renderer returned a string with the
    # windows newlines intact.
    ret = template.compile_template(
        ":string:",
        _get_rend(renderer, input_data_windows),
        renderer,
        blacklist,
        whitelist,
        input_data=input_data_windows,
    ).read()
    assert ret == input_data_windows
def test_check_render_pipe_str(render_dict):
|
||||
"""
|
||||
Check that all renderers specified in the pipe string are available.
|
||||
"""
|
||||
ret = template.check_render_pipe_str("jinja|json", render_dict, None, None)
|
||||
assert ("fake_jinja_func", "") in ret
|
||||
assert ("fake_json_func", "") in ret
|
||||
assert ("OBVIOUSLY_NOT_HERE", "") not in ret
|
||||
|
||||
|
||||
def test_check_renderer_blacklisting(render_dict):
|
||||
"""
|
||||
Check that all renderers specified in the pipe string are available.
|
||||
"""
|
||||
ret = template.check_render_pipe_str("jinja|json", render_dict, ["jinja"], None)
|
||||
assert ret == [("fake_json_func", "")]
|
||||
ret = template.check_render_pipe_str("jinja|json", render_dict, None, ["jinja"])
|
||||
assert ret == [("fake_jinja_func", "")]
|
||||
ret = template.check_render_pipe_str(
|
||||
"jinja|json", render_dict, ["jinja"], ["jinja"]
|
||||
)
|
||||
assert ret == []
|
||||
ret = template.check_render_pipe_str(
|
||||
"jinja|json", render_dict, ["jinja"], ["jinja", "json"]
|
||||
)
|
||||
assert ret == [("fake_json_func", "")]
|
||||
|
|
0 tests/pytests/unit/utils/templates/__init__.py Normal file
9 tests/pytests/unit/utils/templates/conftest.py Normal file
@@ -0,0 +1,9 @@
import pytest


@pytest.fixture
def render_context():
    return {
        "opts": {"cachedir": "/D", "__cli": "salt"},
        "saltenv": None,
    }

47 tests/pytests/unit/utils/templates/test_cheetah.py Normal file
@@ -0,0 +1,47 @@
import pytest
from salt.utils.templates import render_cheetah_tmpl

pytest.importorskip("Cheetah")


def test_render_sanity(render_context):
    tmpl = """OK"""
    res = render_cheetah_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_evaluate(render_context):
    tmpl = """<%="OK"%>"""
    res = render_cheetah_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_evaluate_xml(render_context):
    tmpl = """
    <% if 1: %>
    OK
    <% pass %>
    """
    res = render_cheetah_tmpl(tmpl, render_context)
    stripped = res.strip()
    assert stripped == "OK"


def test_render_evaluate_text(render_context):
    tmpl = """
    #if 1
    OK
    #end if
    """

    res = render_cheetah_tmpl(tmpl, render_context)
    stripped = res.strip()
    assert stripped == "OK"


def test_render_variable(render_context):
    tmpl = """$var"""

    render_context["var"] = "OK"
    res = render_cheetah_tmpl(tmpl, render_context)
    assert res.strip() == "OK"

36 tests/pytests/unit/utils/templates/test_genshi.py Normal file
@@ -0,0 +1,36 @@
import pytest
from salt.utils.templates import render_genshi_tmpl

pytest.importorskip("genshi")


def test_render_sanity(render_context):
    tmpl = """<RU>OK</RU>"""
    res = render_genshi_tmpl(tmpl, render_context)
    assert res == "<RU>OK</RU>"


def test_render_evaluate(render_context):
    tmpl = """<RU>${ "OK" }</RU>"""
    res = render_genshi_tmpl(tmpl, render_context)
    assert res == "<RU>OK</RU>"


def test_render_evaluate_condition(render_context):
    tmpl = """<RU xmlns:py="http://genshi.edgewall.org/" py:if="1">OK</RU>"""
    res = render_genshi_tmpl(tmpl, render_context)
    assert res == "<RU>OK</RU>"


def test_render_variable(render_context):
    tmpl = """<RU>$var</RU>"""
    render_context["var"] = "OK"
    res = render_genshi_tmpl(tmpl, render_context)
    assert res == "<RU>OK</RU>"


def test_render_variable_replace(render_context):
    tmpl = """<RU xmlns:py="http://genshi.edgewall.org/" py:content="var">not ok</RU>"""
    render_context["var"] = "OK"
    res = render_genshi_tmpl(tmpl, render_context)
    assert res == "<RU>OK</RU>"

@@ -1,41 +1,17 @@
"""
Tests for salt.utils.templates
"""
import os
import re

from collections import OrderedDict
import pytest
from salt.exceptions import SaltRenderError
from salt.utils.templates import render_jinja_tmpl


@pytest.fixture
def minion_opts(tmp_path, minion_opts):
    minion_opts.update(
        {
            "cachedir": str(tmp_path / "jinja-template-cache"),
            "file_buffer_size": 1048576,
            "file_client": "local",
            "file_ignore_regex": None,
            "file_ignore_glob": None,
            "file_roots": {"test": [str(tmp_path / "templates")]},
            "pillar_roots": {"test": [str(tmp_path / "templates")]},
            "fileserver_backend": ["roots"],
            "hash_type": "md5",
            "extension_modules": os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "extmods"
            ),
        }
    )
    return minion_opts
from tests.support.mock import patch


@pytest.fixture
def local_salt():
    return {}


def test_jinja_undefined_error_context(minion_opts, local_salt):
def test_undefined_error_context(render_context):
    """
    Test that jinja provides both the line number on which the error occurred
    in the Jinja template, and also several lines of context around the error

@@ -63,5 +39,64 @@ def test_jinja_undefined_error_context(minion_opts, local_salt):
    with pytest.raises(SaltRenderError, match=match_regex):
        render_jinja_tmpl(
            jinja_code,
            dict(opts=minion_opts, saltenv="test", salt=local_salt),
            render_context,
        )


def test_render_sanity(render_context):
    tmpl = """OK"""
    res = render_jinja_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_evaluate(render_context):
    tmpl = """{{ "OK" }}"""
    res = render_jinja_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_evaluate_multi(render_context):
    tmpl = """{% if 1 -%}OK{%- endif %}"""
    res = render_jinja_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_variable(render_context):
    tmpl = """{{ var }}"""
    render_context["var"] = "OK"
    res = render_jinja_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_tojson_sorted(render_context):
    templ = """thing: {{ var|tojson(sort_keys=True) }}"""
    expected = """thing: {"x": "xxx", "y": "yyy", "z": "zzz"}"""

    with patch.dict(render_context, {"var": {"z": "zzz", "y": "yyy", "x": "xxx"}}):
        res = render_jinja_tmpl(templ, render_context)

    assert res == expected


def test_render_tojson_unsorted(render_context):
    templ = """thing: {{ var|tojson(sort_keys=False) }}"""
    expected = """thing: {"z": "zzz", "x": "xxx", "y": "yyy"}"""

    # Values must be added to the dict in the expected order. This is
    # only necessary for older Pythons that don't remember dict order.
    d = OrderedDict()
    d["z"] = "zzz"
    d["x"] = "xxx"
    d["y"] = "yyy"

    with patch.dict(render_context, {"var": d}):
        res = render_jinja_tmpl(templ, render_context)

    assert res == expected


def test_render_cve_2021_25283(render_context):
    tmpl = """{{ [].__class__ }}"""
    render_context["var"] = "OK"
    with pytest.raises(SaltRenderError):
        res = render_jinja_tmpl(tmpl, render_context)

34 tests/pytests/unit/utils/templates/test_mako.py Normal file
@@ -0,0 +1,34 @@
import pytest
from salt.utils.templates import render_mako_tmpl

pytest.importorskip("mako")


def test_render_mako_sanity(render_context):
    tmpl = """OK"""
    res = render_mako_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_mako_evaluate(render_context):
    tmpl = """${ "OK" }"""
    res = render_mako_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_mako_evaluate_multi(render_context):
    tmpl = """
    % if 1:
    OK
    % endif
    """
    res = render_mako_tmpl(tmpl, render_context)
    stripped = res.strip()
    assert stripped == "OK"


def test_render_mako_variable(render_context):
    tmpl = """${ var }"""
    render_context["var"] = "OK"
    res = render_mako_tmpl(tmpl, render_context)
    assert res == "OK"

29 tests/pytests/unit/utils/templates/test_wempy.py Normal file
@@ -0,0 +1,29 @@
import pytest
from salt.utils.templates import render_wempy_tmpl

pytest.importorskip("wemplate")


def test_render_wempy_sanity(render_context):
    tmpl = """OK"""
    res = render_wempy_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_wempy_evaluate(render_context):
    tmpl = """{{="OK"}}"""
    res = render_wempy_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_wempy_evaluate_multi(render_context):
    tmpl = """{{if 1:}}OK{{pass}}"""
    res = render_wempy_tmpl(tmpl, render_context)
    assert res == "OK"


def test_render_wempy_variable(render_context):
    tmpl = """{{=var}}"""
    render_context["var"] = "OK"
    res = render_wempy_tmpl(tmpl, render_context)
    assert res == "OK"

217 tests/pytests/unit/utils/templates/test_wrap_tmpl_func.py Normal file
@@ -0,0 +1,217 @@
"""
Unit tests for salt.utils.templates.py
"""
import logging
from pathlib import PurePath, PurePosixPath

import pytest

from salt.utils.templates import wrap_tmpl_func, generate_sls_context
from tests.support.mock import patch


log = logging.getLogger(__name__)


class MockRender:
    def __call__(self, tplstr, context, tmplpath=None):
        self.tplstr = tplstr
        self.context = context
        self.tmplpath = tmplpath
        return tplstr


def _test_generated_sls_context(tmplpath, sls, **expected):
    """Generic SLS Context Test"""
    # DeNormalize tmplpath
    tmplpath = str(PurePath(PurePosixPath(tmplpath)))
    if tmplpath.startswith("\\"):
        tmplpath = f"C:{tmplpath}"
    expected["tplpath"] = tmplpath
    actual = generate_sls_context(tmplpath, sls)
    assert {key: actual[key] for key in expected if key in actual} == actual


def test_sls_context_call(tmp_path):
    """Check that generate_sls_context is called with proper parameters"""
    sls = "foo.bar"
    slsfile = tmp_path / "foo" / "bar.sls"
    slsfile.parent.mkdir()
    slsfile.write_text("{{ slspath }}")
    context = {"opts": {}, "saltenv": "base", "sls": sls}
    render = MockRender()
    with patch("salt.utils.templates.generate_sls_context") as generate_sls_context:
        wrapped = wrap_tmpl_func(render)
        res = wrapped(str(slsfile), context=context, tmplpath=str(slsfile))
        generate_sls_context.assert_called_with(str(slsfile), sls)


def test_sls_context_no_call(tmp_path):
    """Check that generate_sls_context is not called if sls is not set"""
    sls = "foo.bar"
    slsfile = tmp_path / "foo" / "bar.sls"
    slsfile.parent.mkdir()
    slsfile.write_text("{{ slspath }}")
    context = {"opts": {}, "saltenv": "base"}
    render = MockRender()
    with patch("salt.utils.templates.generate_sls_context") as generate_sls_context:
        wrapped = wrap_tmpl_func(render)
        res = wrapped(str(slsfile), context=context, tmplpath=str(slsfile))
        generate_sls_context.assert_not_called()


def test_generate_sls_context__top_level():
    """generate_sls_context - top_level Use case"""
    _test_generated_sls_context(
        "/tmp/boo.sls",
        "boo",
        tplfile="boo.sls",
        tpldir=".",
        tpldot="",
        slsdotpath="",
        slscolonpath="",
        sls_path="",
        slspath="",
    )


def test_generate_sls_context__one_level_init_implicit():
    """generate_sls_context - Basic one level with implicit init.sls"""
    _test_generated_sls_context(
        "/tmp/foo/init.sls",
        "foo",
        tplfile="foo/init.sls",
        tpldir="foo",
        tpldot="foo",
        slsdotpath="foo",
        slscolonpath="foo",
        sls_path="foo",
        slspath="foo",
    )


def test_generate_sls_context__one_level_init_explicit():
    """generate_sls_context - Basic one level with explicit init.sls"""
    _test_generated_sls_context(
        "/tmp/foo/init.sls",
        "foo.init",
        tplfile="foo/init.sls",
        tpldir="foo",
        tpldot="foo",
        slsdotpath="foo",
        slscolonpath="foo",
        sls_path="foo",
        slspath="foo",
    )


def test_generate_sls_context__one_level():
    """generate_sls_context - Basic one level with name"""
    _test_generated_sls_context(
        "/tmp/foo/boo.sls",
        "foo.boo",
        tplfile="foo/boo.sls",
        tpldir="foo",
        tpldot="foo",
        slsdotpath="foo",
        slscolonpath="foo",
        sls_path="foo",
        slspath="foo",
    )


def test_generate_sls_context__one_level_repeating():
    """generate_sls_context - Basic one level with name same as dir

    (Issue #56410)
    """
    _test_generated_sls_context(
        "/tmp/foo/foo.sls",
        "foo.foo",
        tplfile="foo/foo.sls",
        tpldir="foo",
        tpldot="foo",
        slsdotpath="foo",
        slscolonpath="foo",
        sls_path="foo",
        slspath="foo",
    )


def test_generate_sls_context__two_level_init_implicit():
    """generate_sls_context - Basic two level with implicit init.sls"""
    _test_generated_sls_context(
        "/tmp/foo/bar/init.sls",
        "foo.bar",
        tplfile="foo/bar/init.sls",
        tpldir="foo/bar",
        tpldot="foo.bar",
        slsdotpath="foo.bar",
        slscolonpath="foo:bar",
        sls_path="foo_bar",
        slspath="foo/bar",
    )


def test_generate_sls_context__two_level_init_explicit():
    """generate_sls_context - Basic two level with explicit init.sls"""
    _test_generated_sls_context(
        "/tmp/foo/bar/init.sls",
        "foo.bar.init",
        tplfile="foo/bar/init.sls",
        tpldir="foo/bar",
        tpldot="foo.bar",
        slsdotpath="foo.bar",
        slscolonpath="foo:bar",
        sls_path="foo_bar",
        slspath="foo/bar",
    )


def test_generate_sls_context__two_level():
    """generate_sls_context - Basic two level with name"""
    _test_generated_sls_context(
        "/tmp/foo/bar/boo.sls",
        "foo.bar.boo",
        tplfile="foo/bar/boo.sls",
        tpldir="foo/bar",
        tpldot="foo.bar",
        slsdotpath="foo.bar",
        slscolonpath="foo:bar",
        sls_path="foo_bar",
        slspath="foo/bar",
    )


def test_generate_sls_context__two_level_repeating():
    """generate_sls_context - Basic two level with name same as dir

    (Issue #56410)
    """
    _test_generated_sls_context(
        "/tmp/foo/foo/foo.sls",
        "foo.foo.foo",
        tplfile="foo/foo/foo.sls",
        tpldir="foo/foo",
        tpldot="foo.foo",
        slsdotpath="foo.foo",
        slscolonpath="foo:foo",
        sls_path="foo_foo",
        slspath="foo/foo",
    )


@pytest.mark.skip_on_windows
def test_generate_sls_context__backslash_in_path():
    """generate_sls_context - Handle backslash in path on non-windows"""
    _test_generated_sls_context(
        "/tmp/foo/foo\\foo.sls",
        "foo.foo\\foo",
        tplfile="foo/foo\\foo.sls",
        tpldir="foo",
        tpldot="foo",
        slsdotpath="foo",
        slscolonpath="foo",
        sls_path="foo",
        slspath="foo",
    )

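(Aside, not part of the diff: a speculative mini-derivation, with assumed function and argument names, that reproduces the four *path variants the generate_sls_context tests above assert; is_init says whether the template file is an init.sls.)

    def sls_paths(sls, is_init=False):
        parts = sls.split(".")
        if parts[-1] == "init":      # explicit "foo.bar.init" -> foo/bar
            parts = parts[:-1]
        elif not is_init:            # "foo.bar.boo" -> foo/bar, "boo" -> top level
            parts = parts[:-1]
        return {
            "slsdotpath": ".".join(parts),
            "slscolonpath": ":".join(parts),
            "sls_path": "_".join(parts),
            "slspath": "/".join(parts),
        }

    assert sls_paths("foo.bar.boo")["slscolonpath"] == "foo:bar"
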
@@ -6,10 +6,12 @@
"""

import io
import time
from datetime import datetime, timedelta

import requests

from salt.utils.aws import get_metadata
import salt.utils.aws as aws
from tests.support.mock import MagicMock, patch


@@ -19,7 +21,7 @@ def test_get_metadata_imdsv1():
    response.reason = "OK"
    response.raw = io.BytesIO(b"""test""")
    with patch("requests.get", return_value=response):
        result = get_metadata("/")
        result = aws.get_metadata("/")
    assert result.text == "test"


@@ -48,5 +50,76 @@ def test_get_metadata_imdsv2():
    with patch("requests.get", MagicMock(side_effect=handle_get_mock)), patch(
        "requests.put", return_value=put_response
    ):
        result = get_metadata("/")
        result = aws.get_metadata("/")
    assert result.text == "test"


def test_assumed_creds_not_updating_dictionary_while_iterating():
    mock_cache = {
        "expired": {
            "Expiration": time.mktime(datetime.utcnow().timetuple()),
        },
        "not_expired_1": {
            "Expiration": time.mktime(
                (datetime.utcnow() + timedelta(days=1)).timetuple()
            ),
            "AccessKeyId": "mock_AccessKeyId",
            "SecretAccessKey": "mock_SecretAccessKey",
            "SessionToken": "mock_SessionToken",
        },
        "not_expired_2": {
            "Expiration": time.mktime(
                (datetime.utcnow() + timedelta(seconds=300)).timetuple()
            ),
        },
    }
    with patch.dict(aws.__AssumeCache__, mock_cache):
        ret = aws.assumed_creds({}, "not_expired_1")
        assert "expired" not in aws.__AssumeCache__
        assert ret == ("mock_AccessKeyId", "mock_SecretAccessKey", "mock_SessionToken")


def test_assumed_creds_deletes_expired_key():
    mock_cache = {
        "expired": {
            "Expiration": time.mktime(datetime.utcnow().timetuple()),
        },
        "not_expired_1": {
            "Expiration": time.mktime(
                (datetime.utcnow() + timedelta(days=1)).timetuple()
            ),
            "AccessKeyId": "mock_AccessKeyId",
            "SecretAccessKey": "mock_SecretAccessKey",
            "SessionToken": "mock_SessionToken",
        },
        "not_expired_2": {
            "Expiration": time.mktime(
                (datetime.utcnow() + timedelta(seconds=300)).timetuple()
            ),
        },
    }
    creds_dict = {
        "AccessKeyId": "mock_AccessKeyId",
        "SecretAccessKey": "mock_SecretAccessKey",
        "SessionToken": "mock_SessionToken",
    }
    response_mock = MagicMock()
    response_mock.status_code = 200
    response_mock.json.return_value = {
        "AssumeRoleResponse": {
            "AssumeRoleResult": {
                "Credentials": creds_dict,
            },
        },
    }
    with patch.dict(aws.__AssumeCache__, mock_cache):
        with patch.object(aws, "sig4", return_value=({}, "fakeurl.com")):
            with patch("requests.request", return_value=response_mock):
                ret = aws.assumed_creds({}, "expired")
                assert "expired" in aws.__AssumeCache__
                assert aws.__AssumeCache__["expired"] == creds_dict
                assert ret == (
                    "mock_AccessKeyId",
                    "mock_SecretAccessKey",
                    "mock_SessionToken",
                )

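(Aside, not part of the diff: an illustrative sketch only -- not the code under test -- of the safe pruning pattern the two assumed_creds tests above verify. Snapshot the keys before deleting, so the cache is never mutated while it is being iterated.)

    import time

    def prune_expired(cache):
        now = time.time()
        for key in list(cache):          # list() snapshots the keys up front
            if cache[key].get("Expiration", 0) <= now:
                del cache[key]           # safe: we iterate over the snapshot
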
28 tests/pytests/unit/utils/test_mako.py Normal file
@@ -0,0 +1,28 @@
import pytest

from tests.support.mock import Mock, call, patch

pytest.importorskip("mako")

# This import needs to be after the above importorskip so that no ImportError
# is raised if Mako is not installed
from salt.utils.mako import SaltMakoTemplateLookup


def test_mako_template_lookup(minion_opts):
    """
    The shutdown method can be called without raising an exception when the
    file_client does not have a destroy method
    """
    # Test SaltCacheLoader creating and destroying the file client created
    file_client = Mock()
    with patch("salt.fileclient.get_file_client", return_value=file_client):
        loader = SaltMakoTemplateLookup(minion_opts)
        assert loader._file_client is None
        assert loader.file_client() is file_client
        assert loader._file_client is file_client
        try:
            loader.destroy()
        except AttributeError:
            pytest.fail("Regression when calling SaltMakoTemplateLookup.destroy()")
        assert file_client.mock_calls == [call.destroy()]

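(Aside, not part of the diff: a hedged sketch, with class and attribute names simplified, of the defensive teardown the test above checks -- destroy() must not raise even when the underlying file client lacks a destroy() method. The real logic lives in salt/utils/mako.py and may differ in detail.)

    class LookupSketch:
        def __init__(self):
            self._file_client = None

        def destroy(self):
            client, self._file_client = self._file_client, None
            if client is not None and hasattr(client, "destroy"):
                client.destroy()     # only call destroy() when it exists
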
@@ -1,745 +0,0 @@
# pylint: disable=unused-argument


import salt.states.jboss7 as jboss7
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase


class JBoss7StateTestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
        return {
            jboss7: {
                "__salt__": {
                    "jboss7.read_datasource": MagicMock(),
                    "jboss7.create_datasource": MagicMock(),
                    "jboss7.update_datasource": MagicMock(),
                    "jboss7.remove_datasource": MagicMock(),
                    "jboss7.read_simple_binding": MagicMock(),
                    "jboss7.create_simple_binding": MagicMock(),
                    "jboss7.update_simple_binding": MagicMock(),
                    "jboss7.undeploy": MagicMock(),
                    "jboss7.deploy": MagicMock,
                    "file.get_managed": MagicMock,
                    "file.manage_file": MagicMock,
                    "jboss7.list_deployments": MagicMock,
                },
                "__env__": "base",
            }
        }

    def test_should_not_redeploy_unchanged(self):
        # given
        parameters = {
            "target_file": "some_artifact",
            "undeploy_force": False,
            "undeploy": "some_artifact",
            "source": "some_artifact_on_master",
        }
        jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

        def list_deployments(jboss_config):
            return ["some_artifact"]

        def file_get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            kwargs,
        ):
            return "sfn", "hash", ""

        def file_manage_file(
            name,
            sfn,
            ret,
            source,
            source_sum,
            user,
            group,
            mode,
            attrs,
            saltenv,
            backup,
            makedirs,
            template,
            show_diff,
            contents,
            dir_mode,
        ):
            return {"result": True, "changes": False}

        jboss7_undeploy_mock = MagicMock()
        jboss7_deploy_mock = MagicMock()
        file_get_managed = MagicMock(side_effect=file_get_managed)
        file_manage_file = MagicMock(side_effect=file_manage_file)
        list_deployments_mock = MagicMock(side_effect=list_deployments)
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.undeploy": jboss7_undeploy_mock,
                "jboss7.deploy": jboss7_deploy_mock,
                "file.get_managed": file_get_managed,
                "file.manage_file": file_manage_file,
                "jboss7.list_deployments": list_deployments_mock,
            },
        ):
            # when
            result = jboss7.deployed(
                name="unchanged", jboss_config=jboss_conf, salt_source=parameters
            )

            # then
            self.assertFalse(jboss7_undeploy_mock.called)
            self.assertFalse(jboss7_deploy_mock.called)

    def test_should_redeploy_changed(self):
        # given
        parameters = {
            "target_file": "some_artifact",
            "undeploy_force": False,
            "undeploy": "some_artifact",
            "source": "some_artifact_on_master",
        }
        jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

        def list_deployments(jboss_config):
            return ["some_artifact"]

        def file_get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            kwargs,
        ):
            return "sfn", "hash", ""

        def file_manage_file(
            name,
            sfn,
            ret,
            source,
            source_sum,
            user,
            group,
            mode,
            attrs,
            saltenv,
            backup,
            makedirs,
            template,
            show_diff,
            contents,
            dir_mode,
        ):
            return {"result": True, "changes": True}

        jboss7_undeploy_mock = MagicMock()
        jboss7_deploy_mock = MagicMock()
        file_get_managed = MagicMock(side_effect=file_get_managed)
        file_manage_file = MagicMock(side_effect=file_manage_file)
        list_deployments_mock = MagicMock(side_effect=list_deployments)
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.undeploy": jboss7_undeploy_mock,
                "jboss7.deploy": jboss7_deploy_mock,
                "file.get_managed": file_get_managed,
                "file.manage_file": file_manage_file,
                "jboss7.list_deployments": list_deployments_mock,
            },
        ):
            # when
            result = jboss7.deployed(
                name="unchanged", jboss_config=jboss_conf, salt_source=parameters
            )

            # then
            self.assertTrue(jboss7_undeploy_mock.called)
            self.assertTrue(jboss7_deploy_mock.called)

    def test_should_deploy_different_artifact(self):
        # given
        parameters = {
            "target_file": "some_artifact",
            "undeploy_force": False,
            "undeploy": "some_artifact",
            "source": "some_artifact_on_master",
        }
        jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

        def list_deployments(jboss_config):
            return ["some_other_artifact"]

        def file_get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            kwargs,
        ):
            return "sfn", "hash", ""

        def file_manage_file(
            name,
            sfn,
            ret,
            source,
            source_sum,
            user,
            group,
            mode,
            attrs,
            saltenv,
            backup,
            makedirs,
            template,
            show_diff,
            contents,
            dir_mode,
        ):
            return {"result": True, "changes": False}

        jboss7_undeploy_mock = MagicMock()
        jboss7_deploy_mock = MagicMock()
        file_get_managed = MagicMock(side_effect=file_get_managed)
        file_manage_file = MagicMock(side_effect=file_manage_file)
        list_deployments_mock = MagicMock(side_effect=list_deployments)
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.undeploy": jboss7_undeploy_mock,
                "jboss7.deploy": jboss7_deploy_mock,
                "file.get_managed": file_get_managed,
                "file.manage_file": file_manage_file,
                "jboss7.list_deployments": list_deployments_mock,
            },
        ):
            # when
            result = jboss7.deployed(
                name="unchanged", jboss_config=jboss_conf, salt_source=parameters
            )

            # then
            self.assertFalse(jboss7_undeploy_mock.called)
            self.assertTrue(jboss7_deploy_mock.called)

    def test_should_redploy_undeploy_force(self):
        # given
        parameters = {
            "target_file": "some_artifact",
            "undeploy_force": True,
            "undeploy": "some_artifact",
            "source": "some_artifact_on_master",
        }
        jboss_conf = {"cli_path": "somewhere", "controller": "some_controller"}

        def list_deployments(jboss_config):
            return ["some_artifact"]

        def file_get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            kwargs,
        ):
            return "sfn", "hash", ""

        def file_manage_file(
            name,
            sfn,
            ret,
            source,
            source_sum,
            user,
            group,
            mode,
            attrs,
            saltenv,
            backup,
            makedirs,
            template,
            show_diff,
            contents,
            dir_mode,
        ):
            return {"result": True, "changes": False}

        jboss7_undeploy_mock = MagicMock()
        jboss7_deploy_mock = MagicMock()
        file_get_managed = MagicMock(side_effect=file_get_managed)
        file_manage_file = MagicMock(side_effect=file_manage_file)
        list_deployments_mock = MagicMock(side_effect=list_deployments)
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.undeploy": jboss7_undeploy_mock,
                "jboss7.deploy": jboss7_deploy_mock,
                "file.get_managed": file_get_managed,
                "file.manage_file": file_manage_file,
                "jboss7.list_deployments": list_deployments_mock,
            },
        ):
            # when
            result = jboss7.deployed(
                name="unchanged", jboss_config=jboss_conf, salt_source=parameters
            )

            # then
            self.assertTrue(jboss7_undeploy_mock.called)
            self.assertTrue(jboss7_deploy_mock.called)

    def test_should_create_new_datasource_if_not_exists(self):
        # given
        datasource_properties = {"connection-url": "jdbc:/old-connection-url"}
        ds_status = {"created": False}

        def read_func(jboss_config, name, profile):
            if ds_status["created"]:
                return {"success": True, "result": datasource_properties}
            else:
                return {"success": False, "err_code": "JBAS014807"}

        def create_func(jboss_config, name, datasource_properties, profile):
            ds_status["created"] = True
            return {"success": True}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock(side_effect=create_func)
        update_mock = MagicMock()
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_datasource": read_mock,
                "jboss7.create_datasource": create_mock,
                "jboss7.update_datasource": update_mock,
            },
        ):

            # when
            result = jboss7.datasource_exists(
                name="appDS",
                jboss_config={},
                datasource_properties=datasource_properties,
                profile=None,
            )

            # then
            create_mock.assert_called_with(
                name="appDS",
                jboss_config={},
                datasource_properties=datasource_properties,
                profile=None,
            )

            self.assertFalse(update_mock.called)
            self.assertEqual(result["comment"], "Datasource created.")

    def test_should_update_the_datasource_if_exists(self):
        ds_status = {"updated": False}

        def read_func(jboss_config, name, profile):
            if ds_status["updated"]:
                return {
                    "success": True,
                    "result": {"connection-url": "jdbc:/new-connection-url"},
                }
            else:
                return {
                    "success": True,
                    "result": {"connection-url": "jdbc:/old-connection-url"},
                }

        def update_func(jboss_config, name, new_properties, profile):
            ds_status["updated"] = True
            return {"success": True}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock()
        update_mock = MagicMock(side_effect=update_func)
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_datasource": read_mock,
                "jboss7.create_datasource": create_mock,
                "jboss7.update_datasource": update_mock,
            },
        ):
            result = jboss7.datasource_exists(
                name="appDS",
                jboss_config={},
                datasource_properties={"connection-url": "jdbc:/new-connection-url"},
                profile=None,
            )

            update_mock.assert_called_with(
                name="appDS",
                jboss_config={},
                new_properties={"connection-url": "jdbc:/new-connection-url"},
                profile=None,
            )
            self.assertTrue(read_mock.called)
            self.assertEqual(result["comment"], "Datasource updated.")

    def test_should_recreate_the_datasource_if_specified(self):
        read_mock = MagicMock(
            return_value={
                "success": True,
                "result": {"connection-url": "jdbc:/same-connection-url"},
            }
        )
        create_mock = MagicMock(return_value={"success": True})
        remove_mock = MagicMock(return_value={"success": True})
        update_mock = MagicMock()
        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_datasource": read_mock,
                "jboss7.create_datasource": create_mock,
                "jboss7.remove_datasource": remove_mock,
                "jboss7.update_datasource": update_mock,
            },
        ):
            result = jboss7.datasource_exists(
                name="appDS",
                jboss_config={},
                datasource_properties={"connection-url": "jdbc:/same-connection-url"},
                recreate=True,
            )

            remove_mock.assert_called_with(name="appDS", jboss_config={}, profile=None)
            create_mock.assert_called_with(
                name="appDS",
                jboss_config={},
                datasource_properties={"connection-url": "jdbc:/same-connection-url"},
                profile=None,
            )
            self.assertEqual(result["changes"]["removed"], "appDS")
            self.assertEqual(result["changes"]["created"], "appDS")

    def test_should_inform_if_the_datasource_has_not_changed(self):
        read_mock = MagicMock(
            return_value={
                "success": True,
                "result": {"connection-url": "jdbc:/same-connection-url"},
            }
        )
        create_mock = MagicMock()
        remove_mock = MagicMock()
        update_mock = MagicMock(return_value={"success": True})

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_datasource": read_mock,
                "jboss7.create_datasource": create_mock,
                "jboss7.remove_datasource": remove_mock,
                "jboss7.update_datasource": update_mock,
            },
        ):
            result = jboss7.datasource_exists(
                name="appDS",
                jboss_config={},
                datasource_properties={"connection-url": "jdbc:/old-connection-url"},
            )

            update_mock.assert_called_with(
                name="appDS",
                jboss_config={},
                new_properties={"connection-url": "jdbc:/old-connection-url"},
                profile=None,
            )
            self.assertFalse(create_mock.called)
            self.assertEqual(result["comment"], "Datasource not changed.")

    def test_should_create_binding_if_not_exists(self):
        # given
        binding_status = {"created": False}

        def read_func(jboss_config, binding_name, profile):
            if binding_status["created"]:
                return {"success": True, "result": {"value": "DEV"}}
            else:
                return {"success": False, "err_code": "JBAS014807"}

        def create_func(jboss_config, binding_name, value, profile):
            binding_status["created"] = True
            return {"success": True}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock(side_effect=create_func)
        update_mock = MagicMock()

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_simple_binding": read_mock,
                "jboss7.create_simple_binding": create_mock,
                "jboss7.update_simple_binding": update_mock,
            },
        ):

            # when
            result = jboss7.bindings_exist(
                name="bindings", jboss_config={}, bindings={"env": "DEV"}, profile=None
            )

            # then
            create_mock.assert_called_with(
                jboss_config={}, binding_name="env", value="DEV", profile=None
            )
            self.assertEqual(update_mock.call_count, 0)
            self.assertEqual(result["changes"], {"added": "env:DEV\n"})
            self.assertEqual(result["comment"], "Bindings changed.")

    def test_should_update_bindings_if_exists_and_different(self):
        # given
        binding_status = {"updated": False}

        def read_func(jboss_config, binding_name, profile):
            if binding_status["updated"]:
                return {"success": True, "result": {"value": "DEV2"}}
            else:
                return {"success": True, "result": {"value": "DEV"}}

        def update_func(jboss_config, binding_name, value, profile):
            binding_status["updated"] = True
            return {"success": True}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock()
        update_mock = MagicMock(side_effect=update_func)

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_simple_binding": read_mock,
                "jboss7.create_simple_binding": create_mock,
                "jboss7.update_simple_binding": update_mock,
            },
        ):
            # when
            result = jboss7.bindings_exist(
                name="bindings", jboss_config={}, bindings={"env": "DEV2"}, profile=None
            )

            # then
            update_mock.assert_called_with(
                jboss_config={}, binding_name="env", value="DEV2", profile=None
            )
            self.assertEqual(create_mock.call_count, 0)
            self.assertEqual(result["changes"], {"changed": "env:DEV->DEV2\n"})
            self.assertEqual(result["comment"], "Bindings changed.")

    def test_should_not_update_bindings_if_same(self):
        # given
        read_mock = MagicMock(
            return_value={"success": True, "result": {"value": "DEV2"}}
        )
        create_mock = MagicMock()
        update_mock = MagicMock()

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_simple_binding": read_mock,
                "jboss7.create_simple_binding": create_mock,
                "jboss7.update_simple_binding": update_mock,
            },
        ):
            # when
            result = jboss7.bindings_exist(
                name="bindings", jboss_config={}, bindings={"env": "DEV2"}
            )

            # then
            self.assertEqual(create_mock.call_count, 0)
            self.assertEqual(update_mock.call_count, 0)
            self.assertEqual(result["changes"], {})
            self.assertEqual(result["comment"], "Bindings not changed.")

    def test_should_raise_exception_if_cannot_create_binding(self):
        def read_func(jboss_config, binding_name, profile):
            return {"success": False, "err_code": "JBAS014807"}

        def create_func(jboss_config, binding_name, value, profile):
            return {"success": False, "failure-description": "Incorrect binding name."}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock(side_effect=create_func)
        update_mock = MagicMock()

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_simple_binding": read_mock,
                "jboss7.create_simple_binding": create_mock,
                "jboss7.update_simple_binding": update_mock,
            },
        ):
            # when
            try:
                jboss7.bindings_exist(
                    name="bindings",
                    jboss_config={},
                    bindings={"env": "DEV2"},
                    profile=None,
                )
                self.fail("An exception should be thrown")
            except CommandExecutionError as e:
                self.assertEqual(str(e), "Incorrect binding name.")

    def test_should_raise_exception_if_cannot_update_binding(self):
        def read_func(jboss_config, binding_name, profile):
            return {"success": True, "result": {"value": "DEV"}}

        def update_func(jboss_config, binding_name, value, profile):
            return {"success": False, "failure-description": "Incorrect binding name."}

        read_mock = MagicMock(side_effect=read_func)
        create_mock = MagicMock()
        update_mock = MagicMock(side_effect=update_func)

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_simple_binding": read_mock,
                "jboss7.create_simple_binding": create_mock,
                "jboss7.update_simple_binding": update_mock,
            },
        ):

            # when
            try:
                jboss7.bindings_exist(
                    name="bindings",
                    jboss_config={},
                    bindings={"env": "!@#!///some weird value"},
                    profile=None,
                )
                self.fail("An exception should be thrown")
            except CommandExecutionError as e:
                self.assertEqual(str(e), "Incorrect binding name.")

    def test_datasource_exist_create_datasource_good_code(self):
        jboss_config = {
            "cli_path": "/home/ch44d/Desktop/wildfly-18.0.0.Final/bin/jboss-cli.sh",
            "controller": "127.0.0.1: 9990",
            "cli_user": "user",
            "cli_password": "user",
        }

        datasource_properties = {
            "driver - name": "h2",
            "connection - url": "jdbc:sqlserver://127.0.0.1:1433;DatabaseName=test_s2",
            "jndi - name": (
                "java:/home/ch44d/Desktop/sqljdbc_7.4/enu/mssql-jdbc-7.4.1.jre8.jar"
            ),
            "user - name": "user",
            "password": "user",
            "use - java - context": True,
        }

        read_datasource = MagicMock(
            return_value={"success": False, "err_code": "WFLYCTL0216"}
        )

        error_msg = "Error: -1"
        create_datasource = MagicMock(
            return_value={"success": False, "stdout": error_msg}
        )

        with patch.dict(
            jboss7.__salt__,
            {
                "jboss7.read_datasource": read_datasource,
                "jboss7.create_datasource": create_datasource,
            },
        ):
            ret = jboss7.datasource_exists("SQL", jboss_config, datasource_properties)

        self.assertTrue("result" in ret)
        self.assertFalse(ret["result"])
        self.assertTrue("comment" in ret)
        self.assertTrue(error_msg in ret["comment"])

        read_datasource.assert_called_once()
        create_datasource.assert_called_once()

    def test_datasource_exist_create_datasource_bad_code(self):
        jboss_config = {
            "cli_path": "/home/ch44d/Desktop/wildfly-18.0.0.Final/bin/jboss-cli.sh",
            "controller": "127.0.0.1: 9990",
            "cli_user": "user",
            "cli_password": "user",
        }

        datasource_properties = {
            "driver - name": "h2",
            "connection - url": "jdbc:sqlserver://127.0.0.1:1433;DatabaseName=test_s2",
            "jndi - name": (
                "java:/home/ch44d/Desktop/sqljdbc_7.4/enu/mssql-jdbc-7.4.1.jre8.jar"
            ),
            "user - name": "user",
            "password": "user",
            "use - java - context": True,
        }

        read_datasource = MagicMock(
            return_value={
                "success": False,
                "err_code": "WFLYCTL0217",
                "failure-description": "Something happened",
            }
        )

        with patch.dict(jboss7.__salt__, {"jboss7.read_datasource": read_datasource}):
            self.assertRaises(
                CommandExecutionError,
                jboss7.datasource_exists,
                "SQL",
                jboss_config,
                datasource_properties,
            )
            read_datasource.assert_called_once()

@ -1,897 +0,0 @@
|
|||
"""
|
||||
:codeauthor: :email:`Jeff Schroeder <jeffschroeder@computer.org>`
|
||||
"""
|
||||
|
||||
import base64
|
||||
from contextlib import contextmanager
|
||||
|
||||
import pytest
|
||||
|
||||
import salt.modules.kubernetesmod as kubernetesmod
|
||||
import salt.states.kubernetes as kubernetes
|
||||
import salt.utils.stringutils
|
||||
from tests.support.mixins import LoaderModuleMockMixin
|
||||
from tests.support.mock import MagicMock, patch
|
||||
from tests.support.unit import TestCase
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
kubernetesmod.HAS_LIBS is False,
|
||||
reason="Probably Kubernetes client lib is not installed. Skipping test_kubernetes.py",
|
||||
)
|
||||
class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
|
||||
"""
|
||||
Test cases for salt.states.kubernetes
|
||||
"""
|
||||
|
||||
def setup_loader_modules(self):
|
||||
return {kubernetes: {"__env__": "base"}}
|
||||
|
||||
@contextmanager
|
||||
def mock_func(self, func_name, return_value, test=False):
|
||||
"""
|
||||
Mock any of the kubernetes state function return values and set
|
||||
the test options.
|
||||
"""
|
||||
name = "kubernetes.{}".format(func_name)
|
||||
mocked = {name: MagicMock(return_value=return_value)}
|
||||
with patch.dict(kubernetes.__salt__, mocked) as patched:
|
||||
with patch.dict(kubernetes.__opts__, {"test": test}):
|
||||
yield patched
|
||||
|
||||
def make_configmap(self, name, namespace="default", data=None):
|
||||
return self.make_ret_dict(
|
||||
kind="ConfigMap",
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
data=data,
|
||||
)
|
||||
|
||||
def make_secret(self, name, namespace="default", data=None):
|
||||
secret_data = self.make_ret_dict(
|
||||
kind="Secret",
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
data=data,
|
||||
)
|
||||
# Base64 all of the values just like kubectl does
|
||||
for key, value in secret_data["data"].items():
|
||||
secret_data["data"][key] = base64.b64encode(
|
||||
salt.utils.stringutils.to_bytes(value)
|
||||
)
|
||||
|
||||
return secret_data
|
||||
|
||||
def make_node_labels(self, name="minikube"):
|
||||
return {
|
||||
"kubernetes.io/hostname": name,
|
||||
"beta.kubernetes.io/os": "linux",
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
"failure-domain.beta.kubernetes.io/region": "us-west-1",
|
||||
}
|
||||
|
||||
def make_node(self, name="minikube"):
|
||||
node_data = self.make_ret_dict(kind="Node", name="minikube")
|
||||
node_data.update(
|
||||
{
|
||||
"api_version": "v1",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"annotations": {"node.alpha.kubernetes.io/ttl": "0"},
|
||||
"labels": self.make_node_labels(name=name),
|
||||
"name": name,
|
||||
"namespace": None,
|
||||
"self_link": "/api/v1/nodes/{name}".format(name=name),
|
||||
"uid": "7811b8ae-c1a1-11e7-a55a-0800279fb61e",
|
||||
},
|
||||
"spec": {"external_id": name},
|
||||
"status": {},
|
||||
}
|
||||
)
|
||||
return node_data
|
||||
|
||||
def make_namespace(self, name="default"):
|
||||
namespace_data = self.make_ret_dict(kind="Namespace", name=name)
|
||||
del namespace_data["data"]
|
||||
namespace_data.update(
|
||||
{
|
||||
"status": {"phase": "Active"},
|
||||
"spec": {"finalizers": ["kubernetes"]},
|
||||
"metadata": {
|
||||
"name": name,
|
||||
"namespace": None,
|
||||
"labels": None,
|
||||
"self_link": "/api/v1/namespaces/{namespace}".format(
|
||||
namespace=name,
|
||||
),
|
||||
"annotations": None,
|
||||
"uid": "752fceeb-c1a1-11e7-a55a-0800279fb61e",
|
||||
},
|
||||
}
|
||||
)
|
||||
return namespace_data
|
||||
|
||||
def make_ret_dict(self, kind, name, namespace=None, data=None):
|
||||
"""
|
||||
Make a minimal example configmap or secret for using in mocks
|
||||
"""
|
||||
|
||||
assert kind in ("Secret", "ConfigMap", "Namespace", "Node")
|
||||
|
||||
if data is None:
|
||||
data = {}
|
||||
|
||||
self_link = "/api/v1/namespaces/{namespace}/{kind}s/{name}".format(
|
||||
namespace=namespace,
|
||||
kind=kind.lower(),
|
||||
name=name,
|
||||
)
|
||||
|
||||
return_data = {
|
||||
"kind": kind,
|
||||
"data": data,
|
||||
"api_version": "v1",
|
||||
"metadata": {
|
||||
"name": name,
|
||||
"labels": None,
|
||||
"namespace": namespace,
|
||||
"self_link": self_link,
|
||||
"annotations": {"kubernetes.io/change-cause": "salt-call state.apply"},
|
||||
},
|
||||
}
|
||||
return return_data
|
||||
|
||||
def test_configmap_present__fail(self):
|
||||
error = kubernetes.configmap_present(
|
||||
name="testme",
|
||||
data={1: 1},
|
||||
source="salt://beyond/oblivion.jinja",
|
||||
)
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"changes": {},
|
||||
"result": False,
|
||||
"name": "testme",
|
||||
"comment": "'source' cannot be used in combination with 'data'",
|
||||
},
|
||||
error,
|
||||
)
|
||||
|
||||
def test_configmap_present__create_test_true(self):
|
||||
# Create a new configmap with test=True
|
||||
with self.mock_func("show_configmap", return_value=None, test=True):
|
||||
ret = kubernetes.configmap_present(
|
||||
name="example",
|
||||
data={"example.conf": "# empty config file"},
|
||||
)
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "The configmap is going to be created",
|
||||
"changes": {},
|
||||
"name": "example",
|
||||
"result": None,
|
||||
},
|
||||
ret,
|
||||
)
|
||||
|
||||
def test_configmap_present__create(self):
|
||||
# Create a new configmap
|
||||
with self.mock_func("show_configmap", return_value=None):
|
||||
cm = self.make_configmap(
|
||||
name="test",
|
||||
namespace="default",
|
||||
data={"foo": "bar"},
|
||||
)
|
||||
with self.mock_func("create_configmap", return_value=cm):
|
||||
actual = kubernetes.configmap_present(
|
||||
name="test",
|
||||
data={"foo": "bar"},
|
||||
)
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "",
|
||||
"changes": {"data": {"foo": "bar"}},
|
||||
"name": "test",
|
||||
"result": True,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_present__create_no_data(self):
|
||||
# Create a new configmap with no 'data' attribute
|
||||
with self.mock_func("show_configmap", return_value=None):
|
||||
cm = self.make_configmap(
|
||||
name="test",
|
||||
namespace="default",
|
||||
)
|
||||
with self.mock_func("create_configmap", return_value=cm):
|
||||
actual = kubernetes.configmap_present(name="test")
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "",
|
||||
"changes": {"data": {}},
|
||||
"name": "test",
|
||||
"result": True,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_present__replace_test_true(self):
|
||||
cm = self.make_configmap(
|
||||
name="settings",
|
||||
namespace="saltstack",
|
||||
data={"foobar.conf": "# Example configuration"},
|
||||
)
|
||||
with self.mock_func("show_configmap", return_value=cm, test=True):
|
||||
ret = kubernetes.configmap_present(
|
||||
name="settings",
|
||||
namespace="saltstack",
|
||||
data={"foobar.conf": "# Example configuration"},
|
||||
)
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "The configmap is going to be replaced",
|
||||
"changes": {},
|
||||
"name": "settings",
|
||||
"result": None,
|
||||
},
|
||||
ret,
|
||||
)
|
||||
|
||||
def test_configmap_present__replace(self):
|
||||
cm = self.make_configmap(name="settings", data={"action": "make=war"})
|
||||
# Replace an existing configmap
|
||||
with self.mock_func("show_configmap", return_value=cm):
|
||||
new_cm = cm.copy()
|
||||
new_cm.update({"data": {"action": "make=peace"}})
|
||||
with self.mock_func("replace_configmap", return_value=new_cm):
|
||||
actual = kubernetes.configmap_present(
|
||||
name="settings",
|
||||
data={"action": "make=peace"},
|
||||
)
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": (
|
||||
"The configmap is already present. Forcing recreation"
|
||||
),
|
||||
"changes": {"data": {"action": "make=peace"}},
|
||||
"name": "settings",
|
||||
"result": True,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_absent__noop_test_true(self):
|
||||
# Nothing to delete with test=True
|
||||
with self.mock_func("show_configmap", return_value=None, test=True):
|
||||
actual = kubernetes.configmap_absent(name="NOT_FOUND")
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "The configmap does not exist",
|
||||
"changes": {},
|
||||
"name": "NOT_FOUND",
|
||||
"result": None,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_absent__test_true(self):
|
||||
# Configmap exists with test=True
|
||||
cm = self.make_configmap(name="deleteme", namespace="default")
|
||||
with self.mock_func("show_configmap", return_value=cm, test=True):
|
||||
actual = kubernetes.configmap_absent(name="deleteme")
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "The configmap is going to be deleted",
|
||||
"changes": {},
|
||||
"name": "deleteme",
|
||||
"result": None,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_absent__noop(self):
|
||||
# Nothing to delete
|
||||
with self.mock_func("show_configmap", return_value=None):
|
||||
actual = kubernetes.configmap_absent(name="NOT_FOUND")
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "The configmap does not exist",
|
||||
"changes": {},
|
||||
"name": "NOT_FOUND",
|
||||
"result": True,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
def test_configmap_absent(self):
|
||||
# Configmap exists, delete it!
|
||||
cm = self.make_configmap(name="deleteme", namespace="default")
|
||||
with self.mock_func("show_configmap", return_value=cm):
|
||||
# The return from this module isn't used in the state
|
||||
with self.mock_func("delete_configmap", return_value={}):
|
||||
actual = kubernetes.configmap_absent(name="deleteme")
|
||||
self.assertDictEqual(
|
||||
{
|
||||
"comment": "ConfigMap deleted",
|
||||
"changes": {
|
||||
"kubernetes.configmap": {
|
||||
"new": "absent",
|
||||
"old": "present",
|
||||
},
|
||||
},
|
||||
"name": "deleteme",
|
||||
"result": True,
|
||||
},
|
||||
actual,
|
||||
)
|
||||
|
||||
    def test_secret_present__fail(self):
        actual = kubernetes.secret_present(
            name="sekret",
            data={"password": "monk3y"},
            source="salt://nope.jinja",
        )
        self.assertDictEqual(
            {
                "changes": {},
                "result": False,
                "name": "sekret",
                "comment": "'source' cannot be used in combination with 'data'",
            },
            actual,
        )

    def test_secret_present__exists_test_true(self):
        secret = self.make_secret(name="sekret")
        new_secret = secret.copy()
        new_secret.update({"data": {"password": "uncle"}})
        # Secret exists already and needs replacing with test=True
        with self.mock_func("show_secret", return_value=secret):
            with self.mock_func("replace_secret", return_value=new_secret, test=True):
                actual = kubernetes.secret_present(
                    name="sekret",
                    data={"password": "uncle"},
                )
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": None,
                        "name": "sekret",
                        "comment": "The secret is going to be replaced",
                    },
                    actual,
                )

    def test_secret_present__exists(self):
        # Secret exists and gets replaced
        secret = self.make_secret(name="sekret", data={"password": "booyah"})
        with self.mock_func("show_secret", return_value=secret):
            with self.mock_func("replace_secret", return_value=secret):
                actual = kubernetes.secret_present(
                    name="sekret",
                    data={"password": "booyah"},
                )
                self.assertDictEqual(
                    {
                        "changes": {"data": ["password"]},
                        "result": True,
                        "name": "sekret",
                        "comment": "The secret is already present. Forcing recreation",
                    },
                    actual,
                )

    def test_secret_present__create(self):
        # Secret does not exist and gets created
        secret = self.make_secret(name="sekret", data={"password": "booyah"})
        with self.mock_func("show_secret", return_value=None):
            with self.mock_func("create_secret", return_value=secret):
                actual = kubernetes.secret_present(
                    name="sekret",
                    data={"password": "booyah"},
                )
                self.assertDictEqual(
                    {
                        "changes": {"data": ["password"]},
                        "result": True,
                        "name": "sekret",
                        "comment": "",
                    },
                    actual,
                )

    def test_secret_present__create_no_data(self):
        # Secret does not exist and gets created without data
        secret = self.make_secret(name="sekret")
        with self.mock_func("show_secret", return_value=None):
            with self.mock_func("create_secret", return_value=secret):
                actual = kubernetes.secret_present(name="sekret")
                self.assertDictEqual(
                    {
                        "changes": {"data": []},
                        "result": True,
                        "name": "sekret",
                        "comment": "",
                    },
                    actual,
                )

    def test_secret_present__create_test_true(self):
        # Secret does not exist and would be created with test=True
        secret = self.make_secret(name="sekret")
        with self.mock_func("show_secret", return_value=None):
            with self.mock_func("create_secret", return_value=secret, test=True):
                actual = kubernetes.secret_present(name="sekret")
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": None,
                        "name": "sekret",
                        "comment": "The secret is going to be created",
                    },
                    actual,
                )

    def test_secret_absent__noop_test_true(self):
        with self.mock_func("show_secret", return_value=None, test=True):
            actual = kubernetes.secret_absent(name="sekret")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "sekret",
                    "comment": "The secret does not exist",
                },
                actual,
            )

    def test_secret_absent__noop(self):
        with self.mock_func("show_secret", return_value=None):
            actual = kubernetes.secret_absent(name="passwords")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": True,
                    "name": "passwords",
                    "comment": "The secret does not exist",
                },
                actual,
            )

    def test_secret_absent__delete_test_true(self):
        secret = self.make_secret(name="credentials", data={"redis": "letmein"})
        with self.mock_func("show_secret", return_value=secret):
            with self.mock_func("delete_secret", return_value=secret, test=True):
                actual = kubernetes.secret_absent(name="credentials")
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": None,
                        "name": "credentials",
                        "comment": "The secret is going to be deleted",
                    },
                    actual,
                )

    def test_secret_absent__delete(self):
        secret = self.make_secret(name="foobar", data={"redis": "letmein"})
        deleted = {
            "status": None,
            "kind": "Secret",
            "code": None,
            "reason": None,
            "details": None,
            "message": None,
            "api_version": "v1",
            "metadata": {
                "self_link": "/api/v1/namespaces/default/secrets/foobar",
                "resource_version": "30292",
            },
        }
        with self.mock_func("show_secret", return_value=secret):
            with self.mock_func("delete_secret", return_value=deleted):
                actual = kubernetes.secret_absent(name="foobar")
                self.assertDictEqual(
                    {
                        "changes": {
                            "kubernetes.secret": {"new": "absent", "old": "present"},
                        },
                        "result": True,
                        "name": "foobar",
                        "comment": "Secret deleted",
                    },
                    actual,
                )

    def test_node_label_present__add_test_true(self):
        labels = self.make_node_labels()
        with self.mock_func("node_labels", return_value=labels, test=True):
            actual = kubernetes.node_label_present(
                name="com.zoo-animal",
                node="minikube",
                value="monkey",
            )
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "com.zoo-animal",
                    "comment": "The label is going to be set",
                },
                actual,
            )

    def test_node_label_present__add(self):
        node_data = self.make_node()
        # Remove some of the defaults to make it simpler
        node_data["metadata"]["labels"] = {
            "beta.kubernetes.io/os": "linux",
        }
        labels = node_data["metadata"]["labels"]

        with self.mock_func("node_labels", return_value=labels):
            with self.mock_func("node_add_label", return_value=node_data):
                actual = kubernetes.node_label_present(
                    name="failure-domain.beta.kubernetes.io/zone",
                    node="minikube",
                    value="us-central1-a",
                )
                self.assertDictEqual(
                    {
                        "comment": "",
                        "changes": {
                            "minikube.failure-domain.beta.kubernetes.io/zone": {
                                "new": {
                                    "failure-domain.beta.kubernetes.io/zone": (
                                        "us-central1-a"
                                    ),
                                    "beta.kubernetes.io/os": "linux",
                                },
                                "old": {"beta.kubernetes.io/os": "linux"},
                            },
                        },
                        "name": "failure-domain.beta.kubernetes.io/zone",
                        "result": True,
                    },
                    actual,
                )

    def test_node_label_present__already_set(self):
        node_data = self.make_node()
        labels = node_data["metadata"]["labels"]
        with self.mock_func("node_labels", return_value=labels):
            with self.mock_func("node_add_label", return_value=node_data):
                actual = kubernetes.node_label_present(
                    name="failure-domain.beta.kubernetes.io/region",
                    node="minikube",
                    value="us-west-1",
                )
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": True,
                        "name": "failure-domain.beta.kubernetes.io/region",
                        "comment": (
                            "The label is already set and has the specified value"
                        ),
                    },
                    actual,
                )

    def test_node_label_present__update_test_true(self):
        node_data = self.make_node()
        labels = node_data["metadata"]["labels"]
        with self.mock_func("node_labels", return_value=labels):
            with self.mock_func("node_add_label", return_value=node_data, test=True):
                actual = kubernetes.node_label_present(
                    name="failure-domain.beta.kubernetes.io/region",
                    node="minikube",
                    value="us-east-1",
                )
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": None,
                        "name": "failure-domain.beta.kubernetes.io/region",
                        "comment": "The label is going to be updated",
                    },
                    actual,
                )

    def test_node_label_present__update(self):
        node_data = self.make_node()
        # Remove some of the defaults to make it simpler
        node_data["metadata"]["labels"] = {
            "failure-domain.beta.kubernetes.io/region": "us-west-1",
        }
        labels = node_data["metadata"]["labels"]
        with self.mock_func("node_labels", return_value=labels):
            with self.mock_func("node_add_label", return_value=node_data):
                actual = kubernetes.node_label_present(
                    name="failure-domain.beta.kubernetes.io/region",
                    node="minikube",
                    value="us-east-1",
                )
                self.assertDictEqual(
                    {
                        "changes": {
                            "minikube.failure-domain.beta.kubernetes.io/region": {
                                "new": {
                                    "failure-domain.beta.kubernetes.io/region": (
                                        "us-east-1"
                                    )
                                },
                                "old": {
                                    "failure-domain.beta.kubernetes.io/region": (
                                        "us-west-1"
                                    )
                                },
                            }
                        },
                        "result": True,
                        "name": "failure-domain.beta.kubernetes.io/region",
                        "comment": "The label is already set, changing the value",
                    },
                    actual,
                )

    def test_node_label_absent__noop_test_true(self):
        labels = self.make_node_labels()
        with self.mock_func("node_labels", return_value=labels, test=True):
            actual = kubernetes.node_label_absent(
                name="non-existent-label",
                node="minikube",
            )
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "non-existent-label",
                    "comment": "The label does not exist",
                },
                actual,
            )

    def test_node_label_absent__noop(self):
        labels = self.make_node_labels()
        with self.mock_func("node_labels", return_value=labels):
            actual = kubernetes.node_label_absent(
                name="non-existent-label",
                node="minikube",
            )
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": True,
                    "name": "non-existent-label",
                    "comment": "The label does not exist",
                },
                actual,
            )

    def test_node_label_absent__delete_test_true(self):
        labels = self.make_node_labels()
        with self.mock_func("node_labels", return_value=labels, test=True):
            actual = kubernetes.node_label_absent(
                name="failure-domain.beta.kubernetes.io/region",
                node="minikube",
            )
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "failure-domain.beta.kubernetes.io/region",
                    "comment": "The label is going to be deleted",
                },
                actual,
            )

    def test_node_label_absent__delete(self):
        node_data = self.make_node()
        labels = node_data["metadata"]["labels"].copy()

        node_data["metadata"]["labels"].pop("failure-domain.beta.kubernetes.io/region")

        with self.mock_func("node_labels", return_value=labels):
            with self.mock_func("node_remove_label", return_value=node_data):
                actual = kubernetes.node_label_absent(
                    name="failure-domain.beta.kubernetes.io/region",
                    node="minikube",
                )
                self.assertDictEqual(
                    {
                        "result": True,
                        "changes": {
                            "kubernetes.node_label": {
                                "new": "absent",
                                "old": "present",
                            }
                        },
                        "comment": "Label removed from node",
                        "name": "failure-domain.beta.kubernetes.io/region",
                    },
                    actual,
                )

    def test_namespace_present__create_test_true(self):
        with self.mock_func("show_namespace", return_value=None, test=True):
            actual = kubernetes.namespace_present(name="saltstack")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "saltstack",
                    "comment": "The namespace is going to be created",
                },
                actual,
            )

    def test_namespace_present__create(self):
        namespace_data = self.make_namespace(name="saltstack")
        with self.mock_func("show_namespace", return_value=None):
            with self.mock_func("create_namespace", return_value=namespace_data):
                actual = kubernetes.namespace_present(name="saltstack")
                self.assertDictEqual(
                    {
                        "changes": {"namespace": {"new": namespace_data, "old": {}}},
                        "result": True,
                        "name": "saltstack",
                        "comment": "",
                    },
                    actual,
                )

    def test_namespace_present__noop_test_true(self):
        namespace_data = self.make_namespace(name="saltstack")
        with self.mock_func("show_namespace", return_value=namespace_data, test=True):
            actual = kubernetes.namespace_present(name="saltstack")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "saltstack",
                    "comment": "The namespace already exists",
                },
                actual,
            )

    def test_namespace_present__noop(self):
        namespace_data = self.make_namespace(name="saltstack")
        with self.mock_func("show_namespace", return_value=namespace_data):
            actual = kubernetes.namespace_present(name="saltstack")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": True,
                    "name": "saltstack",
                    "comment": "The namespace already exists",
                },
                actual,
            )

    def test_namespace_absent__noop_test_true(self):
        with self.mock_func("show_namespace", return_value=None, test=True):
            actual = kubernetes.namespace_absent(name="salt")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "salt",
                    "comment": "The namespace does not exist",
                },
                actual,
            )

    def test_namespace_absent__noop(self):
        with self.mock_func("show_namespace", return_value=None):
            actual = kubernetes.namespace_absent(name="salt")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": True,
                    "name": "salt",
                    "comment": "The namespace does not exist",
                },
                actual,
            )

    def test_namespace_absent__delete_test_true(self):
        namespace_data = self.make_namespace(name="salt")
        with self.mock_func("show_namespace", return_value=namespace_data, test=True):
            actual = kubernetes.namespace_absent(name="salt")
            self.assertDictEqual(
                {
                    "changes": {},
                    "result": None,
                    "name": "salt",
                    "comment": "The namespace is going to be deleted",
                },
                actual,
            )

    def test_namespace_absent__delete_code_200(self):
        namespace_data = self.make_namespace(name="salt")
        deleted = namespace_data.copy()
        deleted.update({"code": 200, "message": None})
        with self.mock_func("show_namespace", return_value=namespace_data):
            with self.mock_func("delete_namespace", return_value=deleted):
                actual = kubernetes.namespace_absent(name="salt")
                self.assertDictEqual(
                    {
                        "changes": {
                            "kubernetes.namespace": {"new": "absent", "old": "present"}
                        },
                        "result": True,
                        "name": "salt",
                        "comment": "Terminating",
                    },
                    actual,
                )

    def test_namespace_absent__delete_status_terminating(self):
        namespace_data = self.make_namespace(name="salt")
        deleted = namespace_data.copy()
        deleted.update(
            {
                "code": None,
                "status": "Terminating namespace",
                "message": "Terminating this shizzzle yo",
            }
        )
        with self.mock_func("show_namespace", return_value=namespace_data):
            with self.mock_func("delete_namespace", return_value=deleted):
                actual = kubernetes.namespace_absent(name="salt")
                self.assertDictEqual(
                    {
                        "changes": {
                            "kubernetes.namespace": {"new": "absent", "old": "present"}
                        },
                        "result": True,
                        "name": "salt",
                        "comment": "Terminating this shizzzle yo",
                    },
                    actual,
                )

    def test_namespace_absent__delete_status_phase_terminating(self):
        # This is what kubernetes 1.8.0 looks like when deleting namespaces
        namespace_data = self.make_namespace(name="salt")
        deleted = namespace_data.copy()
        deleted.update(
            {"code": None, "message": None, "status": {"phase": "Terminating"}}
        )
        with self.mock_func("show_namespace", return_value=namespace_data):
            with self.mock_func("delete_namespace", return_value=deleted):
                actual = kubernetes.namespace_absent(name="salt")
                self.assertDictEqual(
                    {
                        "changes": {
                            "kubernetes.namespace": {"new": "absent", "old": "present"}
                        },
                        "result": True,
                        "name": "salt",
                        "comment": "Terminating",
                    },
                    actual,
                )

    def test_namespace_absent__delete_error(self):
        namespace_data = self.make_namespace(name="salt")
        deleted = namespace_data.copy()
        deleted.update({"code": 418, "message": "I'm a teapot!", "status": None})
        with self.mock_func("show_namespace", return_value=namespace_data):
            with self.mock_func("delete_namespace", return_value=deleted):
                actual = kubernetes.namespace_absent(name="salt")
                self.assertDictEqual(
                    {
                        "changes": {},
                        "result": False,
                        "name": "salt",
                        "comment": "Something went wrong, response: {}".format(
                            deleted,
                        ),
                    },
                    actual,
                )
@@ -1,110 +0,0 @@
"""
|
||||
:codeauthor: :email: `Mike Place <mp@saltstack.com>`
|
||||
"""
|
||||
|
||||
|
||||
import io
|
||||
|
||||
from salt import template
|
||||
from tests.support.mock import MagicMock
|
||||
from tests.support.unit import TestCase
|
||||
|
||||
|
||||
class TemplateTestCase(TestCase):

    render_dict = {
        "jinja": "fake_jinja_func",
        "json": "fake_json_func",
        "mako": "fake_mako_func",
    }

    def test_compile_template_bad_type(self):
        """
        Test to ensure that unsupported types cannot be passed to the template compiler
        """
        ret = template.compile_template(["1", "2", "3"], None, None, None, None)
        self.assertDictEqual(ret, {})

    def test_compile_template_preserves_windows_newlines(self):
        """
        Test to ensure that a file with Windows newlines, when rendered by a
        template renderer, does not eat the CR character.
        """

        def _get_rend(renderer, value):
            """
            We need a new MagicMock each time since we're dealing with StringIO
            objects which are read like files.
            """
            return {renderer: MagicMock(return_value=io.StringIO(value))}

        input_data_windows = "foo\r\nbar\r\nbaz\r\n"
        input_data_non_windows = input_data_windows.replace("\r\n", "\n")
        renderer = "test"
        blacklist = whitelist = []

        ret = template.compile_template(
            ":string:",
            _get_rend(renderer, input_data_non_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_windows,
        ).read()
        # Even though the mocked renderer returned a string without the windows
        # newlines, the compiled template should still have them.
        self.assertEqual(ret, input_data_windows)

        # Now test that we aren't adding them in unnecessarily.
        ret = template.compile_template(
            ":string:",
            _get_rend(renderer, input_data_non_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_non_windows,
        ).read()
        self.assertEqual(ret, input_data_non_windows)

        # Finally, ensure that we're not unnecessarily replacing the \n with
        # \r\n in the event that the renderer returned a string with the
        # windows newlines intact.
        ret = template.compile_template(
            ":string:",
            _get_rend(renderer, input_data_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_windows,
        ).read()
        self.assertEqual(ret, input_data_windows)

    def test_check_render_pipe_str(self):
        """
        Check that all renderers specified in the pipe string are available.
        """
        ret = template.check_render_pipe_str("jinja|json", self.render_dict, None, None)
        self.assertIn(("fake_jinja_func", ""), ret)
        self.assertIn(("fake_json_func", ""), ret)
        self.assertNotIn(("OBVIOUSLY_NOT_HERE", ""), ret)

    def test_check_renderer_blacklisting(self):
        """
        Check that renderers in the pipe string are filtered according to the
        blacklist and whitelist.
        """
        ret = template.check_render_pipe_str(
            "jinja|json", self.render_dict, ["jinja"], None
        )
        self.assertListEqual([("fake_json_func", "")], ret)
        ret = template.check_render_pipe_str(
            "jinja|json", self.render_dict, None, ["jinja"]
        )
        self.assertListEqual([("fake_jinja_func", "")], ret)
        ret = template.check_render_pipe_str(
            "jinja|json", self.render_dict, ["jinja"], ["jinja"]
        )
        self.assertListEqual([], ret)
        ret = template.check_render_pipe_str(
            "jinja|json", self.render_dict, ["jinja"], ["jinja", "json"]
        )
        self.assertListEqual([("fake_json_func", "")], ret)
@@ -1,440 +0,0 @@
"""
|
||||
Unit tests for salt.utils.templates.py
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from collections import OrderedDict
|
||||
from pathlib import PurePath, PurePosixPath
|
||||
|
||||
import pytest
|
||||
|
||||
import salt.utils.files
|
||||
import salt.utils.templates
|
||||
from tests.support.helpers import with_tempdir
|
||||
from tests.support.mock import patch
|
||||
from tests.support.unit import TestCase
|
||||
|
||||
try:
|
||||
import Cheetah as _
|
||||
|
||||
HAS_CHEETAH = True
|
||||
except ImportError:
|
||||
HAS_CHEETAH = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RenderTestCase(TestCase):
    def setUp(self):
        # Default context for salt.utils.templates.render_*_tmpl to work
        self.context = {
            "opts": {"cachedir": "/D", "__cli": "salt"},
            "saltenv": None,
        }

    ### Tests for Jinja (whitespace-friendly)
    def test_render_jinja_sanity(self):
        tmpl = """OK"""
        res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    def test_render_jinja_evaluate(self):
        tmpl = """{{ "OK" }}"""
        res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    def test_render_jinja_evaluate_multi(self):
        tmpl = """{% if 1 -%}OK{%- endif %}"""
        res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    def test_render_jinja_variable(self):
        tmpl = """{{ var }}"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_jinja_tmpl(tmpl, ctx)
        self.assertEqual(res, "OK")

    def test_render_jinja_tojson_sorted(self):
        templ = """thing: {{ var|tojson(sort_keys=True) }}"""
        expected = """thing: {"x": "xxx", "y": "yyy", "z": "zzz"}"""

        with patch.dict(self.context, {"var": {"z": "zzz", "y": "yyy", "x": "xxx"}}):
            res = salt.utils.templates.render_jinja_tmpl(templ, self.context)

        assert res == expected

    def test_render_jinja_tojson_unsorted(self):
        templ = """thing: {{ var|tojson(sort_keys=False) }}"""
        expected = """thing: {"z": "zzz", "x": "xxx", "y": "yyy"}"""

        # Values must be added to the dict in the expected order. This is
        # only necessary for older Pythons that don't remember dict order.
        d = OrderedDict()
        d["z"] = "zzz"
        d["x"] = "xxx"
        d["y"] = "yyy"

        with patch.dict(self.context, {"var": d}):
            res = salt.utils.templates.render_jinja_tmpl(templ, self.context)

        assert res == expected

    ### Tests for mako template
    def test_render_mako_sanity(self):
        tmpl = """OK"""
        res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    def test_render_mako_evaluate(self):
        tmpl = """${ "OK" }"""
        res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    def test_render_mako_evaluate_multi(self):
        tmpl = """
        % if 1:
        OK
        % endif
        """
        res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
        stripped = res.strip()
        self.assertEqual(stripped, "OK")

    def test_render_mako_variable(self):
        tmpl = """${ var }"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_mako_tmpl(tmpl, ctx)
        self.assertEqual(res, "OK")

    ### Tests for wempy template
    @pytest.mark.skipif(
        sys.version_info > (3,),
        reason="The wempy module is currently unsupported under Python3",
    )
    def test_render_wempy_sanity(self):
        tmpl = """OK"""
        res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    @pytest.mark.skipif(
        sys.version_info > (3,),
        reason="The wempy module is currently unsupported under Python3",
    )
    def test_render_wempy_evaluate(self):
        tmpl = """{{="OK"}}"""
        res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    @pytest.mark.skipif(
        sys.version_info > (3,),
        reason="The wempy module is currently unsupported under Python3",
    )
    def test_render_wempy_evaluate_multi(self):
        tmpl = """{{if 1:}}OK{{pass}}"""
        res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    @pytest.mark.skipif(
        sys.version_info > (3,),
        reason="The wempy module is currently unsupported under Python3",
    )
    def test_render_wempy_variable(self):
        tmpl = """{{=var}}"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_wempy_tmpl(tmpl, ctx)
        self.assertEqual(res, "OK")

    ### Tests for genshi template (xml-based)
    def test_render_genshi_sanity(self):
        tmpl = """<RU>OK</RU>"""
        res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "<RU>OK</RU>")

    def test_render_genshi_evaluate(self):
        tmpl = """<RU>${ "OK" }</RU>"""
        res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "<RU>OK</RU>")

    def test_render_genshi_evaluate_condition(self):
        tmpl = """<RU xmlns:py="http://genshi.edgewall.org/" py:if="1">OK</RU>"""
        res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "<RU>OK</RU>")

    def test_render_genshi_variable(self):
        tmpl = """<RU>$var</RU>"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_genshi_tmpl(tmpl, ctx)
        self.assertEqual(res, "<RU>OK</RU>")

    def test_render_genshi_variable_replace(self):
        tmpl = """<RU xmlns:py="http://genshi.edgewall.org/" py:content="var">not ok</RU>"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_genshi_tmpl(tmpl, ctx)
        self.assertEqual(res, "<RU>OK</RU>")

    ### Tests for cheetah template (line-oriented and xml-friendly)
    @pytest.mark.skipif(not HAS_CHEETAH, reason="The Cheetah Python module is missing.")
    def test_render_cheetah_sanity(self):
        tmpl = """OK"""
        res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    @pytest.mark.skipif(not HAS_CHEETAH, reason="The Cheetah Python module is missing.")
    def test_render_cheetah_evaluate(self):
        tmpl = """<%="OK"%>"""
        res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
        self.assertEqual(res, "OK")

    @pytest.mark.skipif(not HAS_CHEETAH, reason="The Cheetah Python module is missing.")
    def test_render_cheetah_evaluate_xml(self):
        tmpl = """
        <% if 1: %>
        OK
        <% pass %>
        """
        res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
        stripped = res.strip()
        self.assertEqual(stripped, "OK")

    @pytest.mark.skipif(not HAS_CHEETAH, reason="The Cheetah Python module is missing.")
    def test_render_cheetah_evaluate_text(self):
        tmpl = """
        #if 1
        OK
        #end if
        """

        res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
        stripped = res.strip()
        self.assertEqual(stripped, "OK")

    @pytest.mark.skipif(not HAS_CHEETAH, reason="The Cheetah Python module is missing.")
    def test_render_cheetah_variable(self):
        tmpl = """$var"""

        ctx = dict(self.context)
        ctx["var"] = "OK"
        res = salt.utils.templates.render_cheetah_tmpl(tmpl, ctx)
        self.assertEqual(res.strip(), "OK")

    def test_render_jinja_cve_2021_25283(self):
        tmpl = """{{ [].__class__ }}"""
        ctx = dict(self.context)
        ctx["var"] = "OK"
        with pytest.raises(salt.exceptions.SaltRenderError):
            salt.utils.templates.render_jinja_tmpl(tmpl, ctx)


class MockRender:
    def __call__(self, tplstr, context, tmplpath=None):
        self.tplstr = tplstr
        self.context = context
        self.tmplpath = tmplpath
        return tplstr


class WrapRenderTestCase(TestCase):
    def assertDictContainsAll(self, actual, **expected):
        """Make sure dictionary contains at least all expected values"""
        actual = {key: actual[key] for key in expected if key in actual}
        self.assertEqual(expected, actual)

    def _test_generated_sls_context(self, tmplpath, sls, **expected):
        """Generic SLS Context Test"""
        # DeNormalize tmplpath
        tmplpath = str(PurePath(PurePosixPath(tmplpath)))
        if tmplpath.startswith("\\"):
            tmplpath = "C:{}".format(tmplpath)
        expected["tplpath"] = tmplpath
        actual = salt.utils.templates.generate_sls_context(tmplpath, sls)
        self.assertDictContainsAll(actual, **expected)

    @patch("salt.utils.templates.generate_sls_context")
    @with_tempdir()
    def test_sls_context_call(self, tempdir, generate_sls_context):
        """Check that generate_sls_context is called with proper parameters"""
        sls = "foo.bar"
        tmplpath = "/tmp/foo/bar.sls"

        slsfile = os.path.join(tempdir, "foo")
        with salt.utils.files.fopen(slsfile, "w") as fp:
            fp.write("{{ slspath }}")
        context = {"opts": {}, "saltenv": "base", "sls": sls}
        render = MockRender()
        wrapped = salt.utils.templates.wrap_tmpl_func(render)
        res = wrapped(slsfile, context=context, tmplpath=tmplpath)
        generate_sls_context.assert_called_with(tmplpath, sls)

    @patch("salt.utils.templates.generate_sls_context")
    @with_tempdir()
    def test_sls_context_no_call(self, tempdir, generate_sls_context):
        """Check that generate_sls_context is not called if sls is not set"""
        sls = "foo.bar"
        tmplpath = "/tmp/foo/bar.sls"

        slsfile = os.path.join(tempdir, "foo")
        with salt.utils.files.fopen(slsfile, "w") as fp:
            fp.write("{{ slspath }}")
        context = {"opts": {}, "saltenv": "base"}
        render = MockRender()
        wrapped = salt.utils.templates.wrap_tmpl_func(render)
        res = wrapped(slsfile, context=context, tmplpath=tmplpath)
        generate_sls_context.assert_not_called()

    def test_generate_sls_context__top_level(self):
        """generate_sls_context - top_level Use case"""
        self._test_generated_sls_context(
            "/tmp/boo.sls",
            "boo",
            tplfile="boo.sls",
            tpldir=".",
            tpldot="",
            slsdotpath="",
            slscolonpath="",
            sls_path="",
            slspath="",
        )

    def test_generate_sls_context__one_level_init_implicit(self):
        """generate_sls_context - Basic one level with implicit init.sls"""
        self._test_generated_sls_context(
            "/tmp/foo/init.sls",
            "foo",
            tplfile="foo/init.sls",
            tpldir="foo",
            tpldot="foo",
            slsdotpath="foo",
            slscolonpath="foo",
            sls_path="foo",
            slspath="foo",
        )

    def test_generate_sls_context__one_level_init_explicit(self):
        """generate_sls_context - Basic one level with explicit init.sls"""
        self._test_generated_sls_context(
            "/tmp/foo/init.sls",
            "foo.init",
            tplfile="foo/init.sls",
            tpldir="foo",
            tpldot="foo",
            slsdotpath="foo",
            slscolonpath="foo",
            sls_path="foo",
            slspath="foo",
        )

    def test_generate_sls_context__one_level(self):
        """generate_sls_context - Basic one level with name"""
        self._test_generated_sls_context(
            "/tmp/foo/boo.sls",
            "foo.boo",
            tplfile="foo/boo.sls",
            tpldir="foo",
            tpldot="foo",
            slsdotpath="foo",
            slscolonpath="foo",
            sls_path="foo",
            slspath="foo",
        )

    def test_generate_sls_context__one_level_repeating(self):
        """generate_sls_context - Basic one level with name same as dir

        (Issue #56410)
        """
        self._test_generated_sls_context(
            "/tmp/foo/foo.sls",
            "foo.foo",
            tplfile="foo/foo.sls",
            tpldir="foo",
            tpldot="foo",
            slsdotpath="foo",
            slscolonpath="foo",
            sls_path="foo",
            slspath="foo",
        )

    def test_generate_sls_context__two_level_init_implicit(self):
        """generate_sls_context - Basic two level with implicit init.sls"""
        self._test_generated_sls_context(
            "/tmp/foo/bar/init.sls",
            "foo.bar",
            tplfile="foo/bar/init.sls",
            tpldir="foo/bar",
            tpldot="foo.bar",
            slsdotpath="foo.bar",
            slscolonpath="foo:bar",
            sls_path="foo_bar",
            slspath="foo/bar",
        )

    def test_generate_sls_context__two_level_init_explicit(self):
        """generate_sls_context - Basic two level with explicit init.sls"""
        self._test_generated_sls_context(
            "/tmp/foo/bar/init.sls",
            "foo.bar.init",
            tplfile="foo/bar/init.sls",
            tpldir="foo/bar",
            tpldot="foo.bar",
            slsdotpath="foo.bar",
            slscolonpath="foo:bar",
            sls_path="foo_bar",
            slspath="foo/bar",
        )

    def test_generate_sls_context__two_level(self):
        """generate_sls_context - Basic two level with name"""
        self._test_generated_sls_context(
            "/tmp/foo/bar/boo.sls",
            "foo.bar.boo",
            tplfile="foo/bar/boo.sls",
            tpldir="foo/bar",
            tpldot="foo.bar",
            slsdotpath="foo.bar",
            slscolonpath="foo:bar",
            sls_path="foo_bar",
            slspath="foo/bar",
        )

    def test_generate_sls_context__two_level_repeating(self):
        """generate_sls_context - Basic two level with name same as dir

        (Issue #56410)
        """
        self._test_generated_sls_context(
            "/tmp/foo/foo/foo.sls",
            "foo.foo.foo",
            tplfile="foo/foo/foo.sls",
            tpldir="foo/foo",
            tpldot="foo.foo",
            slsdotpath="foo.foo",
            slscolonpath="foo:foo",
            sls_path="foo_foo",
            slspath="foo/foo",
        )

    @pytest.mark.skip_on_windows
    def test_generate_sls_context__backslash_in_path(self):
        """generate_sls_context - Handle backslash in path on non-windows"""
        self._test_generated_sls_context(
            "/tmp/foo/foo\\foo.sls",
            "foo.foo\\foo",
            tplfile="foo/foo\\foo.sls",
            tpldir="foo",
            tpldot="foo",
            slsdotpath="foo",
            slscolonpath="foo",
            sls_path="foo",
            slspath="foo",
        )