Run pyupgrade of the modified files from the merge forward

Signed-off-by: Pedro Algarvio <palgarvio@vmware.com>
This commit is contained in:
Pedro Algarvio 2023-06-11 10:03:42 +01:00
parent 4886b6de2d
commit 5069c1f916
No known key found for this signature in database
GPG key ID: BB36BF6584A298FF
31 changed files with 621 additions and 695 deletions

36
salt/cache/consul.py vendored
View file

@ -119,33 +119,29 @@ def store(bank, key, data):
"""
Store a key value.
"""
c_key = "{}/{}".format(bank, key)
tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}"
tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
c_data = salt.payload.dumps(data)
api.kv.put(c_key, c_data)
api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error writing the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error writing the key, {c_key}: {exc}")
def fetch(bank, key):
"""
Fetch a key value.
"""
c_key = "{}/{}".format(bank, key)
c_key = f"{bank}/{key}"
try:
_, value = api.kv.get(c_key)
if value is None:
return {}
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
def flush(bank, key=None):
@ -156,16 +152,14 @@ def flush(bank, key=None):
c_key = bank
tstamp_key = None
else:
c_key = "{}/{}".format(bank, key)
tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}"
tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
if tstamp_key:
api.kv.delete(tstamp_key)
return api.kv.delete(c_key, recurse=key is None)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error removing the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error removing the key, {c_key}: {exc}")
def list_(bank):
@ -175,9 +169,7 @@ def list_(bank):
try:
_, keys = api.kv.get(bank + "/", keys=True, separator="/")
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
'There was an error getting the key "{}": {}'.format(bank, exc)
)
raise SaltCacheError(f'There was an error getting the key "{bank}": {exc}')
if keys is None:
keys = []
else:
@ -198,9 +190,7 @@ def contains(bank, key):
c_key = "{}/{}".format(bank, key or "")
_, value = api.kv.get(c_key, keys=True)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error getting the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error getting the key, {c_key}: {exc}")
return value is not None
@ -209,13 +199,11 @@ def updated(bank, key):
Return the Unix Epoch timestamp of when the key was last updated. Return
None if key is not found.
"""
c_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}{_tstamp_suffix}"
try:
_, value = api.kv.get(c_key)
if value is None:
return None
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")

View file

@ -421,7 +421,7 @@ def create(vm_):
__utils__["cloud.fire_event"](
"event",
"waiting for ssh",
"salt/cloud/{}/waiting_for_ssh".format(name),
f"salt/cloud/{name}/waiting_for_ssh",
sock_dir=__opts__["sock_dir"],
args={"ip_address": vm_["ssh_host"]},
transport=__opts__["transport"],

View file

@ -72,7 +72,7 @@ def _cert_file(name, cert_type):
"""
Return expected path of a Let's Encrypt live cert
"""
return os.path.join(LE_LIVE, name, "{}.pem".format(cert_type))
return os.path.join(LE_LIVE, name, f"{cert_type}.pem")
def _expires(name):
@ -88,9 +88,9 @@ def _expires(name):
expiry = __salt__["tls.cert_info"](cert_file).get("not_after", 0)
# Cobble it together using the openssl binary
else:
openssl_cmd = "openssl x509 -in {} -noout -enddate".format(cert_file)
openssl_cmd = f"openssl x509 -in {cert_file} -noout -enddate"
# No %e format on my Linux'es here
strptime_sux_cmd = 'date --date="$({} | cut -d= -f2)" +%s'.format(openssl_cmd)
strptime_sux_cmd = f'date --date="$({openssl_cmd} | cut -d= -f2)" +%s'
expiry = float(__salt__["cmd.shell"](strptime_sux_cmd, output_loglevel="quiet"))
# expiry = datetime.datetime.strptime(expiry.split('=', 1)[-1], '%b %e %H:%M:%S %Y %Z')
return datetime.datetime.fromtimestamp(expiry)
@ -195,10 +195,10 @@ def cert(
cmd.append("--renew-by-default")
renew = True
if server:
cmd.append("--server {}".format(server))
cmd.append(f"--server {server}")
if certname:
cmd.append("--cert-name {}".format(certname))
cmd.append(f"--cert-name {certname}")
if test_cert:
if server:
@ -211,41 +211,41 @@ def cert(
if webroot:
cmd.append("--authenticator webroot")
if webroot is not True:
cmd.append("--webroot-path {}".format(webroot))
cmd.append(f"--webroot-path {webroot}")
elif dns_plugin in supported_dns_plugins:
if dns_plugin == "cloudflare":
cmd.append("--dns-cloudflare")
cmd.append("--dns-cloudflare-credentials {}".format(dns_plugin_credentials))
cmd.append(f"--dns-cloudflare-credentials {dns_plugin_credentials}")
else:
return {
"result": False,
"comment": "DNS plugin '{}' is not supported".format(dns_plugin),
"comment": f"DNS plugin '{dns_plugin}' is not supported",
}
else:
cmd.append("--authenticator standalone")
if email:
cmd.append("--email {}".format(email))
cmd.append(f"--email {email}")
if keysize:
cmd.append("--rsa-key-size {}".format(keysize))
cmd.append(f"--rsa-key-size {keysize}")
cmd.append("--domains {}".format(name))
cmd.append(f"--domains {name}")
if aliases is not None:
for dns in aliases:
cmd.append("--domains {}".format(dns))
cmd.append(f"--domains {dns}")
if preferred_challenges:
cmd.append("--preferred-challenges {}".format(preferred_challenges))
cmd.append(f"--preferred-challenges {preferred_challenges}")
if tls_sni_01_port:
cmd.append("--tls-sni-01-port {}".format(tls_sni_01_port))
cmd.append(f"--tls-sni-01-port {tls_sni_01_port}")
if tls_sni_01_address:
cmd.append("--tls-sni-01-address {}".format(tls_sni_01_address))
cmd.append(f"--tls-sni-01-address {tls_sni_01_address}")
if http_01_port:
cmd.append("--http-01-port {}".format(http_01_port))
cmd.append(f"--http-01-port {http_01_port}")
if http_01_address:
cmd.append("--http-01-address {}".format(http_01_address))
cmd.append(f"--http-01-address {http_01_address}")
res = __salt__["cmd.run_all"](" ".join(cmd))
@ -269,13 +269,13 @@ def cert(
}
if "no action taken" in res["stdout"]:
comment = "Certificate {} unchanged".format(cert_file)
comment = f"Certificate {cert_file} unchanged"
result = None
elif renew:
comment = "Certificate {} renewed".format(certname)
comment = f"Certificate {certname} renewed"
result = True
else:
comment = "Certificate {} obtained".format(certname)
comment = f"Certificate {certname} obtained"
result = True
ret = {
@ -339,7 +339,7 @@ def info(name):
cert_info = __salt__["x509.read_certificate"](cert_file)
else:
# Cobble it together using the openssl binary
openssl_cmd = "openssl x509 -in {} -noout -text".format(cert_file)
openssl_cmd = f"openssl x509 -in {cert_file} -noout -text"
cert_info = {"text": __salt__["cmd.run"](openssl_cmd, output_loglevel="quiet")}
return cert_info

View file

@ -207,14 +207,14 @@ if not HAS_APT:
if self.architectures:
opts.append("arch={}".format(",".join(self.architectures)))
if self.signedby:
opts.append("signed-by={}".format(self.signedby))
opts.append(f"signed-by={self.signedby}")
if opts:
repo_line.append("[{}]".format(" ".join(opts)))
repo_line = repo_line + [self.uri, self.dist, " ".join(self.comps)]
if self.comment:
repo_line.append("#{}".format(self.comment))
repo_line.append(f"#{self.comment}")
return " ".join(repo_line) + "\n"
def _parse_sources(self, line):
@ -277,7 +277,7 @@ if not HAS_APT:
architectures = "arch={}".format(",".join(architectures))
opts_count.append(architectures)
if signedby:
signedby = "signed-by={}".format(signedby)
signedby = f"signed-by={signedby}"
opts_count.append(signedby)
if len(opts_count) > 1:
opts_line = "[" + " ".join(opts_count) + "]"
@ -340,7 +340,7 @@ def _reconstruct_ppa_name(owner_name, ppa_name):
"""
Stringify PPA name from args.
"""
return "ppa:{}/{}".format(owner_name, ppa_name)
return f"ppa:{owner_name}/{ppa_name}"
def _call_apt(args, scope=True, **kwargs):
@ -353,7 +353,7 @@ def _call_apt(args, scope=True, **kwargs):
and salt.utils.systemd.has_scope(__context__)
and __salt__["config.get"]("systemd.scope", True)
):
cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
cmd.extend(["systemd-run", "--scope", "--description", f'"{__name__}"'])
cmd.extend(args)
params = {
@ -465,7 +465,7 @@ def latest_version(*names, **kwargs):
for name in names:
ret[name] = ""
pkgs = list_pkgs(versions_as_list=True)
repo = ["-o", "APT::Default-Release={}".format(fromrepo)] if fromrepo else None
repo = ["-o", f"APT::Default-Release={fromrepo}"] if fromrepo else None
# Refresh before looking for the latest version available
if refresh:
@ -942,7 +942,7 @@ def install(
continue
else:
version_num = target
pkgstr = "{}={}".format(pkgname, version_num)
pkgstr = f"{pkgname}={version_num}"
else:
pkgstr = pkgpath
@ -1318,7 +1318,7 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
]
for option in dpkg_options:
cmd.append("-o")
cmd.append("DPkg::Options::={}".format(option))
cmd.append(f"DPkg::Options::={option}")
if kwargs.get("force_yes", False):
cmd.append("--force-yes")
@ -1391,15 +1391,15 @@ def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
state = get_selections(pattern=target, state="hold")
if not state:
ret[target]["comment"] = "Package {} not currently held.".format(target)
ret[target]["comment"] = f"Package {target} not currently held."
elif not salt.utils.data.is_true(state.get("hold", False)):
if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
ret[target]["comment"] = "Package {} is set to be held.".format(target)
ret[target]["comment"] = f"Package {target} is set to be held."
else:
result = set_selections(selection={"hold": [target]})
ret[target].update(changes=result[target], result=True)
ret[target]["comment"] = "Package {} is now being held.".format(target)
ret[target]["comment"] = f"Package {target} is now being held."
else:
ret[target].update(result=True)
ret[target]["comment"] = "Package {} is already set to be held.".format(
@ -1456,7 +1456,7 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
state = get_selections(pattern=target)
if not state:
ret[target]["comment"] = "Package {} does not have a state.".format(target)
ret[target]["comment"] = f"Package {target} does not have a state."
elif salt.utils.data.is_true(state.get("hold", False)):
if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
@ -1552,7 +1552,7 @@ def list_pkgs(
if __grains__.get("cpuarch", "") == "x86_64":
osarch = __grains__.get("osarch", "")
if arch != "all" and osarch == "amd64" and osarch != arch:
name += ":{}".format(arch)
name += f":{arch}"
if cols:
if ("install" in linetype or "hold" in linetype) and "installed" in status:
__salt__["pkg_resource.add_pkg"](ret["installed"], name, version_num)
@ -1780,7 +1780,7 @@ def _consolidate_repo_sources(sources):
Consolidate APT sources.
"""
if not isinstance(sources, SourcesList):
raise TypeError("'{}' not a '{}'".format(type(sources), SourcesList))
raise TypeError(f"'{type(sources)}' not a '{SourcesList}'")
consolidated = {}
delete_files = set()
@ -1961,7 +1961,7 @@ def get_repo(repo, **kwargs):
dist = __grains__["oscodename"]
owner_name, ppa_name = repo[4:].split("/")
if ppa_auth:
auth_info = "{}@".format(ppa_auth)
auth_info = f"{ppa_auth}@"
repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
@ -1974,7 +1974,7 @@ def get_repo(repo, **kwargs):
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
except NameError as name_error:
raise CommandExecutionError(
"Could not find ppa {}: {}".format(repo, name_error)
f"Could not find ppa {repo}: {name_error}"
)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
@ -2000,7 +2000,7 @@ def get_repo(repo, **kwargs):
)
except SyntaxError:
raise CommandExecutionError(
"Error: repo '{}' is not a well formatted definition".format(repo)
f"Error: repo '{repo}' is not a well formatted definition"
)
for source in repos.values():
@ -2070,7 +2070,7 @@ def del_repo(repo, **kwargs):
) = _split_repo_str(repo)
except SyntaxError:
raise SaltInvocationError(
"Error: repo '{}' not a well formatted definition".format(repo)
f"Error: repo '{repo}' not a well formatted definition"
)
for source in repos:
@ -2132,9 +2132,7 @@ def del_repo(repo, **kwargs):
refresh_db()
return ret
raise CommandExecutionError(
"Repo {} doesn't exist in the sources.list(s)".format(repo)
)
raise CommandExecutionError(f"Repo {repo} doesn't exist in the sources.list(s)")
def _convert_if_int(value):
@ -2427,11 +2425,11 @@ def add_repo_key(
else:
cmd.extend(["adv", "--batch", "--keyserver", keyserver, "--recv", keyid])
elif keyid:
error_msg = "No keyserver specified for keyid: {}".format(keyid)
error_msg = f"No keyserver specified for keyid: {keyid}"
raise SaltInvocationError(error_msg)
else:
raise TypeError(
"{}() takes at least 1 argument (0 given)".format(add_repo_key.__name__)
f"{add_repo_key.__name__}() takes at least 1 argument (0 given)"
)
cmd_ret = _call_apt(cmd, **kwargs)
@ -2731,7 +2729,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
raise CommandExecutionError(
'cannot parse "ppa:" style repo definitions: {}'.format(repo)
f'cannot parse "ppa:" style repo definitions: {repo}'
)
sources = SourcesList()
@ -2769,9 +2767,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
repo_signedby,
) = _split_repo_str(repo)
except SyntaxError:
raise SyntaxError(
"Error: repo '{}' not a well formatted definition".format(repo)
)
raise SyntaxError(f"Error: repo '{repo}' not a well formatted definition")
full_comp_list = {comp.strip() for comp in repo_comps}
no_proxy = __salt__["config.option"]("no_proxy")
@ -2813,7 +2809,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
"adv",
"--batch",
"--keyserver-options",
"http-proxy={}".format(http_proxy_url),
f"http-proxy={http_proxy_url}",
"--keyserver",
keyserver,
"--logger-fd",
@ -2859,7 +2855,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
key_url = kwargs["key_url"]
fn_ = pathlib.Path(__salt__["cp.cache_file"](key_url, saltenv))
if not fn_:
raise CommandExecutionError("Error: file not found: {}".format(key_url))
raise CommandExecutionError(f"Error: file not found: {key_url}")
if kwargs["signedby"] and fn_.name != kwargs["signedby"].name:
# override the signedby defined in the name with the
@ -2879,9 +2875,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
cmd = ["apt-key", "add", str(fn_)]
out = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
if not out.upper().startswith("OK"):
raise CommandExecutionError(
"Error: failed to add key from {}".format(key_url)
)
raise CommandExecutionError(f"Error: failed to add key from {key_url}")
elif "key_text" in kwargs:
key_text = kwargs["key_text"]
@ -2890,9 +2884,7 @@ def mod_repo(repo, saltenv="base", aptkey=True, **kwargs):
cmd, stdin=key_text, python_shell=False, **kwargs
)
if not out.upper().startswith("OK"):
raise CommandExecutionError(
"Error: failed to add key:\n{}".format(key_text)
)
raise CommandExecutionError(f"Error: failed to add key:\n{key_text}")
if "comps" in kwargs:
kwargs["comps"] = [comp.strip() for comp in kwargs["comps"].split(",")]
@ -3276,7 +3268,7 @@ def set_selections(path=None, selection=None, clear=False, saltenv="base"):
salt.utils.yaml.parser.ParserError,
salt.utils.yaml.scanner.ScannerError,
) as exc:
raise SaltInvocationError("Improperly-formatted selection: {}".format(exc))
raise SaltInvocationError(f"Improperly-formatted selection: {exc}")
if path:
path = __salt__["cp.cache_file"](path, saltenv)
@ -3312,7 +3304,7 @@ def set_selections(path=None, selection=None, clear=False, saltenv="base"):
if _state == sel_revmap.get(_pkg):
continue
cmd = ["dpkg", "--set-selections"]
cmd_in = "{} {}".format(_pkg, _state)
cmd_in = f"{_pkg} {_state}"
if not __opts__["test"]:
result = _call_apt(cmd, scope=False, stdin=cmd_in)
if result["retcode"] != 0:
@ -3508,9 +3500,9 @@ def _get_http_proxy_url():
# Set http_proxy_url for use in various internet facing actions...eg apt-key adv
if host and port:
if username and password:
http_proxy_url = "http://{}:{}@{}:{}".format(username, password, host, port)
http_proxy_url = f"http://{username}:{password}@{host}:{port}"
else:
http_proxy_url = "http://{}:{}".format(host, port)
http_proxy_url = f"http://{host}:{port}"
return http_proxy_url

View file

@ -61,7 +61,7 @@ def _config_getter(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
Common code for config.get_* functions, builds and runs the git CLI command
@ -224,7 +224,7 @@ def _git_run(
redirect_stderr=False,
saltenv="base",
output_encoding=None,
**kwargs
**kwargs,
):
"""
simple, throw an exception with the error message on an error return code.
@ -323,7 +323,7 @@ def _git_run(
ignore_retcode=ignore_retcode,
redirect_stderr=redirect_stderr,
output_encoding=output_encoding,
**kwargs
**kwargs,
)
finally:
if tmp_ssh_wrapper:
@ -390,7 +390,7 @@ def _git_run(
ignore_retcode=ignore_retcode,
redirect_stderr=redirect_stderr,
output_encoding=output_encoding,
**kwargs
**kwargs,
)
if result["retcode"] == 0:
@ -403,7 +403,7 @@ def _git_run(
)
err = result["stdout" if redirect_stderr else "stderr"]
if err:
msg += ": {}".format(salt.utils.url.redact_http_basic_auth(err))
msg += f": {salt.utils.url.redact_http_basic_auth(err)}"
raise CommandExecutionError(msg)
return result
@ -564,7 +564,7 @@ def archive(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionchanged:: 2015.8.0
@ -1215,7 +1215,7 @@ def config_get(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
Get the value of a key in the git configuration file
@ -1293,7 +1293,7 @@ def config_get(
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding,
**kwargs
**kwargs,
)
# git config --get exits with retcode of 1 when key does not exist
@ -1318,7 +1318,7 @@ def config_get_regexp(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
r"""
.. versionadded:: 2015.8.0
@ -1395,7 +1395,7 @@ def config_get_regexp(
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding,
**kwargs
**kwargs,
)
# git config --get exits with retcode of 1 when key does not exist
@ -1425,7 +1425,7 @@ def config_set(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionchanged:: 2015.8.0
@ -1574,7 +1574,7 @@ def config_set(
cwd=cwd,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding,
**{"all": True, "global": global_}
**{"all": True, "global": global_},
)
@ -1586,7 +1586,7 @@ def config_unset(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionadded:: 2015.8.0
@ -1695,9 +1695,9 @@ def config_unset(
)
is None
):
raise CommandExecutionError("Key '{}' does not exist".format(key))
raise CommandExecutionError(f"Key '{key}' does not exist")
else:
msg = "Multiple values exist for key '{}'".format(key)
msg = f"Multiple values exist for key '{key}'"
if value_regex is not None:
msg += " and value_regex matches multiple values"
raise CommandExecutionError(msg)
@ -2355,9 +2355,9 @@ def init(
if bare:
command.append("--bare")
if template is not None:
command.append("--template={}".format(template))
command.append(f"--template={template}")
if separate_git_dir is not None:
command.append("--separate-git-dir={}".format(separate_git_dir))
command.append(f"--separate-git-dir={separate_git_dir}")
if shared is not None:
if isinstance(shared, int) and not isinstance(shared, bool):
shared = "0" + str(shared)
@ -2365,7 +2365,7 @@ def init(
# Using lower here because booleans would be capitalized when
# converted to a string.
shared = str(shared).lower()
command.append("--shared={}".format(shared))
command.append(f"--shared={shared}")
command.extend(_format_opts(opts))
command.append(cwd)
return _git_run(
@ -2814,7 +2814,7 @@ def list_worktrees(
worktree_root = os.path.join(cwd, worktree_root)
if not os.path.isdir(worktree_root):
raise CommandExecutionError(
"Worktree admin directory {} not present".format(worktree_root)
f"Worktree admin directory {worktree_root} not present"
)
def _read_file(path):
@ -3081,7 +3081,7 @@ def merge(
identity=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
Interface to `git-merge(1)`_
@ -3205,7 +3205,7 @@ def merge_base(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionadded:: 2015.8.0
@ -3487,7 +3487,7 @@ def merge_tree(
base = merge_base(cwd, refs=[ref1, ref2], output_encoding=output_encoding)
except (SaltInvocationError, CommandExecutionError):
raise CommandExecutionError(
"Unable to determine merge base for {} and {}".format(ref1, ref2)
f"Unable to determine merge base for {ref1} and {ref2}"
)
command.extend([base, ref1, ref2])
return _git_run(
@ -3627,7 +3627,7 @@ def push(
ignore_retcode=False,
saltenv="base",
output_encoding=None,
**kwargs
**kwargs,
):
"""
Interface to `git-push(1)`_
@ -3927,7 +3927,7 @@ def remote_get(
)
if remote not in all_remotes:
raise CommandExecutionError(
"Remote '{}' not present in git checkout located at {}".format(remote, cwd)
f"Remote '{remote}' not present in git checkout located at {cwd}"
)
return all_remotes[remote]
@ -3944,7 +3944,7 @@ def remote_refs(
ignore_retcode=False,
output_encoding=None,
saltenv="base",
**kwargs
**kwargs,
):
"""
.. versionadded:: 2015.8.0
@ -4850,7 +4850,7 @@ def submodule(
ignore_retcode=False,
saltenv="base",
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionchanged:: 2015.8.0
@ -5290,7 +5290,7 @@ def worktree_add(
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs
**kwargs,
):
"""
.. versionadded:: 2015.8.0
@ -5602,5 +5602,5 @@ def worktree_rm(cwd, user=None, output_encoding=None):
try:
salt.utils.files.rm_rf(cwd)
except Exception as exc: # pylint: disable=broad-except
raise CommandExecutionError("Unable to remove {}: {}".format(cwd, exc))
raise CommandExecutionError(f"Unable to remove {cwd}: {exc}")
return True

View file

@ -175,7 +175,7 @@ def bridge_exists(br):
salt '*' openvswitch.bridge_exists br0
"""
cmd = "ovs-vsctl br-exists {}".format(br)
cmd = f"ovs-vsctl br-exists {br}"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
return _retcode_to_bool(retcode)
@ -215,8 +215,8 @@ def bridge_create(br, may_exist=True, parent=None, vlan=None):
raise ArgumentValueError("If parent is specified, vlan must also be specified.")
if vlan is not None and parent is None:
raise ArgumentValueError("If vlan is specified, parent must also be specified.")
param_parent = "" if parent is None else " {}".format(parent)
param_vlan = "" if vlan is None else " {}".format(vlan)
param_parent = "" if parent is None else f" {parent}"
param_vlan = "" if vlan is None else f" {vlan}"
cmd = "ovs-vsctl {1}add-br {0}{2}{3}".format(
br, param_may_exist, param_parent, param_vlan
)
@ -244,7 +244,7 @@ def bridge_delete(br, if_exists=True):
salt '*' openvswitch.bridge_delete br0
"""
param_if_exists = _param_if_exists(if_exists)
cmd = "ovs-vsctl {1}del-br {0}".format(br, param_if_exists)
cmd = f"ovs-vsctl {param_if_exists}del-br {br}"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
return _retcode_to_bool(retcode)
@ -271,7 +271,7 @@ def bridge_to_parent(br):
salt '*' openvswitch.bridge_to_parent br0
"""
cmd = "ovs-vsctl br-to-parent {}".format(br)
cmd = f"ovs-vsctl br-to-parent {br}"
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] != 0:
return False
@ -298,7 +298,7 @@ def bridge_to_vlan(br):
salt '*' openvswitch.bridge_to_parent br0
"""
cmd = "ovs-vsctl br-to-vlan {}".format(br)
cmd = f"ovs-vsctl br-to-vlan {br}"
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] != 0:
return False
@ -327,9 +327,9 @@ def port_add(br, port, may_exist=False, internal=False):
salt '*' openvswitch.port_add br0 8080
"""
param_may_exist = _param_may_exist(may_exist)
cmd = "ovs-vsctl {2}add-port {0} {1}".format(br, port, param_may_exist)
cmd = f"ovs-vsctl {param_may_exist}add-port {br} {port}"
if internal:
cmd += " -- set interface {} type=internal".format(port)
cmd += f" -- set interface {port} type=internal"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
return _retcode_to_bool(retcode)
@ -358,9 +358,9 @@ def port_remove(br, port, if_exists=True):
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = "ovs-vsctl {1}del-port {0}".format(port, param_if_exists)
cmd = f"ovs-vsctl {param_if_exists}del-port {port}"
else:
cmd = "ovs-vsctl {2}del-port {0} {1}".format(br, port, param_if_exists)
cmd = f"ovs-vsctl {param_if_exists}del-port {br} {port}"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
return _retcode_to_bool(retcode)
@ -384,7 +384,7 @@ def port_list(br):
salt '*' openvswitch.port_list br0
"""
cmd = "ovs-vsctl list-ports {}".format(br)
cmd = f"ovs-vsctl list-ports {br}"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
stdout = result["stdout"]
@ -409,7 +409,7 @@ def port_get_tag(port):
salt '*' openvswitch.port_get_tag tap0
"""
cmd = "ovs-vsctl get port {} tag".format(port)
cmd = f"ovs-vsctl get port {port} tag"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
stdout = result["stdout"]
@ -434,7 +434,7 @@ def interface_get_options(port):
salt '*' openvswitch.interface_get_options tap0
"""
cmd = "ovs-vsctl get interface {} options".format(port)
cmd = f"ovs-vsctl get interface {port} options"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
stdout = result["stdout"]
@ -459,7 +459,7 @@ def interface_get_type(port):
salt '*' openvswitch.interface_get_type tap0
"""
cmd = "ovs-vsctl get interface {} type".format(port)
cmd = f"ovs-vsctl get interface {port} type"
result = __salt__["cmd.run_all"](cmd)
retcode = result["retcode"]
stdout = result["stdout"]
@ -495,15 +495,15 @@ def port_create_vlan(br, port, id, internal=False):
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = "ovs-vsctl set port {} tag={}".format(port, id)
cmd = f"ovs-vsctl set port {port} tag={id}"
if internal:
cmd += " -- set interface {} type=internal".format(port)
cmd += f" -- set interface {port} type=internal"
result = __salt__["cmd.run_all"](cmd)
return _retcode_to_bool(result["retcode"])
else:
cmd = "ovs-vsctl add-port {} {} tag={}".format(br, port, id)
cmd = f"ovs-vsctl add-port {br} {port} tag={id}"
if internal:
cmd += " -- set interface {} type=internal".format(port)
cmd += f" -- set interface {port} type=internal"
result = __salt__["cmd.run_all"](cmd)
return _retcode_to_bool(result["retcode"])
@ -622,7 +622,7 @@ def db_get(table, record, column, if_exists=False):
salt '*' openvswitch.db_get Port br0 vlan_mode
"""
cmd = ["ovs-vsctl", "--format=json", "--columns={}".format(column)]
cmd = ["ovs-vsctl", "--format=json", f"--columns={column}"]
if if_exists:
cmd += ["--if-exists"]
cmd += ["list", table, record]
@ -666,7 +666,7 @@ def db_set(table, record, column, value, if_exists=False):
cmd = ["ovs-vsctl"]
if if_exists:
cmd += ["--if-exists"]
cmd += ["set", table, record, "{}={}".format(column, json.dumps(value))]
cmd += ["set", table, record, f"{column}={json.dumps(value)}"]
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] != 0:
return result["stderr"]

View file

@ -184,7 +184,7 @@ def get(
ret = salt.utils.data.traverse_dict_and_list(pillar_dict, key, default, delimiter)
if ret is KeyError:
raise KeyError("Pillar key not found: {}".format(key))
raise KeyError(f"Pillar key not found: {key}")
return ret
@ -264,9 +264,7 @@ def items(*args, **kwargs):
valid_rend=__opts__["decrypt_pillar_renderers"],
)
except Exception as exc: # pylint: disable=broad-except
raise CommandExecutionError(
"Failed to decrypt pillar override: {}".format(exc)
)
raise CommandExecutionError(f"Failed to decrypt pillar override: {exc}")
pillar = salt.pillar.get_pillar(
__opts__,
@ -295,7 +293,7 @@ def _obfuscate_inner(var):
elif isinstance(var, (list, set, tuple)):
return type(var)(_obfuscate_inner(v) for v in var)
else:
return "<{}>".format(var.__class__.__name__)
return f"<{var.__class__.__name__}>"
def obfuscate(*args, **kwargs):
@ -538,10 +536,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, KeyError, delimiter)
if ret is KeyError:
raise KeyError("Pillar key not found: {}".format(key))
raise KeyError(f"Pillar key not found: {key}")
if not isinstance(ret, dict):
raise ValueError("Pillar value in key {} is not a dict".format(key))
raise ValueError(f"Pillar value in key {key} is not a dict")
return list(ret)

View file

@ -113,7 +113,7 @@ def __virtual__():
"load rh_service.py as virtual 'service'",
)
return __virtualname__
return (False, "Cannot load rh_service module: OS not in {}".format(enable))
return (False, f"Cannot load rh_service module: OS not in {enable}")
def _runlevel():
@ -137,7 +137,7 @@ def _chkconfig_add(name):
/etc/init.d. The service is initially configured to be disabled at all
run-levels.
"""
cmd = "/sbin/chkconfig --add {}".format(name)
cmd = f"/sbin/chkconfig --add {name}"
if __salt__["cmd.retcode"](cmd, python_shell=False) == 0:
log.info('Added initscript "%s" to chkconfig', name)
return True
@ -150,7 +150,7 @@ def _service_is_upstart(name):
"""
Return True if the service is an upstart service, otherwise return False.
"""
return HAS_UPSTART and os.path.exists("/etc/init/{}.conf".format(name))
return HAS_UPSTART and os.path.exists(f"/etc/init/{name}.conf")
def _service_is_sysv(name):
@ -169,7 +169,7 @@ def _service_is_chkconfig(name):
"""
Return True if the service is managed by chkconfig.
"""
cmdline = "/sbin/chkconfig --list {}".format(name)
cmdline = f"/sbin/chkconfig --list {name}"
return (
__salt__["cmd.retcode"](cmdline, python_shell=False, ignore_retcode=True) == 0
)
@ -188,7 +188,7 @@ def _sysv_is_enabled(name, runlevel=None):
if runlevel is None:
runlevel = _runlevel()
return len(glob.glob("/etc/rc.d/rc{}.d/S??{}".format(runlevel, name))) > 0
return len(glob.glob(f"/etc/rc.d/rc{runlevel}.d/S??{name}")) > 0
def _chkconfig_is_enabled(name, runlevel=None):
@ -197,14 +197,14 @@ def _chkconfig_is_enabled(name, runlevel=None):
return ``False``. If ``runlevel`` is ``None``, then use the current
runlevel.
"""
cmdline = "/sbin/chkconfig --list {}".format(name)
cmdline = f"/sbin/chkconfig --list {name}"
result = __salt__["cmd.run_all"](cmdline, python_shell=False)
if runlevel is None:
runlevel = _runlevel()
if result["retcode"] == 0:
for row in result["stdout"].splitlines():
if "{}:on".format(runlevel) in row:
if f"{runlevel}:on" in row:
if row.split()[0] == name:
return True
elif row.split() == [name, "on"]:
@ -220,7 +220,7 @@ def _sysv_enable(name):
"""
if not _service_is_chkconfig(name) and not _chkconfig_add(name):
return False
cmd = "/sbin/chkconfig {} on".format(name)
cmd = f"/sbin/chkconfig {name} on"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -233,7 +233,7 @@ def _sysv_disable(name):
"""
if not _service_is_chkconfig(name) and not _chkconfig_add(name):
return False
cmd = "/sbin/chkconfig {} off".format(name)
cmd = f"/sbin/chkconfig {name} off"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -244,7 +244,7 @@ def _sysv_delete(name):
"""
if not _service_is_chkconfig(name):
return False
cmd = "/sbin/chkconfig --del {}".format(name)
cmd = f"/sbin/chkconfig --del {name}"
return not __salt__["cmd.retcode"](cmd)
@ -253,10 +253,10 @@ def _upstart_delete(name):
Delete an upstart service. This will only rename the .conf file
"""
if HAS_UPSTART:
if os.path.exists("/etc/init/{}.conf".format(name)):
if os.path.exists(f"/etc/init/{name}.conf"):
os.rename(
"/etc/init/{}.conf".format(name),
"/etc/init/{}.conf.removed".format(name),
f"/etc/init/{name}.conf",
f"/etc/init/{name}.conf.removed",
)
return True
@ -435,9 +435,9 @@ def start(name):
salt '*' service.start <service name>
"""
if _service_is_upstart(name):
cmd = "start {}".format(name)
cmd = f"start {name}"
else:
cmd = "/sbin/service {} start".format(name)
cmd = f"/sbin/service {name} start"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -452,9 +452,9 @@ def stop(name):
salt '*' service.stop <service name>
"""
if _service_is_upstart(name):
cmd = "stop {}".format(name)
cmd = f"stop {name}"
else:
cmd = "/sbin/service {} stop".format(name)
cmd = f"/sbin/service {name} stop"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -469,9 +469,9 @@ def restart(name):
salt '*' service.restart <service name>
"""
if _service_is_upstart(name):
cmd = "restart {}".format(name)
cmd = f"restart {name}"
else:
cmd = "/sbin/service {} restart".format(name)
cmd = f"/sbin/service {name} restart"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -486,9 +486,9 @@ def reload_(name):
salt '*' service.reload <service name>
"""
if _service_is_upstart(name):
cmd = "reload {}".format(name)
cmd = f"reload {name}"
else:
cmd = "/sbin/service {} reload".format(name)
cmd = f"/sbin/service {name} reload"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -526,12 +526,12 @@ def status(name, sig=None):
results = {}
for service in services:
if _service_is_upstart(service):
cmd = "status {}".format(service)
cmd = f"status {service}"
results[service] = "start/running" in __salt__["cmd.run"](
cmd, python_shell=False
)
else:
cmd = "/sbin/service {} status".format(service)
cmd = f"/sbin/service {service} status"
results[service] = (
__salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True)
== 0

View file

@ -143,14 +143,14 @@ def setenforce(mode):
mode = "0"
modestring = "disabled"
else:
return "Invalid mode {}".format(mode)
return f"Invalid mode {mode}"
elif isinstance(mode, int):
if mode:
mode = "1"
else:
mode = "0"
else:
return "Invalid mode {}".format(mode)
return f"Invalid mode {mode}"
# enforce file does not exist if currently disabled. Only for toggling enforcing/permissive
if getenforce() != "Disabled":
@ -204,9 +204,9 @@ def setsebool(boolean, value, persist=False):
salt '*' selinux.setsebool virt_use_usb off
"""
if persist:
cmd = "setsebool -P {} {}".format(boolean, value)
cmd = f"setsebool -P {boolean} {value}"
else:
cmd = "setsebool {} {}".format(boolean, value)
cmd = f"setsebool {boolean} {value}"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -227,7 +227,7 @@ def setsebools(pairs, persist=False):
else:
cmd = "setsebool "
for boolean, value in pairs.items():
cmd = "{} {}={}".format(cmd, boolean, value)
cmd = f"{cmd} {boolean}={value}"
return not __salt__["cmd.retcode"](cmd, python_shell=False)
@ -284,9 +284,9 @@ def setsemod(module, state):
.. versionadded:: 2016.3.0
"""
if state.lower() == "enabled":
cmd = "semodule -e {}".format(module)
cmd = f"semodule -e {module}"
elif state.lower() == "disabled":
cmd = "semodule -d {}".format(module)
cmd = f"semodule -d {module}"
return not __salt__["cmd.retcode"](cmd)
@ -304,7 +304,7 @@ def install_semod(module_path):
"""
if module_path.find("salt://") == 0:
module_path = __salt__["cp.cache_file"](module_path)
cmd = "semodule -i {}".format(module_path)
cmd = f"semodule -i {module_path}"
return not __salt__["cmd.retcode"](cmd)
@ -320,7 +320,7 @@ def remove_semod(module):
.. versionadded:: 2016.11.6
"""
cmd = "semodule -r {}".format(module)
cmd = f"semodule -r {module}"
return not __salt__["cmd.retcode"](cmd)
@ -376,7 +376,7 @@ def _validate_filetype(filetype):
specification. Throws an SaltInvocationError if it isn't.
"""
if filetype not in _SELINUX_FILETYPES.keys():
raise SaltInvocationError("Invalid filetype given: {}".format(filetype))
raise SaltInvocationError(f"Invalid filetype given: {filetype}")
return True
@ -394,7 +394,7 @@ def _parse_protocol_port(name, protocol, port):
protocol_port_pattern = r"^(tcp|udp)\/(([\d]+)\-?[\d]+)$"
name_parts = re.match(protocol_port_pattern, name)
if not name_parts:
name_parts = re.match(protocol_port_pattern, "{}/{}".format(protocol, port))
name_parts = re.match(protocol_port_pattern, f"{protocol}/{port}")
if not name_parts:
raise SaltInvocationError(
'Invalid name "{}" format and protocol and port not provided or invalid:'
@ -609,20 +609,20 @@ def _fcontext_add_or_delete_policy(
"""
if action not in ["add", "delete"]:
raise SaltInvocationError(
'Actions supported are "add" and "delete", not "{}".'.format(action)
f'Actions supported are "add" and "delete", not "{action}".'
)
cmd = "semanage fcontext --{}".format(action)
cmd = f"semanage fcontext --{action}"
# "semanage --ftype a" isn't valid on Centos 6,
# don't pass --ftype since "a" is the default filetype.
if filetype is not None and filetype != "a":
_validate_filetype(filetype)
cmd += " --ftype {}".format(filetype)
cmd += f" --ftype {filetype}"
if sel_type is not None:
cmd += " --type {}".format(sel_type)
cmd += f" --type {sel_type}"
if sel_user is not None:
cmd += " --seuser {}".format(sel_user)
cmd += f" --seuser {sel_user}"
if sel_level is not None:
cmd += " --range {}".format(sel_level)
cmd += f" --range {sel_level}"
cmd += " " + re.escape(name)
return __salt__["cmd.run_all"](cmd)
@ -841,15 +841,15 @@ def _port_add_or_delete_policy(
"""
if action not in ["add", "delete"]:
raise SaltInvocationError(
'Actions supported are "add" and "delete", not "{}".'.format(action)
f'Actions supported are "add" and "delete", not "{action}".'
)
if action == "add" and not sel_type:
raise SaltInvocationError("SELinux Type is required to add a policy")
(protocol, port) = _parse_protocol_port(name, protocol, port)
cmd = "semanage port --{} --proto {}".format(action, protocol)
cmd = f"semanage port --{action} --proto {protocol}"
if sel_type:
cmd += " --type {}".format(sel_type)
cmd += f" --type {sel_type}"
if sel_range:
cmd += " --range {}".format(sel_range)
cmd += " {}".format(port)
cmd += f" --range {sel_range}"
cmd += f" {port}"
return __salt__["cmd.run_all"](cmd)

View file

@ -4650,7 +4650,7 @@ class _policy_info:
"""
add quotes around the string
"""
return '"{}"'.format(val)
return f'"{val}"'
@classmethod
def _binary_enable_zero_disable_one_conversion(cls, val, **kwargs):
@ -4664,7 +4664,7 @@ class _policy_info:
elif ord(val) == 1:
return "Enabled"
else:
return "Invalid Value: {!r}".format(val)
return f"Invalid Value: {val!r}"
else:
return "Not Defined"
except TypeError:
@ -4806,9 +4806,9 @@ class _policy_info:
try:
userSid = win32security.LookupAccountSid("", _sid)
if userSid[1]:
userSid = "{1}\\{0}".format(userSid[0], userSid[1])
userSid = f"{userSid[1]}\\{userSid[0]}"
else:
userSid = "{}".format(userSid[0])
userSid = f"{userSid[0]}"
# TODO: This needs to be more specific
except Exception: # pylint: disable=broad-except
userSid = win32security.ConvertSidToStringSid(_sid)
@ -5000,7 +5000,7 @@ def _updateNamespace(item, new_namespace):
temp_item = item.tag[i + 1 :]
else:
temp_item = item.tag
item.tag = "{{{0}}}{1}".format(new_namespace, temp_item)
item.tag = f"{{{new_namespace}}}{temp_item}"
for child in item.getiterator():
if isinstance(child.tag, str):
temp_item = ""
@ -5009,7 +5009,7 @@ def _updateNamespace(item, new_namespace):
temp_item = child.tag[i + 1 :]
else:
temp_item = child.tag
child.tag = "{{{0}}}{1}".format(new_namespace, temp_item)
child.tag = f"{{{new_namespace}}}{temp_item}"
return item
@ -5077,10 +5077,10 @@ def _parse_xml(adm_file):
modified_xml = ""
with salt.utils.files.fopen(adm_file, "rb") as rfh:
file_hash = "{:X}".format(zlib.crc32(rfh.read()) & 0xFFFFFFFF)
file_hash = f"{zlib.crc32(rfh.read()) & 0xFFFFFFFF:X}"
name, ext = os.path.splitext(os.path.basename(adm_file))
hashed_filename = "{}-{}{}".format(name, file_hash, ext)
hashed_filename = f"{name}-{file_hash}{ext}"
cache_dir = os.path.join(__opts__["cachedir"], "lgpo", "policy_defs")
if not os.path.exists(cache_dir):
@ -5092,7 +5092,7 @@ def _parse_xml(adm_file):
log.debug("LGPO: Generating policy template cache for %s%s", name, ext)
# Remove old files, keep the cache clean
file_list = glob.glob(os.path.join(cache_dir, "{}*{}".format(name, ext)))
file_list = glob.glob(os.path.join(cache_dir, f"{name}*{ext}"))
for file_path in file_list:
os.remove(file_path)
@ -5650,7 +5650,7 @@ def _set_advaudit_value(option, value):
"""
# Set the values in both audit.csv files
if not _set_advaudit_file_data(option=option, value=value):
raise CommandExecutionError("Failed to set audit.csv option: {}".format(option))
raise CommandExecutionError(f"Failed to set audit.csv option: {option}")
# Apply the settings locally
if not _set_advaudit_pol_data(option=option, value=value):
# Only log this error, it will be in effect the next time the machine
@ -5695,7 +5695,7 @@ def _get_netsh_value(profile, option):
def _set_netsh_value(profile, section, option, value):
if section not in ("firewallpolicy", "settings", "logging", "state"):
raise ValueError("LGPO: Invalid section: {}".format(section))
raise ValueError(f"LGPO: Invalid section: {section}")
log.trace(
"LGPO: Setting the following\nProfile: %s\nSection: %s\nOption: %s\nValue: %s",
profile,
@ -5739,7 +5739,7 @@ def _load_secedit_data():
Returns:
str: The contents of the file generated by the secedit command
"""
f_exp = os.path.join(__opts__["cachedir"], "secedit-{}.txt".format(UUID))
f_exp = os.path.join(__opts__["cachedir"], f"secedit-{UUID}.txt")
try:
__salt__["cmd.run"](["secedit", "/export", "/cfg", f_exp])
with salt.utils.files.fopen(f_exp, encoding="utf-16") as fp:
@ -5789,7 +5789,7 @@ def _write_secedit_data(inf_data):
# Set file names
# The database must persist in order for the settings to remain in effect
f_sdb = os.path.join(os.getenv("WINDIR"), "security", "database", "salt.sdb")
f_inf = os.path.join(__opts__["cachedir"], "secedit-{}.inf".format(UUID))
f_inf = os.path.join(__opts__["cachedir"], f"secedit-{UUID}.inf")
try:
# Write the changes to the inf file
@ -5949,9 +5949,7 @@ def _getAdmlPresentationRefId(adml_data, ref_id):
"""
helper function to check for a presentation label for a policy element
"""
search_results = adml_data.xpath(
'//*[@*[local-name() = "refId"] = "{}"]'.format(ref_id)
)
search_results = adml_data.xpath(f'//*[@*[local-name() = "refId"] = "{ref_id}"]')
alternate_label = ""
if search_results:
for result in search_results:
@ -6217,7 +6215,7 @@ def _encode_string(value):
elif not isinstance(value, str):
# Should we raise an error here, or attempt to cast to a string
raise TypeError(
"Value {} is not a string type\nType: {}".format(repr(value), type(value))
f"Value {repr(value)} is not a string type\nType: {type(value)}"
)
return b"".join([value.encode("utf-16-le"), encoded_null])
@ -6258,7 +6256,7 @@ def _buildKnownDataSearchString(
encoded_semicolon,
chr(registry.vtype[reg_vtype]).encode("utf-32-le"),
encoded_semicolon,
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode("utf-32-le"),
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
encoded_semicolon,
" ".encode("utf-16-le"),
encoded_null,
@ -6438,7 +6436,7 @@ def _processValueItem(
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
encoded_semicolon,
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode(
"utf-32-le"
),
encoded_semicolon,
@ -6493,7 +6491,7 @@ def _processValueItem(
encoded_semicolon,
chr(
len(
"{}{}".format(element_values[i], chr(0)).encode(
f"{element_values[i]}{chr(0)}".encode(
"utf-16-le"
)
)
@ -6524,9 +6522,7 @@ def _processValueItem(
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
encoded_semicolon,
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
"utf-32-le"
),
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
encoded_semicolon,
" ".encode("utf-16-le"),
encoded_null,
@ -6590,9 +6586,7 @@ def _processValueItem(
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode("utf-32-le"),
encoded_semicolon,
chr(len(" {}".format(chr(0)).encode("utf-16-le"))).encode(
"utf-32-le"
),
chr(len(f" {chr(0)}".encode("utf-16-le"))).encode("utf-32-le"),
encoded_semicolon,
" ".encode("utf-16-le"),
encoded_null,
@ -6644,10 +6638,10 @@ def _checkAllAdmxPolicies(
if policy_file_data:
log.trace("POLICY CLASS %s has file data", policy_class)
policy_filedata_split = re.sub(
salt.utils.stringutils.to_bytes(r"\]{}$".format(chr(0))),
salt.utils.stringutils.to_bytes(rf"\]{chr(0)}$"),
b"",
re.sub(
salt.utils.stringutils.to_bytes(r"^\[{}".format(chr(0))),
salt.utils.stringutils.to_bytes(rf"^\[{chr(0)}"),
b"",
re.sub(
re.escape(REG_POL_HEADER.encode("utf-16-le")),
@ -6661,7 +6655,7 @@ def _checkAllAdmxPolicies(
# Get the policy for each item defined in Registry.pol
for policy_item in policy_filedata_split:
policy_item_key = (
policy_item.split("{};".format(chr(0)).encode("utf-16-le"))[0]
policy_item.split(f"{chr(0)};".encode("utf-16-le"))[0]
.decode("utf-16-le")
.lower()
)
@ -7424,7 +7418,7 @@ def _build_parent_list(policy_definition, return_full_policy_names, adml_languag
parent_list = []
policy_namespace = next(iter(policy_definition.nsmap))
parent_category = policy_definition.xpath(
"{}:parentCategory/@ref".format(policy_namespace),
f"{policy_namespace}:parentCategory/@ref",
namespaces=policy_definition.nsmap,
)
admx_policy_definitions = _get_policy_definitions(language=adml_language)
@ -7495,14 +7489,14 @@ def _admx_policy_parent_walk(
)
path.append(this_parent_name)
if tparent_category.xpath(
"{}:parentCategory/@ref".format(policy_namespace), namespaces=policy_nsmap
f"{policy_namespace}:parentCategory/@ref", namespaces=policy_nsmap
):
# parent has a parent
path = _admx_policy_parent_walk(
path=path,
policy_namespace=policy_namespace,
parent_category=tparent_category.xpath(
"{}:parentCategory/@ref".format(policy_namespace),
f"{policy_namespace}:parentCategory/@ref",
namespaces=policy_nsmap,
)[0],
policy_nsmap=policy_nsmap,
@ -8534,7 +8528,7 @@ def _lookup_admin_template(policy_name, policy_class, adml_language="en-US"):
False,
None,
[],
"Unable to find {} policy {}".format(policy_class, policy_name),
f"Unable to find {policy_class} policy {policy_name}",
)

View file

@ -341,22 +341,22 @@ def set_value(
"REG_SZ",
]
if v_type not in valid_types:
msg = "Invalid type: {}".format(v_type)
msg = f"Invalid type: {v_type}"
raise SaltInvocationError(msg)
if v_type in ["REG_SZ", "REG_EXPAND_SZ"]:
if not isinstance(v_data, str):
msg = "{} data must be a string".format(v_type)
msg = f"{v_type} data must be a string"
raise SaltInvocationError(msg)
elif v_type == "REG_MULTI_SZ":
if not isinstance(v_data, list):
msg = "{} data must be a list".format(v_type)
msg = f"{v_type} data must be a list"
raise SaltInvocationError(msg)
elif v_type in ["REG_DWORD", "REG_QWORD"]:
try:
int(v_data)
except (TypeError, ValueError):
msg = "{} data must be an integer".format(v_type)
msg = f"{v_type} data must be an integer"
raise SaltInvocationError(msg)
pol_data = read_reg_pol(policy_class=policy_class)
@ -466,17 +466,17 @@ def disable_value(key, v_name, policy_class="machine"):
return None
log.debug(f"LGPO_REG Mod: Disabling value name: {v_name}")
pol_data[found_key].pop(found_name)
found_name = "**del.{}".format(found_name)
found_name = f"**del.{found_name}"
pol_data[found_key][found_name] = {"data": " ", "type": "REG_SZ"}
else:
log.debug(f"LGPO_REG Mod: Setting new disabled value name: {v_name}")
pol_data[found_key]["**del.{}".format(v_name)] = {
pol_data[found_key][f"**del.{v_name}"] = {
"data": " ",
"type": "REG_SZ",
}
else:
log.debug(f"LGPO_REG Mod: Adding new key and disabled value name: {found_name}")
pol_data[key] = {"**del.{}".format(v_name): {"data": " ", "type": "REG_SZ"}}
pol_data[key] = {f"**del.{v_name}": {"data": " ", "type": "REG_SZ"}}
success = True
if not write_reg_pol(pol_data, policy_class=policy_class):

View file

@ -509,15 +509,15 @@ def get_system_info():
def byte_calc(val):
    """
    Format a raw byte count as a human-readable string.

    :param val: byte count (any value accepted by ``float()``)
    :return: the value scaled to the largest fitting binary unit
        (B, KB, MB, GB, or TB) and rendered with three decimals,
        e.g. ``"1.500KB"``.
    """
    val = float(val)
    # Walk the binary-unit thresholds; fall through to TB for anything
    # at or above 2**40 bytes.
    for shift, suffix in ((0, "B"), (10, "KB"), (20, "MB"), (30, "GB")):
        if val < 2 ** (shift + 10):
            return f"{val / 2**shift:.3f}{suffix}"
    return f"{val / 2**40:.3f}TB"
# Lookup dicts for Win32_OperatingSystem
os_type = {1: "Work Station", 2: "Domain Controller", 3: "Server"}
@ -772,10 +772,10 @@ def join_domain(
status = get_domain_workgroup()
if "Domain" in status:
if status["Domain"] == domain:
return "Already joined to {}".format(domain)
return f"Already joined to {domain}"
if username and "\\" not in username and "@" not in username:
username = "{}@{}".format(username, domain)
username = f"{username}@{domain}"
if username and password is None:
return "Must specify a password if you pass a username"
@ -918,11 +918,11 @@ def unjoin_domain(
status = get_domain_workgroup()
if "Workgroup" in status:
if status["Workgroup"] == workgroup:
return "Already joined to {}".format(workgroup)
return f"Already joined to {workgroup}"
if username and "\\" not in username and "@" not in username:
if domain:
username = "{}@{}".format(username, domain)
username = f"{username}@{domain}"
else:
return "Must specify domain if not supplied in username"
@ -1060,7 +1060,7 @@ def get_system_time():
elif hours > 12:
hours = hours - 12
meridian = "PM"
return "{:02d}:{:02d}:{:02d} {}".format(hours, now[5], now[6], meridian)
return f"{hours:02d}:{now[5]:02d}:{now[6]:02d} {meridian}"
def set_system_time(newtime):
@ -1199,7 +1199,7 @@ def get_system_date():
salt '*' system.get_system_date
"""
now = win32api.GetLocalTime()
return "{:02d}/{:02d}/{:04d}".format(now[1], now[3], now[0])
return f"{now[1]:02d}/{now[3]:02d}/{now[0]:04d}"
def set_system_date(newdate):

View file

@ -110,7 +110,7 @@ def _strip_headers(output, *args):
def _get_copr_repo(copr):
copr = copr.split(":", 1)[1]
copr = copr.split("/", 1)
return "copr:copr.fedorainfracloud.org:{}:{}".format(copr[0], copr[1])
return f"copr:copr.fedorainfracloud.org:{copr[0]}:{copr[1]}"
def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
@ -123,14 +123,14 @@ def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
"""
if full:
if _yum() == "dnf":
lock_re = r"({}-\S+)".format(pattern)
lock_re = rf"({pattern}-\S+)"
else:
lock_re = r"(\d+:{}-\S+)".format(pattern)
lock_re = rf"(\d+:{pattern}-\S+)"
else:
if _yum() == "dnf":
lock_re = r"({}-\S+)".format(pattern)
lock_re = rf"({pattern}-\S+)"
else:
lock_re = r"\d+:({}-\S+)".format(pattern)
lock_re = rf"\d+:({pattern}-\S+)"
match = re.search(lock_re, line)
if match:
@ -271,9 +271,7 @@ def _check_versionlock():
"""
vl_plugin = _versionlock_pkg()
if vl_plugin not in list_pkgs():
raise SaltInvocationError(
"Cannot proceed, {} is not installed.".format(vl_plugin)
)
raise SaltInvocationError(f"Cannot proceed, {vl_plugin} is not installed.")
def _get_options(**kwargs):
@ -303,26 +301,26 @@ def _get_options(**kwargs):
if fromrepo:
log.info("Restricting to repo '%s'", fromrepo)
ret.extend(["--disablerepo=*", "--enablerepo={}".format(fromrepo)])
ret.extend(["--disablerepo=*", f"--enablerepo={fromrepo}"])
else:
if disablerepo:
targets = (
[disablerepo] if not isinstance(disablerepo, list) else disablerepo
)
log.info("Disabling repo(s): %s", ", ".join(targets))
ret.extend(["--disablerepo={}".format(x) for x in targets])
ret.extend([f"--disablerepo={x}" for x in targets])
if enablerepo:
targets = [enablerepo] if not isinstance(enablerepo, list) else enablerepo
log.info("Enabling repo(s): %s", ", ".join(targets))
ret.extend(["--enablerepo={}".format(x) for x in targets])
ret.extend([f"--enablerepo={x}" for x in targets])
if disableexcludes:
log.info("Disabling excludes for '%s'", disableexcludes)
ret.append("--disableexcludes={}".format(disableexcludes))
ret.append(f"--disableexcludes={disableexcludes}")
if branch:
log.info("Adding branch '%s'", branch)
ret.append("--branch={}".format(branch))
ret.append(f"--branch={branch}")
for item in setopt:
ret.extend(["--setopt", str(item)])
@ -335,10 +333,10 @@ def _get_options(**kwargs):
value = kwargs[key]
if isinstance(value, str):
log.info("Found extra option --%s=%s", key, value)
ret.append("--{}={}".format(key, value))
ret.append(f"--{key}={value}")
elif value is True:
log.info("Found extra option --%s", key)
ret.append("--{}".format(key))
ret.append(f"--{key}")
if ret:
log.info("Adding extra options: %s", ret)
@ -372,10 +370,10 @@ def _get_yum_config(strict_parser=True):
for name, value in yb.conf.items():
conf[name] = value
except (AttributeError, yum.Errors.ConfigError) as exc:
raise CommandExecutionError("Could not query yum config: {}".format(exc))
raise CommandExecutionError(f"Could not query yum config: {exc}")
except yum.Errors.YumBaseError as yum_base_error:
raise CommandExecutionError(
"Error accessing yum or rpmdb: {}".format(yum_base_error)
f"Error accessing yum or rpmdb: {yum_base_error}"
)
else:
# fall back to parsing the config ourselves
@ -394,14 +392,14 @@ def _get_yum_config(strict_parser=True):
if not fn:
raise CommandExecutionError(
"No suitable yum config file found in: {}".format(paths)
f"No suitable yum config file found in: {paths}"
)
cp = configparser.ConfigParser(strict=strict_parser)
try:
cp.read(fn)
except OSError as exc:
raise CommandExecutionError("Unable to read from {}: {}".format(fn, exc))
raise CommandExecutionError(f"Unable to read from {fn}: {exc}")
if cp.has_section("main"):
for opt in cp.options("main"):
@ -995,7 +993,7 @@ def list_repo_pkgs(*args, **kwargs):
else:
for repo in repos:
if _yum() == "tdnf":
cmd = ["--quiet", "--enablerepo={}".format(repo), "list"]
cmd = ["--quiet", f"--enablerepo={repo}", "list"]
else:
cmd = [
"--quiet",
@ -1254,7 +1252,7 @@ def install(
update_holds=False,
saltenv="base",
ignore_epoch=False,
**kwargs
**kwargs,
):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
@ -1468,7 +1466,7 @@ def install(
sources,
saltenv=saltenv,
normalize=normalize and kwargs.get("split_arch", True),
**kwargs
**kwargs,
)
except MinionError as exc:
raise CommandExecutionError(exc)
@ -1527,9 +1525,7 @@ def install(
cur_patches = list_patches()
for advisory_id in pkg_params:
if advisory_id not in cur_patches:
raise CommandExecutionError(
'Advisory id "{}" not found'.format(advisory_id)
)
raise CommandExecutionError(f'Advisory id "{advisory_id}" not found')
else:
pkg_params_items.append(advisory_id)
else:
@ -1647,7 +1643,7 @@ def install(
continue
if ignore_epoch is True:
pkgstr = "{}-{}{}".format(pkgname, version_num, arch)
pkgstr = f"{pkgname}-{version_num}{arch}"
else:
pkgstr = "{}-{}{}".format(
pkgname, version_num.split(":", 1)[-1], arch
@ -1771,7 +1767,7 @@ def install(
with _temporarily_unhold(to_install, targets):
if targets:
if pkg_type == "advisory":
targets = ["--advisory={}".format(t) for t in targets]
targets = [f"--advisory={t}" for t in targets]
cmd = ["-y"]
if _yum() == "dnf":
cmd.extend(["--best", "--allowerasing"])
@ -1848,7 +1844,7 @@ def upgrade(
minimal=False,
obsoletes=True,
diff_attr=None,
**kwargs
**kwargs,
):
"""
Run a full system upgrade (a ``yum upgrade`` or ``dnf upgrade``), or
@ -2076,7 +2072,7 @@ def update(
normalize=True,
minimal=False,
obsoletes=False,
**kwargs
**kwargs,
):
"""
.. versionadded:: 2019.2.0
@ -2322,7 +2318,7 @@ def hold(
if target not in current_locks:
if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
ret[target]["comment"] = "Package {} is set to be held.".format(target)
ret[target]["comment"] = f"Package {target} is set to be held."
else:
out = _call_yum(["versionlock", target])
if out["retcode"] == 0:
@ -2415,7 +2411,7 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
search_locks = [
x
for x in current_locks
if fnmatch.fnmatch(x, "*{}*".format(target))
if fnmatch.fnmatch(x, f"*{target}*")
and target == _get_hold(x, full=False)
]
@ -2437,10 +2433,10 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
else:
ret[target][
"comment"
] = "Package {} was unable to be unheld.".format(target)
] = f"Package {target} was unable to be unheld."
else:
ret[target].update(result=True)
ret[target]["comment"] = "Package {} is not being held.".format(target)
ret[target]["comment"] = f"Package {target} is not being held."
return ret
@ -2631,7 +2627,7 @@ def group_info(name, expand=False, ignore_groups=None):
ret["group"] = g_info.get("environment group") or g_info.get("group")
ret["id"] = g_info.get("environment-id") or g_info.get("group-id")
if not ret["group"] and not ret["id"]:
raise CommandExecutionError("Group '{}' not found".format(name))
raise CommandExecutionError(f"Group '{name}' not found")
ret["description"] = g_info.get("description", "")
@ -2830,7 +2826,7 @@ def list_repos(basedir=None, **kwargs):
if not os.path.exists(bdir):
continue
for repofile in os.listdir(bdir):
repopath = "{}/{}".format(bdir, repofile)
repopath = f"{bdir}/{repofile}"
if not repofile.endswith(".repo"):
continue
filerepos = _parse_repo_file(repopath, strict_parser)[1]
@ -2902,7 +2898,7 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
repos = list_repos(basedirs, **kwargs)
if repo not in repos:
return "Error: the {} repo does not exist in {}".format(repo, basedirs)
return f"Error: the {repo} repo does not exist in {basedirs}"
# Find out what file the repo lives in
repofile = ""
@ -2921,7 +2917,7 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
# If this is the only repo in the file, delete the file itself
if onlyrepo:
os.remove(repofile)
return "File {} containing repo {} has been removed".format(repofile, repo)
return f"File {repofile} containing repo {repo} has been removed"
# There must be other repos in this file, write the file with them
header, filerepos = _parse_repo_file(repofile, strict_parser)
@ -2935,20 +2931,20 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
filerepos[stanza]["comments"]
)
del filerepos[stanza]["comments"]
content += "\n[{}]".format(stanza)
content += f"\n[{stanza}]"
for line in filerepos[stanza]:
# A whitespace is needed at the beginning of the new line in order
# to avoid breaking multiple line values allowed on repo files.
value = filerepos[stanza][line]
if isinstance(value, str) and "\n" in value:
value = "\n ".join(value.split("\n"))
content += "\n{}={}".format(line, value)
content += "\n{}\n".format(comments)
content += f"\n{line}={value}"
content += f"\n{comments}\n"
with salt.utils.files.fopen(repofile, "w") as fileout:
fileout.write(salt.utils.stringutils.to_str(content))
return "Repo {} has been removed from {}".format(repo, repofile)
return f"Repo {repo} has been removed from {repofile}"
def mod_repo(repo, basedir=None, **kwargs):
@ -3036,7 +3032,7 @@ def mod_repo(repo, basedir=None, **kwargs):
"The repo does not exist and needs to be created, but none "
"of the following basedir directories exist: {}".format(basedirs)
)
repofile = "{}/{}.repo".format(newdir, repo)
repofile = f"{newdir}/{repo}.repo"
if use_copr:
# Is copr plugin installed?
copr_plugin_name = ""
@ -3047,7 +3043,7 @@ def mod_repo(repo, basedir=None, **kwargs):
if not __salt__["pkg_resource.version"](copr_plugin_name):
raise SaltInvocationError(
"{} must be installed to use COPR".format(copr_plugin_name)
f"{copr_plugin_name} must be installed to use COPR"
)
# Enable COPR
@ -3064,7 +3060,7 @@ def mod_repo(repo, basedir=None, **kwargs):
repofile = repos[repo]["file"]
header, filerepos = _parse_repo_file(repofile, strict_parser)
else:
repofile = "{}/{}.repo".format(newdir, repo)
repofile = f"{newdir}/{repo}.repo"
if "name" not in repo_opts:
raise SaltInvocationError(
@ -3108,7 +3104,7 @@ def mod_repo(repo, basedir=None, **kwargs):
comments = salt.utils.pkg.rpm.combine_comments(
filerepos[stanza].pop("comments", [])
)
content += "[{}]\n".format(stanza)
content += f"[{stanza}]\n"
for line in filerepos[stanza].keys():
# A whitespace is needed at the beginning of the new line in order
# to avoid breaking multiple line values allowed on repo files.
@ -3327,11 +3323,7 @@ def download(*packages, **kwargs):
to_purge = []
for pkg in packages:
to_purge.extend(
[
os.path.join(CACHE_DIR, x)
for x in cached_pkgs
if x.startswith("{}-".format(pkg))
]
[os.path.join(CACHE_DIR, x) for x in cached_pkgs if x.startswith(f"{pkg}-")]
)
for purge_target in set(to_purge):
log.debug("Removing cached package %s", purge_target)
@ -3340,7 +3332,7 @@ def download(*packages, **kwargs):
except OSError as exc:
log.error("Unable to remove %s: %s", purge_target, exc)
cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)]
cmd = ["yumdownloader", "-q", f"--destdir={CACHE_DIR}"]
cmd.extend(packages)
__salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
ret = {}
@ -3350,7 +3342,7 @@ def download(*packages, **kwargs):
pkg_name = None
pkg_file = None
for query_pkg in packages:
if dld_result.startswith("{}-".format(query_pkg)):
if dld_result.startswith(f"{query_pkg}-"):
pkg_name = query_pkg
pkg_file = dld_result
break

View file

@ -189,11 +189,9 @@ def _query(method, params, url, auth=None):
)
return ret
except ValueError as err:
raise SaltException(
"URL or HTTP headers are probably not correct! ({})".format(err)
)
raise SaltException(f"URL or HTTP headers are probably not correct! ({err})")
except OSError as err:
raise SaltException("Check hostname in URL! ({})".format(err))
raise SaltException(f"Check hostname in URL! ({err})")
def _login(**kwargs):
@ -232,9 +230,9 @@ def _login(**kwargs):
name = name[len(prefix) :]
except IndexError:
return
val = __salt__["config.get"]("zabbix.{}".format(name), None) or __salt__[
val = __salt__["config.get"](f"zabbix.{name}", None) or __salt__[
"config.get"
]("zabbix:{}".format(name), None)
](f"zabbix:{name}", None)
if val is not None:
connargs[key] = val
@ -258,7 +256,7 @@ def _login(**kwargs):
else:
raise KeyError
except KeyError as err:
raise SaltException("URL is probably not correct! ({})".format(err))
raise SaltException(f"URL is probably not correct! ({err})")
def _params_extend(params, _ignore_name=False, **kwargs):
@ -2143,7 +2141,7 @@ def usermacro_get(
hostmacroids=None,
globalmacroids=None,
globalmacro=False,
**connection_args
**connection_args,
):
"""
Retrieve user macros according to the given parameters.

View file

@ -1109,7 +1109,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs):
# Fetch device from API
headers = {}
if api_token:
headers = {"Authorization": "Token {}".format(api_token)}
headers = {"Authorization": f"Token {api_token}"}
else:
log.error("The value for api_token is not set")
return ret

View file

@ -109,15 +109,13 @@ def cert(
else:
ret["result"] = True
ret["comment"].append(
"Certificate {} exists and does not need renewal.".format(certname)
f"Certificate {certname} exists and does not need renewal."
)
if action:
if __opts__["test"]:
ret["result"] = None
ret["comment"].append(
"Certificate {} would have been {}ed.".format(certname, action)
)
ret["comment"].append(f"Certificate {certname} would have been {action}ed.")
ret["changes"] = {"old": "current certificate", "new": "new certificate"}
else:
res = __salt__["acme.cert"](

File diff suppressed because it is too large Load diff

View file

@ -50,7 +50,7 @@ def _convert_to_mb(size):
if str_size[-1:].isdigit():
size = int(str_size)
else:
raise salt.exceptions.ArgumentValueError("Size {} is invalid.".format(size))
raise salt.exceptions.ArgumentValueError(f"Size {size} is invalid.")
if unit == "s":
target_size = size / 2048
@ -63,7 +63,7 @@ def _convert_to_mb(size):
elif unit == "p":
target_size = size * 1024 * 1024 * 1024
else:
raise salt.exceptions.ArgumentValueError("Unit {} is invalid.".format(unit))
raise salt.exceptions.ArgumentValueError(f"Unit {unit} is invalid.")
return target_size
@ -81,19 +81,19 @@ def pv_present(name, **kwargs):
ret = {"changes": {}, "comment": "", "name": name, "result": True}
if __salt__["lvm.pvdisplay"](name, quiet=True):
ret["comment"] = "Physical Volume {} already present".format(name)
ret["comment"] = f"Physical Volume {name} already present"
elif __opts__["test"]:
ret["comment"] = "Physical Volume {} is set to be created".format(name)
ret["comment"] = f"Physical Volume {name} is set to be created"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.pvcreate"](name, **kwargs)
if __salt__["lvm.pvdisplay"](name):
ret["comment"] = "Created Physical Volume {}".format(name)
ret["comment"] = f"Created Physical Volume {name}"
ret["changes"]["created"] = changes
else:
ret["comment"] = "Failed to create Physical Volume {}".format(name)
ret["comment"] = f"Failed to create Physical Volume {name}"
ret["result"] = False
return ret
@ -108,19 +108,19 @@ def pv_absent(name):
ret = {"changes": {}, "comment": "", "name": name, "result": True}
if not __salt__["lvm.pvdisplay"](name, quiet=True):
ret["comment"] = "Physical Volume {} does not exist".format(name)
ret["comment"] = f"Physical Volume {name} does not exist"
elif __opts__["test"]:
ret["comment"] = "Physical Volume {} is set to be removed".format(name)
ret["comment"] = f"Physical Volume {name} is set to be removed"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.pvremove"](name)
if __salt__["lvm.pvdisplay"](name, quiet=True):
ret["comment"] = "Failed to remove Physical Volume {}".format(name)
ret["comment"] = f"Failed to remove Physical Volume {name}"
ret["result"] = False
else:
ret["comment"] = "Removed Physical Volume {}".format(name)
ret["comment"] = f"Removed Physical Volume {name}"
ret["changes"]["removed"] = changes
return ret
@ -144,23 +144,23 @@ def vg_present(name, devices=None, **kwargs):
devices = devices.split(",")
if __salt__["lvm.vgdisplay"](name, quiet=True):
ret["comment"] = "Volume Group {} already present".format(name)
ret["comment"] = f"Volume Group {name} already present"
for device in devices:
realdev = os.path.realpath(device)
pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
if pvs and pvs.get(realdev, None):
if pvs[realdev]["Volume Group Name"] == name:
ret["comment"] = "{}\n{}".format(
ret["comment"], "{} is part of Volume Group".format(device)
ret["comment"], f"{device} is part of Volume Group"
)
elif pvs[realdev]["Volume Group Name"] in ["", "#orphans_lvm2"]:
__salt__["lvm.vgextend"](name, device)
pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
if pvs[realdev]["Volume Group Name"] == name:
ret["changes"].update({device: "added to {}".format(name)})
ret["changes"].update({device: f"added to {name}"})
else:
ret["comment"] = "{}\n{}".format(
ret["comment"], "{} could not be added".format(device)
ret["comment"], f"{device} could not be added"
)
ret["result"] = False
else:
@ -173,21 +173,21 @@ def vg_present(name, devices=None, **kwargs):
ret["result"] = False
else:
ret["comment"] = "{}\n{}".format(
ret["comment"], "pv {} is not present".format(device)
ret["comment"], f"pv {device} is not present"
)
ret["result"] = False
elif __opts__["test"]:
ret["comment"] = "Volume Group {} is set to be created".format(name)
ret["comment"] = f"Volume Group {name} is set to be created"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.vgcreate"](name, devices, **kwargs)
if __salt__["lvm.vgdisplay"](name):
ret["comment"] = "Created Volume Group {}".format(name)
ret["comment"] = f"Created Volume Group {name}"
ret["changes"]["created"] = changes
else:
ret["comment"] = "Failed to create Volume Group {}".format(name)
ret["comment"] = f"Failed to create Volume Group {name}"
ret["result"] = False
return ret
@ -202,19 +202,19 @@ def vg_absent(name):
ret = {"changes": {}, "comment": "", "name": name, "result": True}
if not __salt__["lvm.vgdisplay"](name, quiet=True):
ret["comment"] = "Volume Group {} already absent".format(name)
ret["comment"] = f"Volume Group {name} already absent"
elif __opts__["test"]:
ret["comment"] = "Volume Group {} is set to be removed".format(name)
ret["comment"] = f"Volume Group {name} is set to be removed"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.vgremove"](name)
if not __salt__["lvm.vgdisplay"](name, quiet=True):
ret["comment"] = "Removed Volume Group {}".format(name)
ret["comment"] = f"Removed Volume Group {name}"
ret["changes"]["removed"] = changes
else:
ret["comment"] = "Failed to remove Volume Group {}".format(name)
ret["comment"] = f"Failed to remove Volume Group {name}"
ret["result"] = False
return ret
@ -230,7 +230,7 @@ def lv_present(
thinpool=False,
force=False,
resizefs=False,
**kwargs
**kwargs,
):
"""
Ensure that a Logical Volume is present, creating it if absent.
@ -299,14 +299,14 @@ def lv_present(
if thinvolume:
lvpath = "/dev/{}/{}".format(vgname.split("/")[0], name)
else:
lvpath = "/dev/{}/{}".format(vgname, name)
lvpath = f"/dev/{vgname}/{name}"
lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)
lv_info = lv_info.get(lvpath)
if not lv_info:
if __opts__["test"]:
ret["comment"] = "Logical Volume {} is set to be created".format(name)
ret["comment"] = f"Logical Volume {name} is set to be created"
ret["result"] = None
return ret
else:
@ -320,11 +320,11 @@ def lv_present(
thinvolume=thinvolume,
thinpool=thinpool,
force=force,
**kwargs
**kwargs,
)
if __salt__["lvm.lvdisplay"](lvpath):
ret["comment"] = "Created Logical Volume {}".format(name)
ret["comment"] = f"Created Logical Volume {name}"
ret["changes"]["created"] = changes
else:
ret["comment"] = "Failed to create Logical Volume {}. Error: {}".format(
@ -332,7 +332,7 @@ def lv_present(
)
ret["result"] = False
else:
ret["comment"] = "Logical Volume {} already present".format(name)
ret["comment"] = f"Logical Volume {name} already present"
if size or extents:
old_extents = int(lv_info["Current Logical Extents Associated"])
@ -386,7 +386,7 @@ def lv_present(
lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)[lvpath]
new_size_mb = _convert_to_mb(lv_info["Logical Volume Size"] + "s")
if new_size_mb != old_size_mb:
ret["comment"] = "Resized Logical Volume {}".format(name)
ret["comment"] = f"Resized Logical Volume {name}"
ret["changes"]["resized"] = changes
else:
ret[
@ -410,20 +410,20 @@ def lv_absent(name, vgname=None):
"""
ret = {"changes": {}, "comment": "", "name": name, "result": True}
lvpath = "/dev/{}/{}".format(vgname, name)
lvpath = f"/dev/{vgname}/{name}"
if not __salt__["lvm.lvdisplay"](lvpath, quiet=True):
ret["comment"] = "Logical Volume {} already absent".format(name)
ret["comment"] = f"Logical Volume {name} already absent"
elif __opts__["test"]:
ret["comment"] = "Logical Volume {} is set to be removed".format(name)
ret["comment"] = f"Logical Volume {name} is set to be removed"
ret["result"] = None
return ret
else:
changes = __salt__["lvm.lvremove"](name, vgname)
if not __salt__["lvm.lvdisplay"](lvpath, quiet=True):
ret["comment"] = "Removed Logical Volume {}".format(name)
ret["comment"] = f"Removed Logical Volume {name}"
ret["changes"]["removed"] = changes
else:
ret["comment"] = "Failed to remove Logical Volume {}".format(name)
ret["comment"] = f"Failed to remove Logical Volume {name}"
ret["result"] = False
return ret

View file

@ -32,16 +32,16 @@ def present(name, parent=None, vlan=None):
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
# Comment and change messages
comment_bridge_created = "Bridge {} created.".format(name)
comment_bridge_notcreated = "Unable to create bridge: {}.".format(name)
comment_bridge_exists = "Bridge {} already exists.".format(name)
comment_bridge_created = f"Bridge {name} created."
comment_bridge_notcreated = f"Unable to create bridge: {name}."
comment_bridge_exists = f"Bridge {name} already exists."
comment_bridge_mismatch = (
"Bridge {} already exists, but has a different" " parent or VLAN ID."
).format(name)
changes_bridge_created = {
name: {
"old": "Bridge {} does not exist.".format(name),
"new": "Bridge {} created".format(name),
"old": f"Bridge {name} does not exist.",
"new": f"Bridge {name} created",
}
}
@ -103,13 +103,13 @@ def absent(name):
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
# Comment and change messages
comment_bridge_deleted = "Bridge {} deleted.".format(name)
comment_bridge_notdeleted = "Unable to delete bridge: {}.".format(name)
comment_bridge_notexists = "Bridge {} does not exist.".format(name)
comment_bridge_deleted = f"Bridge {name} deleted."
comment_bridge_notdeleted = f"Unable to delete bridge: {name}."
comment_bridge_notexists = f"Bridge {name} does not exist."
changes_bridge_deleted = {
name: {
"old": "Bridge {} exists.".format(name),
"new": "Bridge {} deleted.".format(name),
"old": f"Bridge {name} exists.",
"new": f"Bridge {name} deleted.",
}
}

View file

@ -229,7 +229,7 @@ def _get_cibfile_tmp(cibname):
"""
Get the full path of a temporary CIB-file with the name of the CIB
"""
cibfile_tmp = "{}.tmp".format(_get_cibfile(cibname))
cibfile_tmp = f"{_get_cibfile(cibname)}.tmp"
log.trace("cibfile_tmp: %s", cibfile_tmp)
return cibfile_tmp
@ -238,7 +238,7 @@ def _get_cibfile_cksum(cibname):
"""
Get the full path of the file containing a checksum of a CIB-file with the name of the CIB
"""
cibfile_cksum = "{}.cksum".format(_get_cibfile(cibname))
cibfile_cksum = f"{_get_cibfile(cibname)}.cksum"
log.trace("cibfile_cksum: %s", cibfile_cksum)
return cibfile_cksum
@ -336,7 +336,7 @@ def _item_present(
# constraints match on '(id:<id>)'
elif item in ["constraint"]:
for line in is_existing["stdout"].splitlines():
if "(id:{})".format(item_id) in line:
if f"(id:{item_id})" in line:
item_create_required = False
# item_id was provided,
@ -370,7 +370,7 @@ def _item_present(
log.trace("Output of pcs.item_create: %s", item_create)
if item_create["retcode"] in [0]:
ret["comment"] += "Created {} {} ({})\n".format(item, item_id, item_type)
ret["comment"] += f"Created {item} {item_id} ({item_type})\n"
ret["changes"].update({item_id: {"old": "", "new": str(item_id)}})
else:
ret["result"] = False
@ -435,11 +435,11 @@ def auth(name, nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=Non
authorized_dict[node] == "Already authorized"
or authorized_dict[node] == "Authorized"
):
ret["comment"] += "Node {} is already authorized\n".format(node)
ret["comment"] += f"Node {node} is already authorized\n"
else:
auth_required = True
if __opts__["test"]:
ret["comment"] += "Node is set to authorize: {}\n".format(node)
ret["comment"] += f"Node is set to authorize: {node}\n"
if not auth_required:
return ret
@ -463,7 +463,7 @@ def auth(name, nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=Non
for node in nodes:
if node in authorize_dict and authorize_dict[node] == "Authorized":
ret["comment"] += "Authorized {}\n".format(node)
ret["comment"] += f"Authorized {node}\n"
ret["changes"].update({node: {"old": "", "new": "Authorized"}})
else:
ret["result"] = False
@ -604,13 +604,13 @@ def cluster_setup(
"Success",
"Cluster enabled",
]:
ret["comment"] += "Set up {}\n".format(node)
ret["comment"] += f"Set up {node}\n"
ret["changes"].update({node: {"old": "", "new": "Setup"}})
else:
ret["result"] = False
ret["comment"] += "Failed to setup {}\n".format(node)
ret["comment"] += f"Failed to setup {node}\n"
if node in setup_dict:
ret["comment"] += "{}: setup_dict: {}\n".format(node, setup_dict[node])
ret["comment"] += f"{node}: setup_dict: {setup_dict[node]}\n"
ret["comment"] += str(setup)
log.trace("ret: %s", ret)
@ -664,7 +664,7 @@ def cluster_node_present(name, node, extra_args=None):
node_add_required = False
ret[
"comment"
] += "Node {} is already member of the cluster\n".format(node)
] += f"Node {node} is already member of the cluster\n"
else:
current_nodes += value.split()
@ -673,7 +673,7 @@ def cluster_node_present(name, node, extra_args=None):
if __opts__["test"]:
ret["result"] = None
ret["comment"] += "Node {} is set to be added to the cluster\n".format(node)
ret["comment"] += f"Node {node} is set to be added to the cluster\n"
return ret
if not isinstance(extra_args, (list, tuple)):
@ -710,11 +710,11 @@ def cluster_node_present(name, node, extra_args=None):
)
if node in node_add_dict and node_add_dict[node] in ["Succeeded", "Success"]:
ret["comment"] += "Added node {}\n".format(node)
ret["comment"] += f"Added node {node}\n"
ret["changes"].update({node: {"old": "", "new": "Added"}})
else:
ret["result"] = False
ret["comment"] += "Failed to add node{}\n".format(node)
ret["comment"] += f"Failed to add node{node}\n"
if node in node_add_dict:
ret["comment"] += "{}: node_add_dict: {}\n".format(
node, node_add_dict[node]
@ -806,10 +806,10 @@ def cib_present(name, cibname, scope=None, extra_args=None):
if not cib_create_required:
__salt__["file.remove"](cibfile_tmp)
ret["comment"] += "CIB {} is already equal to the live CIB\n".format(cibname)
ret["comment"] += f"CIB {cibname} is already equal to the live CIB\n"
if not cib_cksum_required:
ret["comment"] += "CIB {} checksum is correct\n".format(cibname)
ret["comment"] += f"CIB {cibname} checksum is correct\n"
if not cib_required:
return ret
@ -818,7 +818,7 @@ def cib_present(name, cibname, scope=None, extra_args=None):
__salt__["file.remove"](cibfile_tmp)
ret["result"] = None
if cib_create_required:
ret["comment"] += "CIB {} is set to be created/updated\n".format(cibname)
ret["comment"] += f"CIB {cibname} is set to be created/updated\n"
if cib_cksum_required:
ret["comment"] += "CIB {} checksum is set to be created/updated\n".format(
cibname
@ -829,11 +829,11 @@ def cib_present(name, cibname, scope=None, extra_args=None):
__salt__["file.move"](cibfile_tmp, cibfile)
if __salt__["file.check_hash"](path=cibfile, file_hash=cib_hash_live):
ret["comment"] += "Created/updated CIB {}\n".format(cibname)
ret["comment"] += f"Created/updated CIB {cibname}\n"
ret["changes"].update({"cibfile": cibfile})
else:
ret["result"] = False
ret["comment"] += "Failed to create/update CIB {}\n".format(cibname)
ret["comment"] += f"Failed to create/update CIB {cibname}\n"
if cib_cksum_required:
_file_write(cibfile_cksum, cib_hash_live)
@ -894,7 +894,7 @@ def cib_pushed(name, cibname, scope=None, extra_args=None):
if not os.path.exists(cibfile):
ret["result"] = False
ret["comment"] += "CIB-file {} does not exist\n".format(cibfile)
ret["comment"] += f"CIB-file {cibfile} does not exist\n"
return ret
cib_hash_cibfile = "{}:{}".format(
@ -926,11 +926,11 @@ def cib_pushed(name, cibname, scope=None, extra_args=None):
log.trace("Output of pcs.cib_push: %s", cib_push)
if cib_push["retcode"] in [0]:
ret["comment"] += "Pushed CIB {}\n".format(cibname)
ret["comment"] += f"Pushed CIB {cibname}\n"
ret["changes"].update({"cibfile_pushed": cibfile})
else:
ret["result"] = False
ret["comment"] += "Failed to push CIB {}\n".format(cibname)
ret["comment"] += f"Failed to push CIB {cibname}\n"
log.trace("ret: %s", ret)
@ -968,7 +968,7 @@ def prop_has_value(name, prop, value, extra_args=None, cibname=None):
return _item_present(
name=name,
item="property",
item_id="{}={}".format(prop, value),
item_id=f"{prop}={value}",
item_type=None,
create="set",
extra_args=extra_args,
@ -1008,7 +1008,7 @@ def resource_defaults_to(name, default, value, extra_args=None, cibname=None):
return _item_present(
name=name,
item="resource",
item_id="{}={}".format(default, value),
item_id=f"{default}={value}",
item_type=None,
show="defaults",
create="defaults",
@ -1049,7 +1049,7 @@ def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=No
return _item_present(
name=name,
item="resource",
item_id="{}={}".format(op_default, value),
item_id=f"{op_default}={value}",
item_type=None,
show=["op", "defaults"],
create=["op", "defaults"],

View file

@ -198,7 +198,7 @@ def _check_pkg_version_format(pkg):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = _from_line(pkg.split("{}+".format(vcs))[-1])
install_req = _from_line(pkg.split(f"{vcs}+")[-1])
break
else:
install_req = _from_line(pkg)
@ -767,7 +767,7 @@ def installed(
cur_version = __salt__["pip.version"](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret["result"] = False
ret["comment"] = "Error installing '{}': {}".format(name, err)
ret["comment"] = f"Error installing '{name}': {err}"
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
@ -853,7 +853,7 @@ def installed(
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append(
"Requirements file '{}' will be processed.".format(requirements)
f"Requirements file '{requirements}' will be processed."
)
if editable:
comments.append(
@ -956,7 +956,7 @@ def installed(
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__["pip.install"](
pkgs="{}".format(pkgs_str) if pkgs_str else "",
pkgs=f"{pkgs_str}" if pkgs_str else "",
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
@ -1081,10 +1081,10 @@ def installed(
and prefix.lower() not in already_installed_packages
):
ver = pipsearch[prefix]
ret["changes"]["{}=={}".format(prefix, ver)] = "Installed"
ret["changes"][f"{prefix}=={ver}"] = "Installed"
# Case for packages that are an URL
else:
ret["changes"]["{}==???".format(state_name)] = "Installed"
ret["changes"][f"{state_name}==???"] = "Installed"
# Set comments
aicomms = "\n".join(already_installed_comments)
@ -1109,19 +1109,15 @@ def installed(
if requirements or editable:
comments = []
if requirements:
comments.append(
'Unable to process requirements file "{}"'.format(requirements)
)
comments.append(f'Unable to process requirements file "{requirements}"')
if editable:
comments.append(
"Unable to install from VCS checkout {}.".format(editable)
)
comments.append(f"Unable to install from VCS checkout {editable}.")
comments.append(error)
ret["comment"] = " ".join(comments)
else:
pkgs_str = ", ".join([state_name for _, state_name in target_pkgs])
aicomms = "\n".join(already_installed_comments)
error_comm = "Failed to install packages: {}. {}".format(pkgs_str, error)
error_comm = f"Failed to install packages: {pkgs_str}. {error}"
ret["comment"] = aicomms + ("\n" if aicomms else "") + error_comm
else:
ret["result"] = False
@ -1159,7 +1155,7 @@ def removed(
pip_list = __salt__["pip.list"](bin_env=bin_env, user=user, cwd=cwd)
except (CommandExecutionError, CommandNotFoundError) as err:
ret["result"] = False
ret["comment"] = "Error uninstalling '{}': {}".format(name, err)
ret["comment"] = f"Error uninstalling '{name}': {err}"
return ret
if name not in pip_list:
@ -1169,7 +1165,7 @@ def removed(
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Package {} is set to be removed".format(name)
ret["comment"] = f"Package {name} is set to be removed"
return ret
if __salt__["pip.uninstall"](

View file

@ -127,7 +127,7 @@ def present(name, params, **kwargs):
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be fixed.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be fixed.'
ret["changes"] = {
name: {
"old": (
@ -151,14 +151,14 @@ def present(name, params, **kwargs):
)
if action_update:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" updated.'
ret["changes"] = {
name: {
"old": (
'Zabbix Action "{}" differed '
"in following parameters: {}".format(name, diff_params)
),
"new": 'Zabbix Action "{}" fixed.'.format(name),
"new": f'Zabbix Action "{name}" fixed.',
}
}
@ -173,10 +173,10 @@ def present(name, params, **kwargs):
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" does not exist.'.format(name),
"old": f'Zabbix Action "{name}" does not exist.',
"new": (
'Zabbix Action "{}" would be created according definition.'.format(
name
@ -193,10 +193,10 @@ def present(name, params, **kwargs):
if action_create:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" created.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" did not exist.'.format(name),
"old": f'Zabbix Action "{name}" did not exist.',
"new": (
'Zabbix Action "{}" created according definition.'.format(
name
@ -235,15 +235,15 @@ def absent(name, **kwargs):
if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" exists.'.format(name),
"new": 'Zabbix Action "{}" would be deleted.'.format(name),
"old": f'Zabbix Action "{name}" exists.',
"new": f'Zabbix Action "{name}" would be deleted.',
}
}
else:
@ -253,11 +253,11 @@ def absent(name, **kwargs):
if action_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Action "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Action "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Action "{}" existed.'.format(name),
"new": 'Zabbix Action "{}" deleted.'.format(name),
"old": f'Zabbix Action "{name}" existed.',
"new": f'Zabbix Action "{name}" deleted.',
}
}

View file

@ -487,10 +487,10 @@ def is_present(name, **kwargs):
if not object_id:
ret["result"] = False
ret["comment"] = 'Zabbix Template "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" does not exist.'
else:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" exists.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" exists.'
return ret
@ -690,7 +690,7 @@ def present(name, params, static_host_list=True, **kwargs):
"selectMacros": "extend",
"filter": {"host": name},
},
**kwargs
**kwargs,
)
log.info("TEMPLATE get result: %s", str(json.dumps(tmpl_get, indent=4)))
@ -797,7 +797,7 @@ def present(name, params, static_host_list=True, **kwargs):
TEMPLATE_COMPONENT_DEF[component]["qselectpid"]: template_id
},
filter_key=TEMPLATE_COMPONENT_DEF[component]["filter"],
**kwargs
**kwargs,
)
else:
defined_c_list_subs = []
@ -807,7 +807,7 @@ def present(name, params, static_host_list=True, **kwargs):
template_id,
defined_c_list_subs,
existing_c_list_subs,
**kwargs
**kwargs,
)
log.info(
@ -846,7 +846,7 @@ def present(name, params, static_host_list=True, **kwargs):
defined_p_list_subs = __salt__["zabbix.substitute_params"](
d_rule_component[proto_name],
extend_params={c_def["qselectpid"]: template_id},
**kwargs
**kwargs,
)
else:
defined_p_list_subs = []
@ -857,7 +857,7 @@ def present(name, params, static_host_list=True, **kwargs):
defined_p_list_subs,
existing_p_list_subs,
template_id=template_id,
**kwargs
**kwargs,
)
log.info(
@ -884,10 +884,10 @@ def present(name, params, static_host_list=True, **kwargs):
if tmpl_action:
ret["result"] = True
if dry_run:
ret["comment"] = 'Zabbix Template "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" does not exist.'.format(name),
"old": f'Zabbix Template "{name}" does not exist.',
"new": (
'Zabbix Template "{}" would be created '
"according definition.".format(name)
@ -895,10 +895,10 @@ def present(name, params, static_host_list=True, **kwargs):
}
}
else:
ret["comment"] = 'Zabbix Template "{}" created.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" did not exist.'.format(name),
"old": f'Zabbix Template "{name}" did not exist.',
"new": (
'Zabbix Template "{}" created according definition.'.format(
name
@ -909,10 +909,10 @@ def present(name, params, static_host_list=True, **kwargs):
else:
ret["result"] = True
if dry_run:
ret["comment"] = 'Zabbix Template "{}" would be updated.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be updated.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" differs.'.format(name),
"old": f'Zabbix Template "{name}" differs.',
"new": (
'Zabbix Template "{}" would be updated '
"according definition.".format(name)
@ -920,10 +920,10 @@ def present(name, params, static_host_list=True, **kwargs):
}
}
else:
ret["comment"] = 'Zabbix Template "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" updated.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" differed.'.format(name),
"old": f'Zabbix Template "{name}" differed.',
"new": (
'Zabbix Template "{}" updated according definition.'.format(
name
@ -962,15 +962,15 @@ def absent(name, **kwargs):
if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" exists.'.format(name),
"new": 'Zabbix Template "{}" would be deleted.'.format(name),
"old": f'Zabbix Template "{name}" exists.',
"new": f'Zabbix Template "{name}" would be deleted.',
}
}
else:
@ -979,11 +979,11 @@ def absent(name, **kwargs):
)
if tmpl_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Template "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Template "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Template "{}" existed.'.format(name),
"new": 'Zabbix Template "{}" deleted.'.format(name),
"old": f'Zabbix Template "{name}" existed.',
"new": f'Zabbix Template "{name}" deleted.',
}
}

View file

@ -95,7 +95,7 @@ def present(name, params, **kwargs):
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be fixed.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be fixed.'
ret["changes"] = {
name: {
"old": (
@ -119,14 +119,14 @@ def present(name, params, **kwargs):
)
if valuemap_update:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" updated.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" updated.'
ret["changes"] = {
name: {
"old": (
'Zabbix Value map "{}" differed '
"in following parameters: {}".format(name, diff_params)
),
"new": 'Zabbix Value map "{}" fixed.'.format(name),
"new": f'Zabbix Value map "{name}" fixed.',
}
}
@ -141,10 +141,10 @@ def present(name, params, **kwargs):
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be created.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be created.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" does not exist.'.format(name),
"old": f'Zabbix Value map "{name}" does not exist.',
"new": (
'Zabbix Value map "{}" would be created '
"according definition.".format(name)
@ -163,10 +163,10 @@ def present(name, params, **kwargs):
if valuemap_create:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" created.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" created.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" did not exist.'.format(name),
"old": f'Zabbix Value map "{name}" did not exist.',
"new": (
'Zabbix Value map "{}" created according definition.'.format(
name
@ -205,15 +205,15 @@ def absent(name, **kwargs):
if not object_id:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" does not exist.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" does not exist.'
else:
if dry_run:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be deleted.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" would be deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" exists.'.format(name),
"new": 'Zabbix Value map "{}" would be deleted.'.format(name),
"old": f'Zabbix Value map "{name}" exists.',
"new": f'Zabbix Value map "{name}" would be deleted.',
}
}
else:
@ -223,11 +223,11 @@ def absent(name, **kwargs):
if valuemap_delete:
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" deleted.'.format(name)
ret["comment"] = f'Zabbix Value map "{name}" deleted.'
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" existed.'.format(name),
"new": 'Zabbix Value map "{}" deleted.'.format(name),
"old": f'Zabbix Value map "{name}" existed.',
"new": f'Zabbix Value map "{name}" deleted.',
}
}

View file

@ -107,7 +107,7 @@ def get_metadata(path, refresh_token_if_needed=True):
# Connections to instance meta-data must fail fast and never be proxied
result = requests.get(
"http://169.254.169.254/latest/{}".format(path),
f"http://169.254.169.254/latest/{path}",
proxies={"http": ""},
headers=headers,
timeout=AWS_METADATA_TIMEOUT,
@ -160,7 +160,7 @@ def creds(provider):
return provider["id"], provider["key"], ""
try:
result = get_metadata("meta-data/iam/security-credentials/{}".format(role))
result = get_metadata(f"meta-data/iam/security-credentials/{role}")
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider["id"], provider["key"], ""
@ -202,7 +202,7 @@ def sig2(method, endpoint, params, provider, aws_api_version):
params_with_headers["AWSAccessKeyId"] = access_key_id
params_with_headers["SignatureVersion"] = "2"
params_with_headers["SignatureMethod"] = "HmacSHA256"
params_with_headers["Timestamp"] = "{}".format(timestamp)
params_with_headers["Timestamp"] = f"{timestamp}"
params_with_headers["Version"] = aws_api_version
keys = sorted(params_with_headers.keys())
values = list(list(map(params_with_headers.get, keys)))
@ -346,9 +346,7 @@ def sig4(
for header in sorted(new_headers.keys(), key=str.lower):
lower_header = header.lower()
a_canonical_headers.append(
"{}:{}".format(lower_header, new_headers[header].strip())
)
a_canonical_headers.append(f"{lower_header}:{new_headers[header].strip()}")
a_signed_headers.append(lower_header)
canonical_headers = "\n".join(a_canonical_headers) + "\n"
signed_headers = ";".join(a_signed_headers)
@ -390,7 +388,7 @@ def sig4(
new_headers["Authorization"] = authorization_header
requesturl = "{}?{}".format(requesturl, querystring)
requesturl = f"{requesturl}?{querystring}"
return new_headers, requesturl
@ -485,11 +483,9 @@ def query(
if endpoint is None:
if not requesturl:
endpoint = prov_dict.get(
"endpoint", "{}.{}.{}".format(product, location, service_url)
)
endpoint = prov_dict.get("endpoint", f"{product}.{location}.{service_url}")
requesturl = "https://{}/".format(endpoint)
requesturl = f"https://{endpoint}/"
else:
endpoint = urllib.parse.urlparse(requesturl).netloc
if endpoint == "":
@ -508,7 +504,7 @@ def query(
aws_api_version = prov_dict.get(
"aws_api_version",
prov_dict.get("{}_api_version".format(product), DEFAULT_AWS_API_VERSION),
prov_dict.get(f"{product}_api_version", DEFAULT_AWS_API_VERSION),
)
# Fallback to ec2's id & key if none is found, for this component

View file

@ -70,7 +70,7 @@ if HAS_MAKO:
if scheme in ("salt", "file"):
return uri
elif scheme:
raise ValueError("Unsupported URL scheme({}) in {}".format(scheme, uri))
raise ValueError(f"Unsupported URL scheme({scheme}) in {uri}")
return self.lookup.adjust_uri(uri, filename)
def get_template(self, uri, relativeto=None):

View file

@ -173,16 +173,16 @@ def write_reg_pol_data(
if not search_reg_pol(r"\[General\]\r\n", gpt_ini_data):
log.debug("LGPO_REG Util: Adding [General] section to gpt.ini")
gpt_ini_data = "[General]\r\n" + gpt_ini_data
if search_reg_pol(r"{}=".format(re.escape(gpt_extension)), gpt_ini_data):
if search_reg_pol(rf"{re.escape(gpt_extension)}=", gpt_ini_data):
# ensure the line contains the ADM guid
gpt_ext_loc = re.search(
r"^{}=.*\r\n".format(re.escape(gpt_extension)),
rf"^{re.escape(gpt_extension)}=.*\r\n",
gpt_ini_data,
re.IGNORECASE | re.MULTILINE,
)
gpt_ext_str = gpt_ini_data[gpt_ext_loc.start() : gpt_ext_loc.end()]
if not search_reg_pol(
search_string=r"{}".format(re.escape(gpt_extension_guid)),
search_string=rf"{re.escape(gpt_extension_guid)}",
policy_data=gpt_ext_str,
):
log.debug("LGPO_REG Util: Inserting gpt extension GUID")
@ -339,7 +339,7 @@ def reg_pol_to_dict(policy_data):
# REG_QWORD : 64-bit little endian
v_data = struct.unpack("<q", v_data)[0]
else:
msg = "LGPO_REG Util: Found unknown registry type: {}".format(v_type)
msg = f"LGPO_REG Util: Found unknown registry type: {v_type}"
raise CommandExecutionError(msg)
# Lookup the REG Type from the number
@ -392,9 +392,9 @@ def dict_to_reg_pol(data):
# The first three items are pretty straight forward
policy = [
# Key followed by null byte
"{}".format(key).encode("utf-16-le") + pol_section_term,
f"{key}".encode("utf-16-le") + pol_section_term,
# Value name followed by null byte
"{}".format(v_name).encode("utf-16-le") + pol_section_term,
f"{v_name}".encode("utf-16-le") + pol_section_term,
# Type in 32-bit little-endian
struct.pack("<i", v_type),
]

View file

@ -132,7 +132,7 @@ def test__load_policy_definitions():
# Remove source file
os.remove(bogus_fle)
# Remove cached file
search_string = "{}\\_bogus*.adml".format(cache_dir)
search_string = f"{cache_dir}\\_bogus*.adml"
for file_name in glob.glob(search_string):
os.remove(file_name)

View file

@ -33,7 +33,7 @@ def mock_func(func_name, return_value, test=False):
Mock any of the kubernetes state function return values and set
the test options.
"""
name = "kubernetes.{}".format(func_name)
name = f"kubernetes.{func_name}"
mocked = {name: MagicMock(return_value=return_value)}
with patch.dict(kubernetes.__salt__, mocked) as patched:
with patch.dict(kubernetes.__opts__, {"test": test}):
@ -85,7 +85,7 @@ def make_node(name="minikube"):
"labels": make_node_labels(name=name),
"name": name,
"namespace": None,
"link": "/api/v1/nodes/{name}".format(name=name),
"link": f"/api/v1/nodes/{name}",
"uid": "7811b8ae-c1a1-11e7-a55a-0800279fb61e",
},
"spec": {"external_id": name},

View file

@ -79,7 +79,7 @@ def test_boolean():
mock_en = MagicMock(return_value=[])
with patch.dict(selinux.__salt__, {"selinux.list_sebool": mock_en}):
comt = "Boolean {} is not available".format(name)
comt = f"Boolean {name} is not available"
ret.update({"comment": comt})
assert selinux.boolean(name, value) == ret

View file

@ -26,7 +26,7 @@ def _test_generated_sls_context(tmplpath, sls, **expected):
# DeNormalize tmplpath
tmplpath = str(PurePath(PurePosixPath(tmplpath)))
if tmplpath.startswith("\\"):
tmplpath = "C:{}".format(tmplpath)
tmplpath = f"C:{tmplpath}"
expected["tplpath"] = tmplpath
actual = generate_sls_context(tmplpath, sls)
assert {key: actual[key] for key in expected if key in actual} == actual