Mirror of https://github.com/saltstack/salt.git (synced 2025-04-10 06:41:40 +00:00)

commit 03ad4c6337 (parent 3dea2eb541)
Update code to be Py3.7+ to reduce merge forward conflicts

1424 changed files with 11463 additions and 12874 deletions
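Most of the hunks below look like the mechanical output of bumping the pyupgrade pre-commit hook from --py3-plus to --py37-plus (the hook change itself appears a few hunks down): str.format() calls are rewritten as f-strings with no change in behaviour. A minimal before/after sketch of that rewrite, using made-up identifiers rather than lines taken from this diff:

    # Hypothetical example, not from the Salt codebase: the kind of rewrite
    # pyupgrade performs when run with --py37-plus.
    driver = "consul"
    exc = "connection refused"

    # Before: str.format() with positional placeholders
    old_msg = "There was an error reading the key, {}: {}".format(driver, exc)

    # After: the equivalent f-string, which is what most hunks below amount to
    new_msg = f"There was an error reading the key, {driver}: {exc}"

    assert old_msg == new_msg  # behaviour is unchanged, only the syntax differs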
.gitignore (vendored): 1 line changed

@@ -121,6 +121,7 @@ Session.vim
# Nox requirements archives
nox.*.tar.bzip2
nox.*.tar.gz
nox.*.tar.xz

# Debian packages
@@ -1625,8 +1625,8 @@ repos:
rev: v3.15.1
hooks:
- id: pyupgrade
name: Drop six usage and Py2 support
args: [--py3-plus, --keep-mock]
name: Upgrade code to Py3.7+
args: [--py37-plus, --keep-mock]
exclude: >
(?x)^(
salt/client/ssh/ssh_py_shim.py
@@ -53,7 +53,7 @@ class LiterateCoding(Directive):
comment; False designates code.
"""
comment_char = "#" # TODO: move this into a directive option
comment = re.compile(r"^\s*{}[ \n]".format(comment_char))
comment = re.compile(rf"^\s*{comment_char}[ \n]")
section_test = lambda val: bool(comment.match(val))
sections = []
@@ -136,7 +136,7 @@ class LiterateFormula(LiterateCoding):
formulas_dirs = config.formulas_dirs
fpath = sls_path.replace(".", "/")
name_options = ("{}.sls".format(fpath), os.path.join(fpath, "init.sls"))
name_options = (f"{fpath}.sls", os.path.join(fpath, "init.sls"))
paths = [
os.path.join(fdir, fname)
@@ -151,7 +151,7 @@ class LiterateFormula(LiterateCoding):
except OSError:
pass
raise OSError("Could not find sls file '{}'".format(sls_path))
raise OSError(f"Could not find sls file '{sls_path}'")
class CurrentFormula(Directive):
@@ -196,7 +196,7 @@ class Formula(Directive):
targetnode = nodes.target("", "", ids=["module-" + formname], ismod=True)
self.state.document.note_explicit_target(targetnode)
indextext = "{}-formula)".format(formname)
indextext = f"{formname}-formula)"
inode = addnodes.index(
entries=[("single", indextext, "module-" + formname, "")]
)
@@ -221,9 +221,9 @@ class State(Directive):
formula = env.temp_data.get("salt:formula")
indextext = "{1} ({0}-formula)".format(formula, statename)
indextext = f"{statename} ({formula}-formula)"
inode = addnodes.index(
entries=[("single", indextext, "module-{}".format(statename), "")]
entries=[("single", indextext, f"module-{statename}", "")]
)
return [targetnode, inode]
noxfile.py: 28 lines changed

@@ -107,7 +107,7 @@ def session_warn(session, message):
try:
session.warn(message)
except AttributeError:
session.log("WARNING: {}".format(message))
session.log(f"WARNING: {message}")
def session_run_always(session, *command, **kwargs):
@@ -132,15 +132,15 @@ def session_run_always(session, *command, **kwargs):
def find_session_runner(session, name, python_version, onedir=False, **kwargs):
if onedir:
name += "-onedir-{}".format(ONEDIR_PYTHON_PATH)
name += f"-onedir-{ONEDIR_PYTHON_PATH}"
else:
name += "-{}".format(python_version)
name += f"-{python_version}"
for s, _ in session._runner.manifest.list_all_sessions():
if name not in s.signatures:
continue
for signature in s.signatures:
for key, value in kwargs.items():
param = "{}={!r}".format(key, value)
param = f"{key}={value!r}"
if param not in signature:
break
else:
@@ -211,7 +211,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
session.error("Could not find a windows requirements file for {}".format(pydir))
session.error(f"Could not find a windows requirements file for {pydir}")
elif IS_DARWIN:
if crypto is None:
_requirements_file = os.path.join(
@@ -224,7 +224,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
session.error("Could not find a darwin requirements file for {}".format(pydir))
session.error(f"Could not find a darwin requirements file for {pydir}")
elif IS_FREEBSD:
if crypto is None:
_requirements_file = os.path.join(
@@ -237,7 +237,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
session.error("Could not find a freebsd requirements file for {}".format(pydir))
session.error(f"Could not find a freebsd requirements file for {pydir}")
else:
if crypto is None:
_requirements_file = os.path.join(
@@ -250,7 +250,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
session.error("Could not find a linux requirements file for {}".format(pydir))
session.error(f"Could not find a linux requirements file for {pydir}")
def _upgrade_pip_setuptools_and_wheel(session, upgrade=True):
@@ -569,7 +569,7 @@ def test_parametrized(session, coverage, transport, crypto):
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
"--transport={}".format(transport),
f"--transport={transport}",
] + session.posargs
_pytest(session, coverage=coverage, cmd_args=cmd_args)
@@ -1014,7 +1014,7 @@ def _pytest(session, coverage, cmd_args, env=None, on_rerun=False):
if arg == "--log-file" or arg.startswith("--log-file="):
break
else:
args.append("--log-file={}".format(RUNTESTS_LOGFILE))
args.append(f"--log-file={RUNTESTS_LOGFILE}")
args.extend(cmd_args)
if PRINT_SYSTEM_INFO_ONLY and "--sys-info-and-exit" not in args:
@@ -1487,7 +1487,7 @@ def _lint(session, rcfile, flags, paths, upgrade_setuptools_and_pip=True):
]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = ["pylint", "--rcfile={}".format(rcfile)] + list(flags) + list(paths)
cmd_args = ["pylint", f"--rcfile={rcfile}"] + list(flags) + list(paths)
cmd_kwargs = {"env": {"PYTHONUNBUFFERED": "1"}}
session.run(*cmd_args, **cmd_kwargs)
@@ -1528,8 +1528,8 @@ def lint(session):
"""
Run PyLint against Salt and it's test suite.
"""
session.notify("lint-salt-{}".format(session.python))
session.notify("lint-tests-{}".format(session.python))
session.notify(f"lint-salt-{session.python}")
session.notify(f"lint-tests-{session.python}")
@nox.session(python="3", name="lint-salt")
@@ -1593,7 +1593,7 @@ def docs(session, compress, update, clean):
"""
Build Salt's Documentation
"""
session.notify("docs-html-{}(compress={})".format(session.python, compress))
session.notify(f"docs-html-{session.python}(compress={compress})")
session.notify(
find_session_runner(
session,
@@ -22,7 +22,7 @@ class TornadoImporter:
def create_module(self, spec):
if USE_VENDORED_TORNADO:
mod = importlib.import_module("salt.ext.{}".format(spec.name))
mod = importlib.import_module(f"salt.ext.{spec.name}")
else: # pragma: no cover
# Remove 'salt.ext.' from the module
mod = importlib.import_module(spec.name[9:])
@@ -108,9 +108,9 @@ DFLT_LOG_FMT_LOGFILE = "%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(lev
class SaltLogRecord(logging.LogRecord):
def __init__(self, *args, **kwargs):
logging.LogRecord.__init__(self, *args, **kwargs)
self.bracketname = "[{:<17}]".format(str(self.name))
self.bracketlevel = "[{:<8}]".format(str(self.levelname))
self.bracketprocess = "[{:>5}]".format(str(self.process))
self.bracketname = f"[{str(self.name):<17}]"
self.bracketlevel = f"[{str(self.levelname):<8}]"
self.bracketprocess = f"[{str(self.process):>5}]"
class SaltColorLogRecord(SaltLogRecord):
@@ -124,11 +124,11 @@ class SaltColorLogRecord(SaltLogRecord):
self.colorname = "{}[{:<17}]{}".format(
LOG_COLORS["name"], str(self.name), reset
)
self.colorlevel = "{}[{:<8}]{}".format(clevel, str(self.levelname), reset)
self.colorlevel = f"{clevel}[{str(self.levelname):<8}]{reset}"
self.colorprocess = "{}[{:>5}]{}".format(
LOG_COLORS["process"], str(self.process), reset
)
self.colormsg = "{}{}{}".format(cmsg, self.getMessage(), reset)
self.colormsg = f"{cmsg}{self.getMessage()}{reset}"
def get_log_record_factory():
@@ -726,7 +726,7 @@ def setup_logfile_handler(
syslog_opts["address"] = str(path.resolve().parent)
except OSError as exc:
raise LoggingRuntimeError(
"Failed to setup the Syslog logging handler: {}".format(exc)
f"Failed to setup the Syslog logging handler: {exc}"
) from exc
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
@@ -736,7 +736,7 @@ def setup_logfile_handler(
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise LoggingRuntimeError(
"The syslog facility '{}' is not known".format(facility_name)
f"The syslog facility '{facility_name}' is not known"
)
else:
# This is the case of udp or tcp without a facility specified
@@ -747,7 +747,7 @@ def setup_logfile_handler(
# This python syslog version does not know about the user provided
# facility name
raise LoggingRuntimeError(
"The syslog facility '{}' is not known".format(facility_name)
f"The syslog facility '{facility_name}' is not known"
)
syslog_opts["facility"] = facility
@@ -767,7 +767,7 @@ def setup_logfile_handler(
handler = SysLogHandler(**syslog_opts)
except OSError as exc:
raise LoggingRuntimeError(
"Failed to setup the Syslog logging handler: {}".format(exc)
f"Failed to setup the Syslog logging handler: {exc}"
) from exc
else:
# make sure, the logging directory exists and attempt to create it if necessary
@@ -137,7 +137,7 @@ class LoadAuth:
mod = self.opts["eauth_acl_module"]
if not mod:
mod = load["eauth"]
fstr = "{}.acl".format(mod)
fstr = f"{mod}.acl"
if fstr not in self.auth:
return None
fcall = salt.utils.args.format_call(
@@ -474,7 +474,7 @@ class LoadAuth:
msg = 'Authentication failure of type "user" occurred'
if not auth_ret: # auth_ret can be a boolean or the effective user id
if show_username:
msg = "{} for user {}.".format(msg, username)
msg = f"{msg} for user {username}."
ret["error"] = {"name": "UserAuthenticationError", "message": msg}
return ret
@@ -535,7 +535,7 @@ class Resolver:
if not eauth:
print("External authentication system has not been specified")
return ret
fstr = "{}.auth".format(eauth)
fstr = f"{eauth}.auth"
if fstr not in self.auth:
print(
'The specified external authentication system "{}" is not available'.format(
@@ -554,14 +554,14 @@ class Resolver:
if arg in self.opts:
ret[arg] = self.opts[arg]
elif arg.startswith("pass"):
ret[arg] = getpass.getpass("{}: ".format(arg))
ret[arg] = getpass.getpass(f"{arg}: ")
else:
ret[arg] = input("{}: ".format(arg))
ret[arg] = input(f"{arg}: ")
for kwarg, default in list(args["kwargs"].items()):
if kwarg in self.opts:
ret["kwarg"] = self.opts[kwarg]
else:
ret[kwarg] = input("{} [{}]: ".format(kwarg, default))
ret[kwarg] = input(f"{kwarg} [{default}]: ")
# Use current user if empty
if "username" in ret and not ret["username"]:
@@ -111,7 +111,7 @@ def __django_auth_setup():
django_module_name, globals(), locals(), "SaltExternalAuthModel"
)
# pylint: enable=possibly-unused-variable
DJANGO_AUTH_CLASS_str = "django_auth_module.{}".format(django_model_name)
DJANGO_AUTH_CLASS_str = f"django_auth_module.{django_model_name}"
DJANGO_AUTH_CLASS = eval(DJANGO_AUTH_CLASS_str) # pylint: disable=W0123
@@ -54,15 +54,15 @@ def _config(key, mandatory=True, opts=None):
"""
try:
if opts:
value = opts["auth.ldap.{}".format(key)]
value = opts[f"auth.ldap.{key}"]
else:
value = __opts__["auth.ldap.{}".format(key)]
value = __opts__[f"auth.ldap.{key}"]
except KeyError:
try:
value = __defopts__["auth.ldap.{}".format(key)]
value = __defopts__[f"auth.ldap.{key}"]
except KeyError:
if mandatory:
msg = "missing auth.ldap.{} in master config".format(key)
msg = f"missing auth.ldap.{key} in master config"
raise SaltInvocationError(msg)
return False
return value
@@ -120,13 +120,13 @@ class _LDAPConnection:
schema = "ldaps" if tls else "ldap"
if self.uri == "":
self.uri = "{}://{}:{}".format(schema, self.server, self.port)
self.uri = f"{schema}://{self.server}:{self.port}"
try:
if no_verify:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
self.ldap = ldap.initialize("{}".format(self.uri))
self.ldap = ldap.initialize(f"{self.uri}")
self.ldap.protocol_version = 3 # ldap.VERSION3
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
@@ -104,7 +104,7 @@ class PamMessage(Structure):
]
def __repr__(self):
return "<PamMessage {} '{}'>".format(self.msg_style, self.msg)
return f"<PamMessage {self.msg_style} '{self.msg}'>"
class PamResponse(Structure):
@@ -118,7 +118,7 @@ class PamResponse(Structure):
]
def __repr__(self):
return "<PamResponse {} '{}'>".format(self.resp_retcode, self.resp)
return f"<PamResponse {self.resp_retcode} '{self.resp}'>"
CONV_FUNC = CFUNCTYPE(
@@ -236,8 +236,7 @@ def authenticate(username, password):
ret = subprocess.run(
[str(pyexe), str(pyfile)],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
capture_output=True,
check=False,
)
if ret.returncode == 0:
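The hunk just above is not a pure f-string rewrite: in authenticate(), the explicit stdout=subprocess.PIPE, stderr=subprocess.PIPE arguments are replaced by capture_output=True, which subprocess.run() accepts since Python 3.7 and which is shorthand for piping both streams. A small standalone sketch (not Salt code) of the two spellings:

    import subprocess
    import sys

    # Pre-3.7 spelling: pipe stdout and stderr explicitly.
    old = subprocess.run(
        [sys.executable, "-c", "print('hello')"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
    )

    # Python 3.7+ spelling: capture_output=True sets up both pipes.
    new = subprocess.run(
        [sys.executable, "-c", "print('hello')"],
        capture_output=True,
        check=False,
    )

    assert old.stdout.strip() == new.stdout.strip() == b"hello"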
@@ -75,7 +75,7 @@ class Beacon:
# Run the validate function if it's available,
# otherwise there is a warning about it being missing
validate_str = "{}.validate".format(beacon_name)
validate_str = f"{beacon_name}.validate"
if validate_str in self.beacons:
valid, vcomment = self.beacons[validate_str](b_config[mod])
@@ -96,7 +96,7 @@ class Beacon:
continue
b_config[mod].append({"_beacon_name": mod})
fun_str = "{}.beacon".format(beacon_name)
fun_str = f"{beacon_name}.beacon"
if fun_str in self.beacons:
runonce = self._determine_beacon_config(
current_beacon_config, "run_once"
@@ -125,7 +125,7 @@ class Beacon:
if re.match("state.*", job["fun"]):
is_running = True
if is_running:
close_str = "{}.close".format(beacon_name)
close_str = f"{beacon_name}.close"
if close_str in self.beacons:
log.info("Closing beacon %s. State run in progress.", mod)
self.beacons[close_str](b_config[mod])
@@ -140,7 +140,7 @@ class Beacon:
try:
raw = self.beacons[fun_str](b_config[mod])
except: # pylint: disable=bare-except
error = "{}".format(sys.exc_info()[1])
error = f"{sys.exc_info()[1]}"
log.error("Unable to start %s beacon, %s", mod, error)
# send beacon error event
tag = "salt/beacon/{}/{}/".format(self.opts["id"], mod)
@@ -309,7 +309,7 @@ class Beacon:
"""
beacon_name = next(item.get("beacon_module", name) for item in beacon_data)
validate_str = "{}.validate".format(beacon_name)
validate_str = f"{beacon_name}.validate"
# Run the validate function if it's available,
# otherwise there is a warning about it being missing
if validate_str in self.beacons:
@@ -348,9 +348,9 @@ class Beacon:
complete = False
else:
if name in self.opts["beacons"]:
comment = "Updating settings for beacon item: {}".format(name)
comment = f"Updating settings for beacon item: {name}"
else:
comment = "Added new beacon item: {}".format(name)
comment = f"Added new beacon item: {name}"
complete = True
self.opts["beacons"].update(data)
@@ -376,12 +376,10 @@ class Beacon:
data[name] = beacon_data
if name in self._get_beacons(include_opts=False):
comment = (
"Cannot modify beacon item {}, it is configured in pillar.".format(name)
)
comment = f"Cannot modify beacon item {name}, it is configured in pillar."
complete = False
else:
comment = "Updating settings for beacon item: {}".format(name)
comment = f"Updating settings for beacon item: {name}"
complete = True
self.opts["beacons"].update(data)
@@ -403,16 +401,14 @@ class Beacon:
"""
if name in self._get_beacons(include_opts=False):
comment = (
"Cannot delete beacon item {}, it is configured in pillar.".format(name)
)
comment = f"Cannot delete beacon item {name}, it is configured in pillar."
complete = False
else:
if name in self.opts["beacons"]:
del self.opts["beacons"][name]
comment = "Deleting beacon item: {}".format(name)
comment = f"Deleting beacon item: {name}"
else:
comment = "Beacon item {} not found.".format(name)
comment = f"Beacon item {name} not found."
complete = True
# Fire the complete event back along with updated list of beacons
@@ -466,13 +462,11 @@ class Beacon:
"""
if name in self._get_beacons(include_opts=False):
comment = (
"Cannot enable beacon item {}, it is configured in pillar.".format(name)
)
comment = f"Cannot enable beacon item {name}, it is configured in pillar."
complete = False
else:
self._update_enabled(name, True)
comment = "Enabling beacon item {}".format(name)
comment = f"Enabling beacon item {name}"
complete = True
# Fire the complete event back along with updated list of beacons
@@ -502,7 +496,7 @@ class Beacon:
complete = False
else:
self._update_enabled(name, False)
comment = "Disabling beacon item {}".format(name)
comment = f"Disabling beacon item {name}"
complete = True
# Fire the complete event back along with updated list of beacons
@@ -130,7 +130,7 @@ except ImportError:
def __virtual__():
if os.path.isfile(BTMP):
return __virtualname__
err_msg = "{} does not exist.".format(BTMP)
err_msg = f"{BTMP} does not exist."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
@@ -95,7 +95,7 @@ def beacon(config):
# if our mount doesn't end with a $, insert one.
mount_re = mount
if not mount.endswith("$"):
mount_re = "{}$".format(mount)
mount_re = f"{mount}$"
if salt.utils.platform.is_windows():
# mount_re comes in formatted with a $ at the end
@@ -68,7 +68,7 @@ def _get_notifier(config):
Check the context for the notifier and construct it if not present
"""
beacon_name = config.get("_beacon_name", "inotify")
notifier = "{}.notifier".format(beacon_name)
notifier = f"{beacon_name}.notifier"
if notifier not in __context__:
__context__["inotify.queue"] = collections.deque()
wm = pyinotify.WatchManager()
@@ -353,7 +353,7 @@ def beacon(config):
def close(config):
config = salt.utils.beacons.list_to_dict(config)
beacon_name = config.get("_beacon_name", "inotify")
notifier = "{}.notifier".format(beacon_name)
notifier = f"{beacon_name}.notifier"
if notifier in __context__:
__context__[notifier].stop()
del __context__[notifier]
@@ -298,7 +298,7 @@ def validate(config):
" dictionary".format(fun),
)
if fun not in __salt__:
return False, "Execution function {} is not availabe!".format(fun)
return False, f"Execution function {fun} is not availabe!"
return True, "Valid configuration for the napal beacon!"
@@ -45,7 +45,7 @@ def validate(config):
# a simple str is taking as the single function with no args / kwargs
fun = config["salt_fun"]
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return False, f"{fun} not in __salt__"
else:
for entry in config["salt_fun"]:
if isinstance(entry, dict):
@@ -56,7 +56,7 @@ def validate(config):
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"args key for fun {} must be list".format(fun),
f"args key for fun {fun} must be list",
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
@@ -70,19 +70,19 @@ def validate(config):
if not isinstance(key_value, dict):
return (
False,
"{} is not a key / value pair".format(key_value),
f"{key_value} is not a key / value pair",
)
else:
return (
False,
"key {} not allowed under fun {}".format(key, fun),
f"key {key} not allowed under fun {fun}",
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return False, f"{fun} not in __salt__"
return True, "valid config"
@@ -23,9 +23,9 @@ def _run_proxy_processes(proxies):
result = {}
if not __salt__["salt_proxy.is_running"](proxy)["result"]:
__salt__["salt_proxy.configure_proxy"](proxy, start=True)
result[proxy] = "Proxy {} was started".format(proxy)
result[proxy] = f"Proxy {proxy} was started"
else:
msg = "Proxy {} is already running".format(proxy)
msg = f"Proxy {proxy} is already running"
result[proxy] = msg
log.debug(msg)
ret.append(result)
@@ -73,7 +73,7 @@ def beacon(config):
config = salt.utils.beacons.list_to_dict(config)
for sensor in config.get("sensors", {}):
sensor_function = "sensehat.get_{}".format(sensor)
sensor_function = f"sensehat.get_{sensor}"
if sensor_function not in __salt__:
log.error("No sensor for meassuring %s. Skipping.", sensor)
continue
@@ -95,6 +95,6 @@ def beacon(config):
current_value = __salt__[sensor_function]()
if not sensor_min <= current_value <= sensor_max:
ret.append({"tag": "sensehat/{}".format(sensor), sensor: current_value})
ret.append({"tag": f"sensehat/{sensor}", sensor: current_value})
return ret
@@ -73,7 +73,7 @@ def beacon(config):
__context__[pkey] = {}
for pid in track_pids:
if pid not in __context__[pkey]:
cmd = ["strace", "-f", "-e", "execve", "-p", "{}".format(pid)]
cmd = ["strace", "-f", "-e", "execve", "-p", f"{pid}"]
__context__[pkey][pid] = {}
__context__[pkey][pid]["vt"] = salt.utils.vt.Terminal(
cmd,
@@ -80,7 +80,7 @@ def beacon(config):
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE["images"]:
event["tag"] = "imported/{}".format(uuid)
event["tag"] = f"imported/{uuid}"
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
@@ -91,7 +91,7 @@ def beacon(config):
for uuid in IMGADM_STATE["images"]:
event = {}
if uuid not in current_images:
event["tag"] = "deleted/{}".format(uuid)
event["tag"] = f"deleted/{uuid}"
for label in IMGADM_STATE["images"][uuid]:
event[label] = IMGADM_STATE["images"][uuid][label]
@@ -83,7 +83,7 @@ def beacon(config):
for uuid in current_vms:
event = {}
if uuid not in VMADM_STATE["vms"]:
event["tag"] = "created/{}".format(uuid)
event["tag"] = f"created/{uuid}"
for label in current_vms[uuid]:
if label == "state":
continue
@@ -96,7 +96,7 @@ def beacon(config):
for uuid in VMADM_STATE["vms"]:
event = {}
if uuid not in current_vms:
event["tag"] = "deleted/{}".format(uuid)
event["tag"] = f"deleted/{uuid}"
for label in VMADM_STATE["vms"][uuid]:
if label == "state":
continue
@@ -143,7 +143,7 @@ def beacon(config):
for func in entry:
ret[func] = {}
try:
data = __salt__["status.{}".format(func)]()
data = __salt__[f"status.{func}"]()
except salt.exceptions.CommandExecutionError as exc:
log.debug(
"Status beacon attempted to process function %s "
@@ -166,8 +166,6 @@ def beacon(config):
except TypeError:
ret[func][item] = data[int(item)]
except KeyError as exc:
ret[func] = (
"Status beacon is incorrectly configured: {}".format(exc)
)
ret[func] = f"Status beacon is incorrectly configured: {exc}"
return [{"tag": ctime, "data": ret}]
@@ -159,7 +159,7 @@ except ImportError:
def __virtual__():
if os.path.isfile(WTMP):
return __virtualname__
err_msg = "{} does not exist.".format(WTMP)
err_msg = f"{WTMP} does not exist."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
salt/cache/__init__.py (vendored): 16 lines changed

@@ -69,7 +69,7 @@ class Cache:
def __lazy_init(self):
self._modules = salt.loader.cache(self.opts)
fun = "{}.init_kwargs".format(self.driver)
fun = f"{self.driver}.init_kwargs"
if fun in self.modules:
self._kwargs = self.modules[fun](self._kwargs)
else:
@@ -140,7 +140,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.store".format(self.driver)
fun = f"{self.driver}.store"
return self.modules[fun](bank, key, data, **self._kwargs)
def fetch(self, bank, key):
@@ -164,7 +164,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.fetch".format(self.driver)
fun = f"{self.driver}.fetch"
return self.modules[fun](bank, key, **self._kwargs)
def updated(self, bank, key):
@@ -188,7 +188,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.updated".format(self.driver)
fun = f"{self.driver}.updated"
return self.modules[fun](bank, key, **self._kwargs)
def flush(self, bank, key=None):
@@ -209,7 +209,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.flush".format(self.driver)
fun = f"{self.driver}.flush"
return self.modules[fun](bank, key=key, **self._kwargs)
def list(self, bank):
@@ -228,7 +228,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.list".format(self.driver)
fun = f"{self.driver}.list"
return self.modules[fun](bank, **self._kwargs)
def contains(self, bank, key=None):
@@ -253,7 +253,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
fun = "{}.contains".format(self.driver)
fun = f"{self.driver}.contains"
return self.modules[fun](bank, key, **self._kwargs)
@@ -288,7 +288,7 @@ class MemCache(Cache):
break
def _get_storage_id(self):
fun = "{}.storage_id".format(self.driver)
fun = f"{self.driver}.storage_id"
if fun in self.modules:
return self.modules[fun](self.kwargs)
else:
salt/cache/consul.py (vendored): 36 lines changed

@@ -119,33 +119,29 @@ def store(bank, key, data):
"""
Store a key value.
"""
c_key = "{}/{}".format(bank, key)
tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}"
tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
c_data = salt.payload.dumps(data)
api.kv.put(c_key, c_data)
api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error writing the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error writing the key, {c_key}: {exc}")
def fetch(bank, key):
"""
Fetch a key value.
"""
c_key = "{}/{}".format(bank, key)
c_key = f"{bank}/{key}"
try:
_, value = api.kv.get(c_key)
if value is None:
return {}
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
def flush(bank, key=None):
@@ -156,16 +152,14 @@ def flush(bank, key=None):
c_key = bank
tstamp_key = None
else:
c_key = "{}/{}".format(bank, key)
tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}"
tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
if tstamp_key:
api.kv.delete(tstamp_key)
return api.kv.delete(c_key, recurse=key is None)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error removing the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error removing the key, {c_key}: {exc}")
def list_(bank):
@@ -175,9 +169,7 @@ def list_(bank):
try:
_, keys = api.kv.get(bank + "/", keys=True, separator="/")
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
'There was an error getting the key "{}": {}'.format(bank, exc)
)
raise SaltCacheError(f'There was an error getting the key "{bank}": {exc}')
if keys is None:
keys = []
else:
@@ -198,9 +190,7 @@ def contains(bank, key):
c_key = "{}/{}".format(bank, key or "")
_, value = api.kv.get(c_key, keys=True)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error getting the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error getting the key, {c_key}: {exc}")
return value is not None
@@ -209,13 +199,11 @@ def updated(bank, key):
Return the Unix Epoch timestamp of when the key was last updated. Return
None if key is not found.
"""
c_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
c_key = f"{bank}/{key}{_tstamp_suffix}"
try:
_, value = api.kv.get(c_key)
if value is None:
return None
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(c_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
salt/cache/etcd_cache.py (vendored): 38 lines changed

@@ -141,16 +141,14 @@ def store(bank, key, data):
Store a key value.
"""
_init_client()
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
etcd_tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
etcd_key = f"{path_prefix}/{bank}/{key}"
etcd_tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
value = salt.payload.dumps(data)
client.write(etcd_key, base64.b64encode(value))
client.write(etcd_tstamp_key, int(time.time()))
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error writing the key, {}: {}".format(etcd_key, exc)
)
raise SaltCacheError(f"There was an error writing the key, {etcd_key}: {exc}")
def fetch(bank, key):
@@ -158,16 +156,14 @@ def fetch(bank, key):
Fetch a key value.
"""
_init_client()
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
etcd_key = f"{path_prefix}/{bank}/{key}"
try:
value = client.read(etcd_key).value
return salt.payload.loads(base64.b64decode(value))
except etcd.EtcdKeyNotFound:
return {}
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(etcd_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {etcd_key}: {exc}")
def flush(bank, key=None):
@@ -176,11 +172,11 @@ def flush(bank, key=None):
"""
_init_client()
if key is None:
etcd_key = "{}/{}".format(path_prefix, bank)
etcd_key = f"{path_prefix}/{bank}"
tstamp_key = None
else:
etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
etcd_key = f"{path_prefix}/{bank}/{key}"
tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
client.read(etcd_key)
except etcd.EtcdKeyNotFound:
@@ -190,9 +186,7 @@ def flush(bank, key=None):
client.delete(tstamp_key)
client.delete(etcd_key, recursive=True)
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error removing the key, {}: {}".format(etcd_key, exc)
)
raise SaltCacheError(f"There was an error removing the key, {etcd_key}: {exc}")
def _walk(r):
@@ -218,14 +212,14 @@ def ls(bank):
bank.
"""
_init_client()
path = "{}/{}".format(path_prefix, bank)
path = f"{path_prefix}/{bank}"
try:
return _walk(client.read(path))
except etcd.EtcdKeyNotFound:
return []
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
'There was an error getting the key "{}": {}'.format(bank, exc)
f'There was an error getting the key "{bank}": {exc}'
) from exc
@@ -242,9 +236,7 @@ def contains(bank, key):
except etcd.EtcdKeyNotFound:
return False
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error getting the key, {}: {}".format(etcd_key, exc)
)
raise SaltCacheError(f"There was an error getting the key, {etcd_key}: {exc}")
def updated(bank, key):
@@ -252,13 +244,11 @@ def updated(bank, key):
Return Unix Epoch based timestamp of when the bank/key was updated.
"""
_init_client()
tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
value = client.read(tstamp_key).value
return int(value)
except etcd.EtcdKeyNotFound:
return None
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
"There was an error reading the key, {}: {}".format(tstamp_key, exc)
)
raise SaltCacheError(f"There was an error reading the key, {tstamp_key}: {exc}")
salt/cache/localfs.py (vendored): 24 lines changed

@@ -51,10 +51,10 @@ def store(bank, key, data, cachedir):
except OSError as exc:
if exc.errno != errno.EEXIST:
raise SaltCacheError(
"The cache directory, {}, could not be created: {}".format(base, exc)
f"The cache directory, {base}, could not be created: {exc}"
)
outfile = os.path.join(base, "{}.p".format(key))
outfile = os.path.join(base, f"{key}.p")
tmpfh, tmpfname = tempfile.mkstemp(dir=base)
os.close(tmpfh)
try:
@@ -64,7 +64,7 @@ def store(bank, key, data, cachedir):
salt.utils.atomicfile.atomic_rename(tmpfname, outfile)
except OSError as exc:
raise SaltCacheError(
"There was an error writing the cache file, {}: {}".format(base, exc)
f"There was an error writing the cache file, {base}: {exc}"
)
@@ -73,7 +73,7 @@ def fetch(bank, key, cachedir):
Fetch information from a file.
"""
inkey = False
key_file = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
key_file = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(key_file):
# The bank includes the full filename, and the key is inside the file
key_file = os.path.join(cachedir, os.path.normpath(bank) + ".p")
@@ -90,7 +90,7 @@ def fetch(bank, key, cachedir):
return salt.payload.load(fh_)
except OSError as exc:
raise SaltCacheError(
'There was an error reading the cache file "{}": {}'.format(key_file, exc)
f'There was an error reading the cache file "{key_file}": {exc}'
)
@@ -98,7 +98,7 @@ def updated(bank, key, cachedir):
"""
Return the epoch of the mtime for this cache file
"""
key_file = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
key_file = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(key_file):
log.warning('Cache file "%s" does not exist', key_file)
return None
@@ -106,7 +106,7 @@ def updated(bank, key, cachedir):
return int(os.path.getmtime(key_file))
except OSError as exc:
raise SaltCacheError(
'There was an error reading the mtime for "{}": {}'.format(key_file, exc)
f'There was an error reading the mtime for "{key_file}": {exc}'
)
@@ -124,12 +124,12 @@ def flush(bank, key=None, cachedir=None):
return False
shutil.rmtree(target)
else:
target = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
target = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(target):
return False
os.remove(target)
except OSError as exc:
raise SaltCacheError('There was an error removing "{}": {}'.format(target, exc))
raise SaltCacheError(f'There was an error removing "{target}": {exc}')
return True
@@ -143,9 +143,7 @@ def list_(bank, cachedir):
try:
items = os.listdir(base)
except OSError as exc:
raise SaltCacheError(
'There was an error accessing directory "{}": {}'.format(base, exc)
)
raise SaltCacheError(f'There was an error accessing directory "{base}": {exc}')
ret = []
for item in items:
if item.endswith(".p"):
@@ -163,5 +161,5 @@ def contains(bank, key, cachedir):
base = os.path.join(cachedir, os.path.normpath(bank))
return os.path.isdir(base)
else:
keyfile = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
keyfile = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
return os.path.isfile(keyfile)
salt/cache/mysql_cache.py (vendored): 6 lines changed

@@ -144,9 +144,7 @@ def run_query(conn, query, args=None, retries=3):
if len(query) > 150:
query = query[:150] + "<...>"
raise SaltCacheError(
"Error running {}{}: {}".format(
query, "- args: {}".format(args) if args else "", e
)
"Error running {}{}: {}".format(query, f"- args: {args}" if args else "", e)
)
@@ -266,7 +264,7 @@ def store(bank, key, data):
cur, cnt = run_query(__context__.get("mysql_client"), query, args=args)
cur.close()
if cnt not in (1, 2):
raise SaltCacheError("Error storing {} {} returned {}".format(bank, key, cnt))
raise SaltCacheError(f"Error storing {bank} {key} returned {cnt}")
def fetch(bank, key):
salt/cache/redis_cache.py (vendored): 2 lines changed

@@ -351,7 +351,7 @@ def _get_banks_to_remove(redis_server, bank, path=""):
A simple tree traversal algorithm that builds the list of banks to remove,
starting from an arbitrary node in the tree.
"""
current_path = bank if not path else "{path}/{bank}".format(path=path, bank=bank)
current_path = bank if not path else f"{path}/{bank}"
bank_paths_to_remove = [current_path]
# as you got here, you'll be removed
@@ -143,7 +143,7 @@ class AsyncReqChannel:
auth,
timeout=REQUEST_CHANNEL_TIMEOUT,
tries=REQUEST_CHANNEL_TRIES,
**kwargs
**kwargs,
):
self.opts = dict(opts)
self.transport = transport
@@ -446,7 +446,7 @@ class AsyncPubChannel:
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in str(exc):
raise salt.exceptions.SaltClientError(
"Unable to sign_in to master: {}".format(exc)
f"Unable to sign_in to master: {exc}"
) # TODO: better error message
def close(self):
@@ -144,9 +144,7 @@ class ReqServerChannel:
raise salt.ext.tornado.gen.Return("bad load: id contains a null byte")
except TypeError:
log.error("Payload contains non-string id: %s", payload)
raise salt.ext.tornado.gen.Return(
"bad load: id {} is not a string".format(id_)
)
raise salt.ext.tornado.gen.Return(f"bad load: id {id_} is not a string")
version = 0
if "version" in payload:
@@ -191,7 +191,7 @@ class Batch:
if next_:
if not self.quiet:
salt.utils.stringutils.print_cli(
"\nExecuting run on {}\n".format(sorted(next_))
f"\nExecuting run on {sorted(next_)}\n"
)
# create a new iterator for this batch of minions
return_value = self.opts.get("return", self.opts.get("ret", ""))
@@ -75,7 +75,7 @@ class BaseCaller:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get("fun", "")):
salt.utils.stringutils.print_cli("{}:\n{}\n".format(name, docs[name]))
salt.utils.stringutils.print_cli(f"{name}:\n{docs[name]}\n")
def print_grains(self):
"""
@@ -130,7 +130,7 @@ class BaseCaller:
salt.minion.get_proc_dir(self.opts["cachedir"]), ret["jid"]
)
if fun not in self.minion.functions:
docs = self.minion.functions["sys.doc"]("{}*".format(fun))
docs = self.minion.functions["sys.doc"](f"{fun}*")
if docs:
docs[fun] = self.minion.functions.missing_fun_string(fun)
ret["out"] = "nested"
@@ -194,20 +194,16 @@ class BaseCaller:
executors = [executors]
try:
for name in executors:
fname = "{}.execute".format(name)
fname = f"{name}.execute"
if fname not in self.minion.executors:
raise SaltInvocationError(
"Executor '{}' is not available".format(name)
)
raise SaltInvocationError(f"Executor '{name}' is not available")
ret["return"] = self.minion.executors[fname](
self.opts, data, func, args, kwargs
)
if ret["return"] is not None:
break
except TypeError as exc:
sys.stderr.write(
"\nPassed invalid arguments: {}.\n\nUsage:\n".format(exc)
)
sys.stderr.write(f"\nPassed invalid arguments: {exc}.\n\nUsage:\n")
salt.utils.stringutils.print_cli(func.__doc__)
active_level = LOG_LEVELS.get(
self.opts["log_level"].lower(), logging.ERROR
@@ -272,7 +268,7 @@ class BaseCaller:
continue
try:
ret["success"] = True
self.minion.returners["{}.returner".format(returner)](ret)
self.minion.returners[f"{returner}.returner"](ret)
except Exception: # pylint: disable=broad-except
pass
@@ -44,7 +44,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
auto_reconnect=True,
)
except SaltClientError as exc:
self.exit(2, "{}\n".format(exc))
self.exit(2, f"{exc}\n")
return
if self.options.batch or self.options.static:
@@ -146,9 +146,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
if self.config["async"]:
jid = self.local_client.cmd_async(**kwargs)
salt.utils.stringutils.print_cli(
"Executed command with job ID: {}".format(jid)
)
salt.utils.stringutils.print_cli(f"Executed command with job ID: {jid}")
return
# local will be None when there was an error
@@ -337,16 +335,14 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
salt.utils.stringutils.print_cli("Summary")
salt.utils.stringutils.print_cli("-------------------------------------------")
salt.utils.stringutils.print_cli(
"# of minions targeted: {}".format(return_counter + not_return_counter)
f"# of minions targeted: {return_counter + not_return_counter}"
)
salt.utils.stringutils.print_cli(f"# of minions returned: {return_counter}")
salt.utils.stringutils.print_cli(
f"# of minions that did not return: {not_return_counter}"
)
salt.utils.stringutils.print_cli(
"# of minions returned: {}".format(return_counter)
)
salt.utils.stringutils.print_cli(
"# of minions that did not return: {}".format(not_return_counter)
)
salt.utils.stringutils.print_cli(
"# of minions with errors: {}".format(len(failed_minions))
f"# of minions with errors: {len(failed_minions)}"
)
if self.options.verbose:
if not_connected_minions:
@@ -449,7 +445,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
if not ret:
self.exit(2, "No minions found to gather docs from\n")
if isinstance(ret, str):
self.exit(2, "{}\n".format(ret))
self.exit(2, f"{ret}\n")
for host in ret:
if isinstance(ret[host], str) and (
ret[host].startswith("Minion did not return")
@@ -464,6 +460,6 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
salt.output.display_output({fun: docs[fun]}, "nested", self.config)
else:
for fun in sorted(docs):
salt.utils.stringutils.print_cli("{}:".format(fun))
salt.utils.stringutils.print_cli(f"{fun}:")
salt.utils.stringutils.print_cli(docs[fun])
salt.utils.stringutils.print_cli("")
@@ -245,7 +245,7 @@ class LocalClient:
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
key_user = key_user.replace("\\", "_")
keyfile = os.path.join(self.opts["cachedir"], ".{}_key".format(key_user))
keyfile = os.path.join(self.opts["cachedir"], f".{key_user}_key")
try:
# Make sure all key parent directories are accessible
salt.utils.verify.check_path_traversal(
@@ -265,7 +265,7 @@ class LocalClient:
try:
return range_.expand(tgt)
except seco.range.RangeException as err:
print("Range server exception: {}".format(err))
print(f"Range server exception: {err}")
return []
def _get_timeout(self, timeout):
@@ -1053,11 +1053,11 @@ class LocalClient:
:returns: all of the information for the JID
"""
if verbose:
msg = "Executing job with jid {}".format(jid)
msg = f"Executing job with jid {jid}"
print(msg)
print("-" * len(msg) + "\n")
elif show_jid:
print("jid: {}".format(jid))
print(f"jid: {jid}")
if timeout is None:
timeout = self.opts["timeout"]
fret = {}
@@ -1163,11 +1163,9 @@ class LocalClient:
# iterator for this job's return
if self.opts["order_masters"]:
# If we are a MoM, we need to gather expected minions from downstreams masters.
ret_iter = self.get_returns_no_block(
"(salt/job|syndic/.*)/{}".format(jid), "regex"
)
ret_iter = self.get_returns_no_block(f"(salt/job|syndic/.*)/{jid}", "regex")
else:
ret_iter = self.get_returns_no_block("salt/job/{}".format(jid))
ret_iter = self.get_returns_no_block(f"salt/job/{jid}")
# iterator for the info of this job
jinfo_iter = []
# open event jids that need to be un-subscribed from later
@@ -1547,11 +1545,11 @@ class LocalClient:
log.trace("entered - function get_cli_static_event_returns()")
minions = set(minions)
if verbose:
msg = "Executing job with jid {}".format(jid)
msg = f"Executing job with jid {jid}"
print(msg)
print("-" * len(msg) + "\n")
elif show_jid:
print("jid: {}".format(jid))
print(f"jid: {jid}")
if timeout is None:
timeout = self.opts["timeout"]
@@ -1581,7 +1579,7 @@ class LocalClient:
time_left = timeout_at - int(time.time())
# Wait 0 == forever, use a minimum of 1s
wait = max(1, time_left)
jid_tag = "salt/job/{}".format(jid)
jid_tag = f"salt/job/{jid}"
raw = self.event.get_event(
wait, jid_tag, auto_reconnect=self.auto_reconnect
)
@@ -1641,11 +1639,11 @@ class LocalClient:
log.trace("func get_cli_event_returns()")
if verbose:
msg = "Executing job with jid {}".format(jid)
msg = f"Executing job with jid {jid}"
print(msg)
print("-" * len(msg) + "\n")
elif show_jid:
print("jid: {}".format(jid))
print(f"jid: {jid}")
# lazy load the connected minions
connected_minions = None
@@ -1684,7 +1682,7 @@ class LocalClient:
if (
self.opts["minion_data_cache"]
and salt.cache.factory(self.opts).contains(
"minions/{}".format(id_), "data"
f"minions/{id_}", "data"
)
and connected_minions
and id_ not in connected_minions
@@ -1775,9 +1773,7 @@ class LocalClient:
"""
if ng not in self.opts["nodegroups"]:
conf_file = self.opts.get("conf_file", "the master config file")
raise SaltInvocationError(
"Node group {} unavailable in {}".format(ng, conf_file)
)
raise SaltInvocationError(f"Node group {ng} unavailable in {conf_file}")
return salt.utils.minions.nodegroup_comp(ng, self.opts["nodegroups"])
def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs):
@@ -2062,8 +2058,8 @@ class LocalClient:
def _clean_up_subscriptions(self, job_id):
if self.opts.get("order_masters"):
self.event.unsubscribe("syndic/.*/{}".format(job_id), "regex")
self.event.unsubscribe("salt/job/{}".format(job_id))
self.event.unsubscribe(f"syndic/.*/{job_id}", "regex")
self.event.unsubscribe(f"salt/job/{job_id}")
def destroy(self):
if self.event is not None:
@@ -2122,7 +2118,7 @@ class FunctionWrapper(dict):
"""
args = list(args)
for _key, _val in kwargs.items():
args.append("{}={}".format(_key, _val))
args.append(f"{_key}={_val}")
return self.local.cmd(self.minion, key, args)
return func
@@ -2272,9 +2268,9 @@ class ProxyCaller:
if isinstance(executors, str):
executors = [executors]
for name in executors:
fname = "{}.execute".format(name)
fname = f"{name}.execute"
if fname not in self.sminion.executors:
raise SaltInvocationError("Executor '{}' is not available".format(name))
raise SaltInvocationError(f"Executor '{name}' is not available")
return_data = self.sminion.executors[fname](
self.opts, data, func, args, kwargs
)
@@ -273,7 +273,7 @@ class SyncClientMixin(ClientStateMixin):
return True
try:
return self.opts["{}_returns".format(class_name)]
return self.opts[f"{class_name}_returns"]
except KeyError:
# No such option, assume this isn't one we care about gating and
# just return True.
@@ -300,7 +300,7 @@ class SyncClientMixin(ClientStateMixin):
tag = low.get("__tag__", salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {
"fun": "{}.{}".format(self.client, fun),
"fun": f"{self.client}.{fun}",
"jid": jid,
"user": low.get("__user__", "UNKNOWN"),
}
@@ -523,7 +523,7 @@ class AsyncClientMixin(ClientStateMixin):
tag,
jid,
daemonize=True,
full_return=False
full_return=False,
):
"""
Run this method in a multiprocess target to execute the function
@@ -48,7 +48,7 @@ class NetapiClient:
for fun in self.netapi:
if fun.endswith(".start"):
name = "RunNetapi({})".format(self.netapi[fun].__module__)
name = f"RunNetapi({self.netapi[fun].__module__})"
log.info("Starting %s", name)
self.process_manager.add_process(
RunNetapi, args=(self.opts, fun), name=name
@@ -129,26 +129,26 @@ class Shell:
options.append("PasswordAuthentication=no")
if self.opts.get("_ssh_version", (0,)) > (4, 9):
options.append("GSSAPIAuthentication=no")
options.append("ConnectTimeout={}".format(self.timeout))
options.append(f"ConnectTimeout={self.timeout}")
if self.opts.get("ignore_host_keys"):
options.append("StrictHostKeyChecking=no")
if self.opts.get("no_host_keys"):
options.extend(["StrictHostKeyChecking=no", "UserKnownHostsFile=/dev/null"])
known_hosts = self.opts.get("known_hosts_file")
if known_hosts and os.path.isfile(known_hosts):
options.append("UserKnownHostsFile={}".format(known_hosts))
options.append(f"UserKnownHostsFile={known_hosts}")
if self.port:
options.append("Port={}".format(self.port))
options.append(f"Port={self.port}")
if self.priv and self.priv != "agent-forwarding":
options.append("IdentityFile={}".format(self.priv))
options.append(f"IdentityFile={self.priv}")
if self.user:
options.append("User={}".format(self.user))
options.append(f"User={self.user}")
if self.identities_only:
options.append("IdentitiesOnly=yes")
ret = []
for option in options:
ret.append("-o {} ".format(option))
ret.append(f"-o {option} ")
return "".join(ret)
def _passwd_opts(self):
@@ -164,7 +164,7 @@ class Shell:
]
if self.opts["_ssh_version"] > (4, 9):
options.append("GSSAPIAuthentication=no")
options.append("ConnectTimeout={}".format(self.timeout))
options.append(f"ConnectTimeout={self.timeout}")
if self.opts.get("ignore_host_keys"):
options.append("StrictHostKeyChecking=no")
if self.opts.get("no_host_keys"):
@@ -183,19 +183,19 @@ class Shell:
]
)
if self.port:
options.append("Port={}".format(self.port))
options.append(f"Port={self.port}")
if self.user:
options.append("User={}".format(self.user))
options.append(f"User={self.user}")
if self.identities_only:
options.append("IdentitiesOnly=yes")
ret = []
for option in options:
ret.append("-o {} ".format(option))
ret.append(f"-o {option} ")
return "".join(ret)
def _ssh_opts(self):
return " ".join(["-o {}".format(opt) for opt in self.ssh_options])
return " ".join([f"-o {opt}" for opt in self.ssh_options])
def _copy_id_str_old(self):
"""
@@ -206,7 +206,7 @@ class Shell:
# passwords containing '$'
return "{} {} '{} -p {} {} {}@{}'".format(
"ssh-copy-id",
"-i {}.pub".format(self.priv),
f"-i {self.priv}.pub",
self._passwd_opts(),
self.port,
self._ssh_opts(),
@@ -225,7 +225,7 @@ class Shell:
# passwords containing '$'
return "{} {} {} -p {} {} {}@{}".format(
"ssh-copy-id",
"-i {}.pub".format(self.priv),
f"-i {self.priv}.pub",
self._passwd_opts(),
self.port,
self._ssh_opts(),
@@ -261,10 +261,7 @@ class Shell:
if ssh != "scp" and self.remote_port_forwards:
command.append(
" ".join(
[
"-R {}".format(item)
for item in self.remote_port_forwards.split(",")
]
[f"-R {item}" for item in self.remote_port_forwards.split(",")]
)
)
if self.ssh_options:
@@ -306,7 +303,7 @@ class Shell:
rcode = None
cmd = self._cmd_str(cmd)
logmsg = "Executing non-blocking command: {}".format(cmd)
logmsg = f"Executing non-blocking command: {cmd}"
if self.passwd:
logmsg = logmsg.replace(self.passwd, ("*" * 6))
log.debug(logmsg)
@@ -325,7 +322,7 @@ class Shell:
"""
cmd = self._cmd_str(cmd)
logmsg = "Executing command: {}".format(cmd)
logmsg = f"Executing command: {cmd}"
if self.passwd:
logmsg = logmsg.replace(self.passwd, ("*" * 6))
if 'decode("base64")' in logmsg or "base64.b64decode(" in logmsg:
@@ -342,17 +339,17 @@ class Shell:
scp a file or files to a remote system
"""
if makedirs:
self.exec_cmd("mkdir -p {}".format(os.path.dirname(remote)))
self.exec_cmd(f"mkdir -p {os.path.dirname(remote)}")
# scp needs [<ipv6}
host = self.host
if ":" in host:
host = "[{}]".format(host)
host = f"[{host}]"
cmd = "{} {}:{}".format(local, host, remote)
cmd = f"{local} {host}:{remote}"
cmd = self._cmd_str(cmd, ssh="scp")
logmsg = "Executing command: {}".format(cmd)
logmsg = f"Executing command: {cmd}"
|
||||
if self.passwd:
|
||||
logmsg = logmsg.replace(self.passwd, ("*" * 6))
|
||||
log.debug(logmsg)
|
||||
|
@ -371,7 +368,7 @@ class Shell:
|
|||
cmd_lst = shlex.split(cmd)
|
||||
else:
|
||||
cmd_lst = shlex.split(ssh_part)
|
||||
cmd_lst.append("/bin/sh {}".format(cmd_part))
|
||||
cmd_lst.append(f"/bin/sh {cmd_part}")
|
||||
return cmd_lst
|
||||
|
||||
def _run_cmd(self, cmd, key_accept=False, passwd_retries=3):
|
||||
|
|
|
@ -30,7 +30,7 @@ class FunctionWrapper:
|
|||
cmd_prefix=None,
|
||||
aliases=None,
|
||||
minion_opts=None,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
self.cmd_prefix = cmd_prefix
|
||||
|
@ -79,14 +79,14 @@ class FunctionWrapper:
|
|||
cmd_prefix=cmd,
|
||||
aliases=self.aliases,
|
||||
minion_opts=self.minion_opts,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if self.cmd_prefix:
|
||||
# We're in an inner FunctionWrapper as created by the code block
|
||||
# above. Reconstruct the original cmd in the form 'cmd.run' and
|
||||
# then evaluate as normal
|
||||
cmd = "{}.{}".format(self.cmd_prefix, cmd)
|
||||
cmd = f"{self.cmd_prefix}.{cmd}"
|
||||
|
||||
if cmd in self.wfuncs:
|
||||
return self.wfuncs[cmd]
|
||||
|
@ -115,7 +115,7 @@ class FunctionWrapper:
|
|||
disable_wipe=True,
|
||||
fsclient=self.fsclient,
|
||||
minion_opts=self.minion_opts,
|
||||
**self.kwargs
|
||||
**self.kwargs,
|
||||
)
|
||||
stdout, stderr, retcode = single.cmd_block()
|
||||
if stderr.count("Permission Denied"):
|
||||
|
@ -149,15 +149,13 @@ class FunctionWrapper:
|
|||
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
|
||||
# containing only 'cmd' module calls, in that case. We don't
|
||||
# support assigning directly to prefixes in this way
|
||||
raise KeyError(
|
||||
"Cannot assign to module key {} in the FunctionWrapper".format(cmd)
|
||||
)
|
||||
raise KeyError(f"Cannot assign to module key {cmd} in the FunctionWrapper")
|
||||
|
||||
if self.cmd_prefix:
|
||||
# We're in an inner FunctionWrapper as created by the first code
|
||||
# block in __getitem__. Reconstruct the original cmd in the form
|
||||
# 'cmd.run' and then evaluate as normal
|
||||
cmd = "{}.{}".format(self.cmd_prefix, cmd)
|
||||
cmd = f"{self.cmd_prefix}.{cmd}"
|
||||
|
||||
if cmd in self.wfuncs:
|
||||
self.wfuncs[cmd] = value
|
||||
|
|
|
@ -235,9 +235,9 @@ def dot_vals(value):
|
|||
"""
|
||||
ret = {}
|
||||
for key, val in __pillar__.get("master", {}).items():
|
||||
if key.startswith("{}.".format(value)):
|
||||
if key.startswith(f"{value}."):
|
||||
ret[key] = val
|
||||
for key, val in __opts__.items():
|
||||
if key.startswith("{}.".format(value)):
|
||||
if key.startswith(f"{value}."):
|
||||
ret[key] = val
|
||||
return ret
|
||||
|
|
|
@ -102,7 +102,7 @@ def _render_filenames(path, dest, saltenv, template):
|
|||
# render the path as a template using path_template_engine as the engine
|
||||
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
|
||||
raise CommandExecutionError(
|
||||
"Attempted to render file paths with unavailable engine {}".format(template)
|
||||
f"Attempted to render file paths with unavailable engine {template}"
|
||||
)
|
||||
|
||||
kwargs = {}
|
||||
|
|
|
@ -60,17 +60,17 @@ def communicator(func):
|
|||
trace = traceback.format_exc()
|
||||
queue.put("KEYBOARDINT")
|
||||
queue.put("Keyboard interrupt")
|
||||
queue.put("{}\n{}\n".format(ex, trace))
|
||||
queue.put(f"{ex}\n{trace}\n")
|
||||
except Exception as ex: # pylint: disable=broad-except
|
||||
trace = traceback.format_exc()
|
||||
queue.put("ERROR")
|
||||
queue.put("Exception")
|
||||
queue.put("{}\n{}\n".format(ex, trace))
|
||||
queue.put(f"{ex}\n{trace}\n")
|
||||
except SystemExit as ex:
|
||||
trace = traceback.format_exc()
|
||||
queue.put("ERROR")
|
||||
queue.put("System exit")
|
||||
queue.put("{}\n{}\n".format(ex, trace))
|
||||
queue.put(f"{ex}\n{trace}\n")
|
||||
return ret
|
||||
|
||||
return _call
|
||||
|
@ -150,7 +150,7 @@ def enter_mainloop(
|
|||
" we bail out".format(target)
|
||||
)
|
||||
log.error(msg)
|
||||
raise SaltCloudSystemExit("Exception caught\n{}".format(msg))
|
||||
raise SaltCloudSystemExit(f"Exception caught\n{msg}")
|
||||
elif mapped_args is not None:
|
||||
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
|
||||
ret = pool.map(func=target, iterable=iterable)
|
||||
|
@ -161,12 +161,12 @@ def enter_mainloop(
|
|||
if test in ["ERROR", "KEYBOARDINT"]:
|
||||
type_ = queue.get()
|
||||
trace = queue.get()
|
||||
msg = "Caught {}, terminating workers\n".format(type_)
|
||||
msg += "TRACE: {}\n".format(trace)
|
||||
msg = f"Caught {type_}, terminating workers\n"
|
||||
msg += f"TRACE: {trace}\n"
|
||||
log.error(msg)
|
||||
pool.terminate()
|
||||
pool.join()
|
||||
raise SaltCloudSystemExit("Exception caught\n{}".format(msg))
|
||||
raise SaltCloudSystemExit(f"Exception caught\n{msg}")
|
||||
elif test in ["END"] or (callback and callback(test)):
|
||||
pool.close()
|
||||
pool.join()
|
||||
|
@ -199,7 +199,7 @@ class CloudClient:
|
|||
for name, profile in pillars.pop("profiles", {}).items():
|
||||
provider = profile["provider"].split(":")[0]
|
||||
driver = next(iter(self.opts["providers"][provider].keys()))
|
||||
profile["provider"] = "{}:{}".format(provider, driver)
|
||||
profile["provider"] = f"{provider}:{driver}"
|
||||
profile["profile"] = name
|
||||
self.opts["profiles"].update({name: profile})
|
||||
self.opts["providers"][provider][driver]["profiles"].update(
|
||||
|
@ -392,7 +392,7 @@ class CloudClient:
|
|||
mapper = salt.cloud.Map(self._opts_defaults())
|
||||
providers = self.opts["providers"]
|
||||
if provider in providers:
|
||||
provider += ":{}".format(next(iter(providers[provider].keys())))
|
||||
provider += f":{next(iter(providers[provider].keys()))}"
|
||||
else:
|
||||
return False
|
||||
if isinstance(names, str):
|
||||
|
@ -433,7 +433,7 @@ class CloudClient:
|
|||
mapper = salt.cloud.Map(self._opts_defaults())
|
||||
providers = mapper.map_providers_parallel()
|
||||
if provider in providers:
|
||||
provider += ":{}".format(next(iter(providers[provider].keys())))
|
||||
provider += f":{next(iter(providers[provider].keys()))}"
|
||||
else:
|
||||
return False
|
||||
if isinstance(names, str):
|
||||
|
@ -518,7 +518,7 @@ class Cloud:
|
|||
for alias, drivers in self.opts["providers"].items():
|
||||
if len(drivers) > 1:
|
||||
for driver in drivers:
|
||||
providers.add("{}:{}".format(alias, driver))
|
||||
providers.add(f"{alias}:{driver}")
|
||||
continue
|
||||
providers.add(alias)
|
||||
return providers
|
||||
|
@ -609,7 +609,7 @@ class Cloud:
|
|||
pmap = {}
|
||||
for alias, drivers in self.opts["providers"].items():
|
||||
for driver, details in drivers.items():
|
||||
fun = "{}.{}".format(driver, query)
|
||||
fun = f"{driver}.{query}"
|
||||
if fun not in self.clouds:
|
||||
log.error("Public cloud provider %s is not available", driver)
|
||||
continue
|
||||
|
@ -659,11 +659,11 @@ class Cloud:
|
|||
# for minimum information, Otherwise still use query param.
|
||||
if (
|
||||
opts.get("selected_query_option") is None
|
||||
and "{}.list_nodes_min".format(driver) in self.clouds
|
||||
and f"{driver}.list_nodes_min" in self.clouds
|
||||
):
|
||||
this_query = "list_nodes_min"
|
||||
|
||||
fun = "{}.{}".format(driver, this_query)
|
||||
fun = f"{driver}.{this_query}"
|
||||
if fun not in self.clouds:
|
||||
log.error("Public cloud provider %s is not available", driver)
|
||||
continue
|
||||
|
@ -771,7 +771,7 @@ class Cloud:
|
|||
provider_by_driver[name][alias] = data
|
||||
|
||||
for driver, providers_data in provider_by_driver.items():
|
||||
fun = "{}.optimize_providers".format(driver)
|
||||
fun = f"{driver}.optimize_providers"
|
||||
if fun not in self.clouds:
|
||||
log.debug("The '%s' cloud driver is unable to be optimized.", driver)
|
||||
|
||||
|
@ -801,7 +801,7 @@ class Cloud:
|
|||
return data
|
||||
|
||||
for alias, driver in lookups:
|
||||
fun = "{}.avail_locations".format(driver)
|
||||
fun = f"{driver}.avail_locations"
|
||||
if fun not in self.clouds:
|
||||
# The capability to gather locations is not supported by this
|
||||
# cloud module
|
||||
|
@ -842,7 +842,7 @@ class Cloud:
|
|||
return data
|
||||
|
||||
for alias, driver in lookups:
|
||||
fun = "{}.avail_images".format(driver)
|
||||
fun = f"{driver}.avail_images"
|
||||
if fun not in self.clouds:
|
||||
# The capability to gather images is not supported by this
|
||||
# cloud module
|
||||
|
@ -882,7 +882,7 @@ class Cloud:
|
|||
return data
|
||||
|
||||
for alias, driver in lookups:
|
||||
fun = "{}.avail_sizes".format(driver)
|
||||
fun = f"{driver}.avail_sizes"
|
||||
if fun not in self.clouds:
|
||||
# The capability to gather sizes is not supported by this
|
||||
# cloud module
|
||||
|
@ -1017,7 +1017,7 @@ class Cloud:
|
|||
else:
|
||||
log.info("Destroying in non-parallel mode.")
|
||||
for alias, driver, name in vms_to_destroy:
|
||||
fun = "{}.destroy".format(driver)
|
||||
fun = f"{driver}.destroy"
|
||||
with salt.utils.context.func_globals_inject(
|
||||
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
|
||||
):
|
||||
|
@ -1050,7 +1050,7 @@ class Cloud:
|
|||
key_file = os.path.join(
|
||||
self.opts["pki_dir"], "minions", minion_dict.get("id", name)
|
||||
)
|
||||
globbed_key_file = glob.glob("{}.*".format(key_file))
|
||||
globbed_key_file = glob.glob(f"{key_file}.*")
|
||||
|
||||
if not os.path.isfile(key_file) and not globbed_key_file:
|
||||
# There's no such key file!? It might have been renamed
|
||||
|
@ -1090,25 +1090,25 @@ class Cloud:
|
|||
)
|
||||
while True:
|
||||
for idx, filename in enumerate(globbed_key_file):
|
||||
print(" {}: {}".format(idx, os.path.basename(filename)))
|
||||
print(f" {idx}: {os.path.basename(filename)}")
|
||||
selection = input("Which minion key should be deleted(number)? ")
|
||||
try:
|
||||
selection = int(selection)
|
||||
except ValueError:
|
||||
print("'{}' is not a valid selection.".format(selection))
|
||||
print(f"'{selection}' is not a valid selection.")
|
||||
|
||||
try:
|
||||
filename = os.path.basename(globbed_key_file.pop(selection))
|
||||
except Exception: # pylint: disable=broad-except
|
||||
continue
|
||||
|
||||
delete = input("Delete '{}'? [Y/n]? ".format(filename))
|
||||
delete = input(f"Delete '{filename}'? [Y/n]? ")
|
||||
if delete == "" or delete.lower().startswith("y"):
|
||||
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
|
||||
print("Deleted '{}'".format(filename))
|
||||
print(f"Deleted '{filename}'")
|
||||
break
|
||||
|
||||
print("Did not delete '{}'".format(filename))
|
||||
print(f"Did not delete '{filename}'")
|
||||
break
|
||||
|
||||
if names and not processed:
|
||||
|
@ -1138,7 +1138,7 @@ class Cloud:
|
|||
if node in names:
|
||||
acts[prov].append(node)
|
||||
for prov, names_ in acts.items():
|
||||
fun = "{}.reboot".format(prov)
|
||||
fun = f"{prov}.reboot"
|
||||
for name in names_:
|
||||
ret.append({name: self.clouds[fun](name)})
|
||||
|
||||
|
@ -1155,7 +1155,7 @@ class Cloud:
|
|||
)
|
||||
|
||||
alias, driver = vm_["provider"].split(":")
|
||||
fun = "{}.create".format(driver)
|
||||
fun = f"{driver}.create"
|
||||
if fun not in self.clouds:
|
||||
log.error(
|
||||
"Creating '%s' using '%s' as the provider "
|
||||
|
@ -1220,7 +1220,7 @@ class Cloud:
|
|||
|
||||
try:
|
||||
alias, driver = vm_["provider"].split(":")
|
||||
func = "{}.create".format(driver)
|
||||
func = f"{driver}.create"
|
||||
with salt.utils.context.func_globals_inject(
|
||||
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
|
||||
):
|
||||
|
@ -1357,7 +1357,7 @@ class Cloud:
|
|||
handle them
|
||||
"""
|
||||
if profile not in self.opts["profiles"]:
|
||||
msg = "Profile {} is not defined".format(profile)
|
||||
msg = f"Profile {profile} is not defined"
|
||||
log.error(msg)
|
||||
return {"Error": msg}
|
||||
|
||||
|
@ -1396,7 +1396,7 @@ class Cloud:
|
|||
if name in vms:
|
||||
prov = vms[name]["provider"]
|
||||
driv = vms[name]["driver"]
|
||||
msg = "{} already exists under {}:{}".format(name, prov, driv)
|
||||
msg = f"{name} already exists under {prov}:{driv}"
|
||||
log.error(msg)
|
||||
ret[name] = {"Error": msg}
|
||||
continue
|
||||
|
@ -1542,14 +1542,12 @@ class Cloud:
|
|||
raise SaltCloudSystemExit(
|
||||
"More than one results matched '{}'. Please specify one of: {}".format(
|
||||
prov,
|
||||
", ".join(
|
||||
["{}:{}".format(alias, driver) for (alias, driver) in matches]
|
||||
),
|
||||
", ".join([f"{alias}:{driver}" for (alias, driver) in matches]),
|
||||
)
|
||||
)
|
||||
|
||||
alias, driver = matches.pop()
|
||||
fun = "{}.{}".format(driver, func)
|
||||
fun = f"{driver}.{func}"
|
||||
if fun not in self.clouds:
|
||||
raise SaltCloudSystemExit(
|
||||
"The '{}' cloud provider alias, for the '{}' driver, does "
|
||||
|
@ -1573,7 +1571,7 @@ class Cloud:
|
|||
"""
|
||||
for alias, drivers in self.opts["providers"].copy().items():
|
||||
for driver in drivers.copy():
|
||||
fun = "{}.get_configured_provider".format(driver)
|
||||
fun = f"{driver}.get_configured_provider"
|
||||
if fun not in self.clouds:
|
||||
# Mis-configured provider that got removed?
|
||||
log.warning(
|
||||
|
@ -1898,7 +1896,7 @@ class Map(Cloud):
|
|||
"The required profile, '{}', defined in the map "
|
||||
"does not exist. The defined nodes, {}, will not "
|
||||
"be created.".format(
|
||||
profile_name, ", ".join("'{}'".format(node) for node in nodes)
|
||||
profile_name, ", ".join(f"'{node}'" for node in nodes)
|
||||
)
|
||||
)
|
||||
log.error(msg)
|
||||
|
@ -1931,7 +1929,7 @@ class Map(Cloud):
|
|||
|
||||
# Update profile data with the map overrides
|
||||
for setting in ("grains", "master", "minion", "volumes", "requires"):
|
||||
deprecated = "map_{}".format(setting)
|
||||
deprecated = f"map_{setting}"
|
||||
if deprecated in overrides:
|
||||
log.warning(
|
||||
"The use of '%s' on the '%s' mapping has "
|
||||
|
|
|
@ -412,9 +412,7 @@ def get_image(vm_):
|
|||
|
||||
if vm_image and str(vm_image) in images:
|
||||
return images[vm_image]["ImageId"]
|
||||
raise SaltCloudNotFound(
|
||||
"The specified image, '{}', could not be found.".format(vm_image)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
|
||||
|
||||
|
||||
def get_securitygroup(vm_):
|
||||
|
@ -432,7 +430,7 @@ def get_securitygroup(vm_):
|
|||
if securitygroup and str(securitygroup) in sgs:
|
||||
return sgs[securitygroup]["SecurityGroupId"]
|
||||
raise SaltCloudNotFound(
|
||||
"The specified security group, '{}', could not be found.".format(securitygroup)
|
||||
f"The specified security group, '{securitygroup}', could not be found."
|
||||
)
|
||||
|
||||
|
||||
|
@ -451,9 +449,7 @@ def get_size(vm_):
|
|||
if vm_size and str(vm_size) in sizes:
|
||||
return sizes[vm_size]["InstanceTypeId"]
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
"The specified size, '{}', could not be found.".format(vm_size)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
|
||||
|
||||
|
||||
def __get_location(vm_):
|
||||
|
@ -471,7 +467,7 @@ def __get_location(vm_):
|
|||
if vm_location and str(vm_location) in locations:
|
||||
return locations[vm_location]["RegionId"]
|
||||
raise SaltCloudNotFound(
|
||||
"The specified location, '{}', could not be found.".format(vm_location)
|
||||
f"The specified location, '{vm_location}', could not be found."
|
||||
)
|
||||
|
||||
|
||||
|
@ -920,7 +916,7 @@ def _get_node(name):
|
|||
)
|
||||
# Just a little delay between attempts...
|
||||
time.sleep(0.5)
|
||||
raise SaltCloudNotFound("The specified instance {} not found".format(name))
|
||||
raise SaltCloudNotFound(f"The specified instance {name} not found")
|
||||
|
||||
|
||||
def show_image(kwargs, call=None):
|
||||
|
@ -982,7 +978,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -1001,7 +997,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
|
|
@ -836,7 +836,7 @@ def create_network_interface(call=None, kwargs=None):
|
|||
NetworkInterfaceIPConfiguration(
|
||||
name="{}-ip".format(kwargs["iface_name"]),
|
||||
subnet=subnet_obj,
|
||||
**ip_kwargs
|
||||
**ip_kwargs,
|
||||
)
|
||||
]
|
||||
break
|
||||
|
@ -999,7 +999,7 @@ def request_instance(vm_, kwargs=None):
|
|||
if not win_installer and ssh_publickeyfile_contents is not None:
|
||||
sshpublickey = SshPublicKey(
|
||||
key_data=ssh_publickeyfile_contents,
|
||||
path="/home/{}/.ssh/authorized_keys".format(vm_username),
|
||||
path=f"/home/{vm_username}/.ssh/authorized_keys",
|
||||
)
|
||||
sshconfiguration = SshConfiguration(
|
||||
public_keys=[sshpublickey],
|
||||
|
@ -1620,7 +1620,7 @@ def _get_cloud_environment():
|
|||
cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD")
|
||||
except (AttributeError, ImportError):
|
||||
raise SaltCloudSystemExit(
|
||||
"The azure {} cloud environment is not available.".format(cloud_environment)
|
||||
f"The azure {cloud_environment} cloud environment is not available."
|
||||
)
|
||||
|
||||
return cloud_env
|
||||
|
@ -1911,7 +1911,7 @@ def create_or_update_vmextension(
|
|||
except CloudError as exc:
|
||||
salt.utils.azurearm.log_cloud_error(
|
||||
"compute",
|
||||
"Error attempting to create the VM extension: {}".format(exc.message),
|
||||
f"Error attempting to create the VM extension: {exc.message}",
|
||||
)
|
||||
ret = {"error": exc.message}
|
||||
|
||||
|
@ -1959,9 +1959,9 @@ def stop(name, call=None):
|
|||
ret = {"error": exc.message}
|
||||
if not ret:
|
||||
salt.utils.azurearm.log_cloud_error(
|
||||
"compute", "Unable to find virtual machine with name: {}".format(name)
|
||||
"compute", f"Unable to find virtual machine with name: {name}"
|
||||
)
|
||||
ret = {"error": "Unable to find virtual machine with name: {}".format(name)}
|
||||
ret = {"error": f"Unable to find virtual machine with name: {name}"}
|
||||
else:
|
||||
try:
|
||||
instance = compconn.virtual_machines.deallocate(
|
||||
|
@ -1972,7 +1972,7 @@ def stop(name, call=None):
|
|||
ret = vm_result.as_dict()
|
||||
except CloudError as exc:
|
||||
salt.utils.azurearm.log_cloud_error(
|
||||
"compute", "Error attempting to stop {}: {}".format(name, exc.message)
|
||||
"compute", f"Error attempting to stop {name}: {exc.message}"
|
||||
)
|
||||
ret = {"error": exc.message}
|
||||
|
||||
|
@ -2022,9 +2022,9 @@ def start(name, call=None):
|
|||
ret = {"error": exc.message}
|
||||
if not ret:
|
||||
salt.utils.azurearm.log_cloud_error(
|
||||
"compute", "Unable to find virtual machine with name: {}".format(name)
|
||||
"compute", f"Unable to find virtual machine with name: {name}"
|
||||
)
|
||||
ret = {"error": "Unable to find virtual machine with name: {}".format(name)}
|
||||
ret = {"error": f"Unable to find virtual machine with name: {name}"}
|
||||
else:
|
||||
try:
|
||||
instance = compconn.virtual_machines.start(
|
||||
|
@ -2036,7 +2036,7 @@ def start(name, call=None):
|
|||
except CloudError as exc:
|
||||
salt.utils.azurearm.log_cloud_error(
|
||||
"compute",
|
||||
"Error attempting to start {}: {}".format(name, exc.message),
|
||||
f"Error attempting to start {name}: {exc.message}",
|
||||
)
|
||||
ret = {"error": exc.message}
|
||||
|
||||
|
|
|
@ -421,7 +421,7 @@ def create(vm_):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"waiting for ssh",
|
||||
"salt/cloud/{}/waiting_for_ssh".format(name),
|
||||
f"salt/cloud/{name}/waiting_for_ssh",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"ip_address": vm_["ssh_host"]},
|
||||
transport=__opts__["transport"],
|
||||
|
|
|
@ -474,7 +474,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": name},
|
||||
)
|
||||
|
@ -499,7 +499,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"detaching volume",
|
||||
"salt/cloud/{}/detaching".format(volume.name),
|
||||
f"salt/cloud/{volume.name}/detaching",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": volume.name},
|
||||
)
|
||||
|
@ -510,7 +510,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"detached volume",
|
||||
"salt/cloud/{}/detached".format(volume.name),
|
||||
f"salt/cloud/{volume.name}/detached",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": volume.name},
|
||||
)
|
||||
|
@ -519,7 +519,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying volume",
|
||||
"salt/cloud/{}/destroying".format(volume.name),
|
||||
f"salt/cloud/{volume.name}/destroying",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": volume.name},
|
||||
)
|
||||
|
@ -530,7 +530,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed volume",
|
||||
"salt/cloud/{}/destroyed".format(volume.name),
|
||||
f"salt/cloud/{volume.name}/destroyed",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": volume.name},
|
||||
)
|
||||
|
@ -545,7 +545,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
args={"name": name},
|
||||
)
|
||||
|
|
|
@ -223,9 +223,7 @@ def get_image(vm_):
|
|||
if images[image]["slug"] is not None:
|
||||
return images[image]["slug"]
|
||||
return int(images[image]["id"])
|
||||
raise SaltCloudNotFound(
|
||||
"The specified image, '{}', could not be found.".format(vm_image)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
|
||||
|
||||
|
||||
def get_size(vm_):
|
||||
|
@ -239,9 +237,7 @@ def get_size(vm_):
|
|||
for size in sizes:
|
||||
if vm_size.lower() == sizes[size]["slug"]:
|
||||
return sizes[size]["slug"]
|
||||
raise SaltCloudNotFound(
|
||||
"The specified size, '{}', could not be found.".format(vm_size)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
|
||||
|
||||
|
||||
def get_location(vm_):
|
||||
|
@ -257,7 +253,7 @@ def get_location(vm_):
|
|||
if vm_location in (locations[location]["name"], locations[location]["slug"]):
|
||||
return locations[location]["slug"]
|
||||
raise SaltCloudNotFound(
|
||||
"The specified location, '{}', could not be found.".format(vm_location)
|
||||
f"The specified location, '{vm_location}', could not be found."
|
||||
)
|
||||
|
||||
|
||||
|
@ -333,7 +329,7 @@ def create(vm_):
|
|||
|
||||
if key_filename is not None and not os.path.isfile(key_filename):
|
||||
raise SaltCloudConfigError(
|
||||
"The defined key_filename '{}' does not exist".format(key_filename)
|
||||
f"The defined key_filename '{key_filename}' does not exist"
|
||||
)
|
||||
|
||||
if not __opts__.get("ssh_agent", False) and key_filename is None:
|
||||
|
@ -616,10 +612,10 @@ def query(
|
|||
)
|
||||
)
|
||||
|
||||
path = "{}/{}/".format(base_path, method)
|
||||
path = f"{base_path}/{method}/"
|
||||
|
||||
if droplet_id:
|
||||
path += "{}/".format(droplet_id)
|
||||
path += f"{droplet_id}/"
|
||||
|
||||
if command:
|
||||
path += command
|
||||
|
@ -875,7 +871,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -912,7 +908,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -987,7 +983,7 @@ def destroy_dns_records(fqdn):
|
|||
ret = query(
|
||||
method="domains",
|
||||
droplet_id=domain,
|
||||
command="records/{}".format(id_),
|
||||
command=f"records/{id_}",
|
||||
http_method="delete",
|
||||
)
|
||||
except SaltCloudSystemExit:
|
||||
|
|
|
@ -296,11 +296,9 @@ def query(
|
|||
location = get_location()
|
||||
|
||||
if not requesturl:
|
||||
endpoint = provider.get(
|
||||
"endpoint", "ec2.{}.{}".format(location, service_url)
|
||||
)
|
||||
endpoint = provider.get("endpoint", f"ec2.{location}.{service_url}")
|
||||
|
||||
requesturl = "https://{}/".format(endpoint)
|
||||
requesturl = f"https://{endpoint}/"
|
||||
endpoint = urllib.parse.urlparse(requesturl).netloc
|
||||
endpoint_path = urllib.parse.urlparse(requesturl).path
|
||||
else:
|
||||
|
@ -1480,7 +1478,7 @@ def _create_eni_if_necessary(interface, vm_):
|
|||
|
||||
eni_desc = result[1]
|
||||
if not eni_desc or not eni_desc.get("networkInterfaceId"):
|
||||
raise SaltCloudException("Failed to create interface: {}".format(result))
|
||||
raise SaltCloudException(f"Failed to create interface: {result}")
|
||||
|
||||
eni_id = eni_desc.get("networkInterfaceId")
|
||||
log.debug("Created network interface %s inst %s", eni_id, interface["DeviceIndex"])
|
||||
|
@ -1751,11 +1749,11 @@ def _param_from_config(key, data):
|
|||
|
||||
if isinstance(data, dict):
|
||||
for k, v in data.items():
|
||||
param.update(_param_from_config("{}.{}".format(key, k), v))
|
||||
param.update(_param_from_config(f"{key}.{k}", v))
|
||||
|
||||
elif isinstance(data, list) or isinstance(data, tuple):
|
||||
for idx, conf_item in enumerate(data):
|
||||
prefix = "{}.{}".format(key, idx)
|
||||
prefix = f"{key}.{idx}"
|
||||
param.update(_param_from_config(prefix, conf_item))
|
||||
|
||||
else:
|
||||
|
@ -1870,7 +1868,7 @@ def request_instance(vm_=None, call=None):
|
|||
params[spot_prefix + "SecurityGroup.1"] = ex_securitygroup
|
||||
else:
|
||||
for counter, sg_ in enumerate(ex_securitygroup):
|
||||
params[spot_prefix + "SecurityGroup.{}".format(counter)] = sg_
|
||||
params[spot_prefix + f"SecurityGroup.{counter}"] = sg_
|
||||
|
||||
ex_iam_profile = iam_profile(vm_)
|
||||
if ex_iam_profile:
|
||||
|
@ -1905,7 +1903,7 @@ def request_instance(vm_=None, call=None):
|
|||
params[spot_prefix + "SecurityGroupId.1"] = ex_securitygroupid
|
||||
else:
|
||||
for counter, sg_ in enumerate(ex_securitygroupid):
|
||||
params[spot_prefix + "SecurityGroupId.{}".format(counter)] = sg_
|
||||
params[spot_prefix + f"SecurityGroupId.{counter}"] = sg_
|
||||
|
||||
placementgroup_ = get_placementgroup(vm_)
|
||||
if placementgroup_ is not None:
|
||||
|
@ -2044,9 +2042,9 @@ def request_instance(vm_=None, call=None):
|
|||
else:
|
||||
dev_index = len(dev_list)
|
||||
# Add the device name in since it wasn't already there
|
||||
params[
|
||||
"{}BlockDeviceMapping.{}.DeviceName".format(spot_prefix, dev_index)
|
||||
] = rd_name
|
||||
params[f"{spot_prefix}BlockDeviceMapping.{dev_index}.DeviceName"] = (
|
||||
rd_name
|
||||
)
|
||||
|
||||
# Set the termination value
|
||||
termination_key = "{}BlockDeviceMapping.{}.Ebs.DeleteOnTermination".format(
|
||||
|
@ -2509,7 +2507,7 @@ def wait_for_instance(
|
|||
for line in comps[0].splitlines():
|
||||
if not line:
|
||||
continue
|
||||
keys += "\n{} {}".format(ip_address, line)
|
||||
keys += f"\n{ip_address} {line}"
|
||||
|
||||
with salt.utils.files.fopen(known_hosts_file, "a") as fp_:
|
||||
fp_.write(salt.utils.stringutils.to_str(keys))
|
||||
|
@ -2563,7 +2561,7 @@ def _validate_key_path_and_mode(key_filename):
|
|||
|
||||
if not os.path.exists(key_filename):
|
||||
raise SaltCloudSystemExit(
|
||||
"The EC2 key file '{}' does not exist.\n".format(key_filename)
|
||||
f"The EC2 key file '{key_filename}' does not exist.\n"
|
||||
)
|
||||
|
||||
key_mode = stat.S_IMODE(os.stat(key_filename).st_mode)
|
||||
|
@ -2752,7 +2750,7 @@ def create(vm_=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"setting tags",
|
||||
"salt/cloud/spot_request_{}/tagging".format(sir_id),
|
||||
f"salt/cloud/spot_request_{sir_id}/tagging",
|
||||
args={"tags": spot_request_tags},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2924,7 +2922,7 @@ def create(vm_=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"setting tags",
|
||||
"salt/cloud/block_volume_{}/tagging".format(str(volid)),
|
||||
f"salt/cloud/block_volume_{str(volid)}/tagging",
|
||||
args={"tags": tags},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -3054,7 +3052,7 @@ def stop(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"stopping instance",
|
||||
"salt/cloud/{}/stopping".format(name),
|
||||
f"salt/cloud/{name}/stopping",
|
||||
args={"name": name, "instance_id": instance_id},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -3088,7 +3086,7 @@ def start(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"starting instance",
|
||||
"salt/cloud/{}/starting".format(name),
|
||||
f"salt/cloud/{name}/starting",
|
||||
args={"name": name, "instance_id": instance_id},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -3163,8 +3161,8 @@ def set_tags(
|
|||
tags = kwargs
|
||||
|
||||
for idx, (tag_k, tag_v) in enumerate(tags.items()):
|
||||
params["Tag.{}.Key".format(idx)] = tag_k
|
||||
params["Tag.{}.Value".format(idx)] = tag_v
|
||||
params[f"Tag.{idx}.Key"] = tag_k
|
||||
params[f"Tag.{idx}.Value"] = tag_v
|
||||
|
||||
attempts = 0
|
||||
while attempts < aws.AWS_MAX_RETRIES:
|
||||
|
@ -3210,7 +3208,7 @@ def set_tags(
|
|||
|
||||
return settags
|
||||
|
||||
raise SaltCloudSystemExit("Failed to set tags on {}!".format(name))
|
||||
raise SaltCloudSystemExit(f"Failed to set tags on {name}!")
|
||||
|
||||
|
||||
def get_tags(
|
||||
|
@ -3292,7 +3290,7 @@ def del_tags(
|
|||
params = {"Action": "DeleteTags", "ResourceId.1": instance_id}
|
||||
|
||||
for idx, tag in enumerate(kwargs["tags"].split(",")):
|
||||
params["Tag.{}.Key".format(idx)] = tag
|
||||
params[f"Tag.{idx}.Key"] = tag
|
||||
|
||||
aws.query(
|
||||
params,
|
||||
|
@ -3356,7 +3354,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name, "instance_id": instance_id},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -3377,7 +3375,7 @@ def destroy(name, call=None):
|
|||
"rename_on_destroy", get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
if rename_on_destroy is not False:
|
||||
newname = "{}-DEL{}".format(name, uuid.uuid4().hex)
|
||||
newname = f"{name}-DEL{uuid.uuid4().hex}"
|
||||
rename(name, kwargs={"newname": newname}, call="action")
|
||||
log.info(
|
||||
"Machine will be identified as %s until it has been cleaned up.", newname
|
||||
|
@ -3410,7 +3408,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name, "instance_id": instance_id},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -4056,8 +4054,8 @@ def _toggle_delvol(
|
|||
if volume_id is not None and volume_id != item["ebs"]["volumeId"]:
|
||||
continue
|
||||
|
||||
params["BlockDeviceMapping.{}.DeviceName".format(idx)] = device_name
|
||||
params["BlockDeviceMapping.{}.Ebs.DeleteOnTermination".format(idx)] = value
|
||||
params[f"BlockDeviceMapping.{idx}.DeviceName"] = device_name
|
||||
params[f"BlockDeviceMapping.{idx}.Ebs.DeleteOnTermination"] = value
|
||||
|
||||
aws.query(
|
||||
params,
|
||||
|
@ -4477,7 +4475,7 @@ def describe_volumes(kwargs=None, call=None):
|
|||
if "volume_id" in kwargs:
|
||||
volume_id = kwargs["volume_id"].split(",")
|
||||
for volume_index, volume_id in enumerate(volume_id):
|
||||
params["VolumeId.{}".format(volume_index)] = volume_id
|
||||
params[f"VolumeId.{volume_index}"] = volume_id
|
||||
|
||||
log.debug(params)
|
||||
|
||||
|
@ -4796,17 +4794,17 @@ def describe_snapshots(kwargs=None, call=None):
|
|||
if "snapshot_id" in kwargs:
|
||||
snapshot_ids = kwargs["snapshot_id"].split(",")
|
||||
for snapshot_index, snapshot_id in enumerate(snapshot_ids):
|
||||
params["SnapshotId.{}".format(snapshot_index)] = snapshot_id
|
||||
params[f"SnapshotId.{snapshot_index}"] = snapshot_id
|
||||
|
||||
if "owner" in kwargs:
|
||||
owners = kwargs["owner"].split(",")
|
||||
for owner_index, owner in enumerate(owners):
|
||||
params["Owner.{}".format(owner_index)] = owner
|
||||
params[f"Owner.{owner_index}"] = owner
|
||||
|
||||
if "restorable_by" in kwargs:
|
||||
restorable_bys = kwargs["restorable_by"].split(",")
|
||||
for restorable_by_index, restorable_by in enumerate(restorable_bys):
|
||||
params["RestorableBy.{}".format(restorable_by_index)] = restorable_by
|
||||
params[f"RestorableBy.{restorable_by_index}"] = restorable_by
|
||||
|
||||
log.debug(params)
|
||||
|
||||
|
@ -5013,11 +5011,11 @@ def _parse_pricing(url, name):
|
|||
"storageGiB",
|
||||
"USD",
|
||||
):
|
||||
price_js = price_js.replace(keyword, '"{}"'.format(keyword))
|
||||
price_js = price_js.replace(keyword, f'"{keyword}"')
|
||||
|
||||
for keyword in ("region", "price", "size"):
|
||||
price_js = price_js.replace(keyword, '"{}"'.format(keyword))
|
||||
price_js = price_js.replace('"{}"s'.format(keyword), '"{}s"'.format(keyword))
|
||||
price_js = price_js.replace(keyword, f'"{keyword}"')
|
||||
price_js = price_js.replace(f'"{keyword}"s', f'"{keyword}s"')
|
||||
|
||||
price_js = price_js.replace('""', '"')
|
||||
|
||||
|
@ -5031,7 +5029,7 @@ def _parse_pricing(url, name):
|
|||
sizes[size["size"]] = size
|
||||
regions[region["region"]] = sizes
|
||||
|
||||
outfile = os.path.join(__opts__["cachedir"], "ec2-pricing-{}.p".format(name))
|
||||
outfile = os.path.join(__opts__["cachedir"], f"ec2-pricing-{name}.p")
|
||||
with salt.utils.files.fopen(outfile, "w") as fho:
|
||||
salt.utils.msgpack.dump(regions, fho)
|
||||
|
||||
|
@ -5093,7 +5091,7 @@ def show_pricing(kwargs=None, call=None):
|
|||
else:
|
||||
name = "linux"
|
||||
|
||||
pricefile = os.path.join(__opts__["cachedir"], "ec2-pricing-{}.p".format(name))
|
||||
pricefile = os.path.join(__opts__["cachedir"], f"ec2-pricing-{name}.p")
|
||||
|
||||
if not os.path.isfile(pricefile):
|
||||
update_pricing({"type": name}, "function")
|
||||
|
|
|
@ -186,7 +186,7 @@ def get_conn():
|
|||
"service_account_private_key", provider, __opts__
|
||||
)
|
||||
gce = driver(email, private_key, project=project)
|
||||
gce.connection.user_agent_append("{}/{}".format(_UA_PRODUCT, _UA_VERSION))
|
||||
gce.connection.user_agent_append(f"{_UA_PRODUCT}/{_UA_VERSION}")
|
||||
return gce
|
||||
|
||||
|
||||
|
@ -544,7 +544,7 @@ def _parse_allow(allow):
|
|||
pairs = p.split(":")
|
||||
if pairs[0].lower() not in ["tcp", "udp", "icmp"]:
|
||||
raise SaltCloudSystemExit(
|
||||
"Unsupported protocol {}. Must be tcp, udp, or icmp.".format(pairs[0])
|
||||
f"Unsupported protocol {pairs[0]}. Must be tcp, udp, or icmp."
|
||||
)
|
||||
if len(pairs) == 1 or pairs[0].lower() == "icmp":
|
||||
seen_protos[pairs[0]] = []
|
||||
|
@ -2014,7 +2014,7 @@ def reboot(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"reboot instance",
|
||||
"salt/cloud/{}/rebooting".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/rebooting",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2025,7 +2025,7 @@ def reboot(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"reboot instance",
|
||||
"salt/cloud/{}/rebooted".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/rebooted",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2056,7 +2056,7 @@ def start(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"start instance",
|
||||
"salt/cloud/{}/starting".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/starting",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2067,7 +2067,7 @@ def start(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"start instance",
|
||||
"salt/cloud/{}/started".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/started",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2096,7 +2096,7 @@ def stop(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"stop instance",
|
||||
"salt/cloud/{}/stopping".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/stopping",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2107,7 +2107,7 @@ def stop(vm_name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"stop instance",
|
||||
"salt/cloud/{}/stopped".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/stopped",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2145,12 +2145,12 @@ def destroy(vm_name, call=None):
|
|||
exc,
|
||||
exc_info_on_loglevel=logging.DEBUG,
|
||||
)
|
||||
raise SaltCloudSystemExit("Could not find instance {}.".format(vm_name))
|
||||
raise SaltCloudSystemExit(f"Could not find instance {vm_name}.")
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"delete instance",
|
||||
"salt/cloud/{}/deleting".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/deleting",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2186,11 +2186,11 @@ def destroy(vm_name, call=None):
|
|||
exc,
|
||||
exc_info_on_loglevel=logging.DEBUG,
|
||||
)
|
||||
raise SaltCloudSystemExit("Could not destroy instance {}.".format(vm_name))
|
||||
raise SaltCloudSystemExit(f"Could not destroy instance {vm_name}.")
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"delete instance",
|
||||
"salt/cloud/{}/deleted".format(vm_name),
|
||||
f"salt/cloud/{vm_name}/deleted",
|
||||
args={"name": vm_name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -2279,7 +2279,7 @@ def create_attach_volumes(name, kwargs, call=None):
|
|||
letter = ord("a") - 1
|
||||
|
||||
for idx, volume in enumerate(volumes):
|
||||
volume_name = "{}-sd{}".format(name, chr(letter + 2 + idx))
|
||||
volume_name = f"{name}-sd{chr(letter + 2 + idx)}"
|
||||
|
||||
volume_dict = {
|
||||
"disk_name": volume_name,
|
||||
|
|
|
@ -417,7 +417,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -428,7 +428,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -536,7 +536,7 @@ def _query(
|
|||
path += action
|
||||
|
||||
if command:
|
||||
path += "/{}".format(command)
|
||||
path += f"/{command}"
|
||||
|
||||
log.debug("GoGrid URL: %s", path)
|
||||
|
||||
|
|
|
@ -470,22 +470,22 @@ def start(name, call=None, wait=True):
|
|||
client = _connect_client()
|
||||
server = client.servers.get_by_name(name)
|
||||
if server is None:
|
||||
return "Instance {} doesn't exist.".format(name)
|
||||
return f"Instance {name} doesn't exist."
|
||||
|
||||
server.power_on()
|
||||
if wait and not wait_until(name, "running"):
|
||||
return "Instance {} doesn't start.".format(name)
|
||||
return f"Instance {name} doesn't start."
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"started instance",
|
||||
"salt/cloud/{}/started".format(name),
|
||||
f"salt/cloud/{name}/started",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
)
|
||||
|
||||
return {"Started": "{} was started.".format(name)}
|
||||
return {"Started": f"{name} was started."}
|
||||
|
||||
|
||||
def stop(name, call=None, wait=True):
|
||||
|
@ -504,22 +504,22 @@ def stop(name, call=None, wait=True):
|
|||
client = _connect_client()
|
||||
server = client.servers.get_by_name(name)
|
||||
if server is None:
|
||||
return "Instance {} doesn't exist.".format(name)
|
||||
return f"Instance {name} doesn't exist."
|
||||
|
||||
server.power_off()
|
||||
if wait and not wait_until(name, "off"):
|
||||
return "Instance {} doesn't stop.".format(name)
|
||||
return f"Instance {name} doesn't stop."
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"stopped instance",
|
||||
"salt/cloud/{}/stopped".format(name),
|
||||
f"salt/cloud/{name}/stopped",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
)
|
||||
|
||||
return {"Stopped": "{} was stopped.".format(name)}
|
||||
return {"Stopped": f"{name} was stopped."}
|
||||
|
||||
|
||||
def reboot(name, call=None, wait=True):
|
||||
|
@ -540,14 +540,14 @@ def reboot(name, call=None, wait=True):
|
|||
client = _connect_client()
|
||||
server = client.servers.get_by_name(name)
|
||||
if server is None:
|
||||
return "Instance {} doesn't exist.".format(name)
|
||||
return f"Instance {name} doesn't exist."
|
||||
|
||||
server.reboot()
|
||||
|
||||
if wait and not wait_until(name, "running"):
|
||||
return "Instance {} doesn't start.".format(name)
|
||||
return f"Instance {name} doesn't start."
|
||||
|
||||
return {"Rebooted": "{} was rebooted.".format(name)}
|
||||
return {"Rebooted": f"{name} was rebooted."}
|
||||
|
||||
|
||||
def destroy(name, call=None):
|
||||
|
@ -568,12 +568,12 @@ def destroy(name, call=None):
|
|||
client = _connect_client()
|
||||
server = client.servers.get_by_name(name)
|
||||
if server is None:
|
||||
return "Instance {} doesn't exist.".format(name)
|
||||
return f"Instance {name} doesn't exist."
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -583,14 +583,14 @@ def destroy(name, call=None):
|
|||
if node["state"] == "running":
|
||||
stop(name, call="action", wait=False)
|
||||
if not wait_until(name, "off"):
|
||||
return {"Error": "Unable to destroy {}, command timed out".format(name)}
|
||||
return {"Error": f"Unable to destroy {name}, command timed out"}
|
||||
|
||||
server.delete()
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -603,7 +603,7 @@ def destroy(name, call=None):
|
|||
__opts__,
|
||||
)
|
||||
|
||||
return {"Destroyed": "{} was destroyed.".format(name)}
|
||||
return {"Destroyed": f"{name} was destroyed."}
|
||||
|
||||
|
||||
def resize(name, kwargs, call=None):
|
||||
|
@ -624,7 +624,7 @@ def resize(name, kwargs, call=None):
|
|||
client = _connect_client()
|
||||
server = client.servers.get_by_name(name)
|
||||
if server is None:
|
||||
return "Instance {} doesn't exist.".format(name)
|
||||
return f"Instance {name} doesn't exist."
|
||||
|
||||
# Check the configuration
|
||||
size = kwargs.get("size", None)
|
||||
|
@ -638,7 +638,7 @@ def resize(name, kwargs, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"resizing instance",
|
||||
"salt/cloud/{}/resizing".format(name),
|
||||
f"salt/cloud/{name}/resizing",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -648,17 +648,17 @@ def resize(name, kwargs, call=None):
|
|||
if node["state"] == "running":
|
||||
stop(name, call="action", wait=False)
|
||||
if not wait_until(name, "off"):
|
||||
return {"Error": "Unable to resize {}, command timed out".format(name)}
|
||||
return {"Error": f"Unable to resize {name}, command timed out"}
|
||||
|
||||
server.change_type(server_type, kwargs.get("upgrade_disk", False))
|
||||
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"resizing instance",
|
||||
"salt/cloud/{}/resized".format(name),
|
||||
f"salt/cloud/{name}/resized",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
)
|
||||
|
||||
return {"Resized": "{} was resized.".format(name)}
|
||||
return {"Resized": f"{name} was resized."}
|
||||
|
|
|
@ -161,9 +161,7 @@ def get_image(vm_):
|
|||
images[vm_image]["name"] = images[vm_image]["id"]
|
||||
return images[vm_image]
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
"The specified image, '{}', could not be found.".format(vm_image)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
|
||||
|
||||
|
||||
def get_size(vm_):
|
||||
|
@ -178,9 +176,7 @@ def get_size(vm_):
|
|||
if vm_size and str(vm_size) in sizes:
|
||||
return sizes[vm_size]
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
"The specified size, '{}', could not be found.".format(vm_size)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
|
||||
|
||||
|
||||
def query_instance(vm_=None, call=None):
|
||||
|
@ -375,11 +371,11 @@ def create_node(**kwargs):
|
|||
|
||||
if metadata is not None:
|
||||
for key, value in metadata.items():
|
||||
create_data["metadata.{}".format(key)] = value
|
||||
create_data[f"metadata.{key}"] = value
|
||||
|
||||
if tag is not None:
|
||||
for key, value in tag.items():
|
||||
create_data["tag.{}".format(key)] = value
|
||||
create_data[f"tag.{key}"] = value
|
||||
|
||||
if firewall_enabled is not None:
|
||||
create_data["firewall_enabled"] = firewall_enabled
|
||||
|
@ -419,7 +415,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -435,7 +431,7 @@ def destroy(name, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -920,11 +916,11 @@ def avail_images(call=None):
|
|||
get_configured_provider(),
|
||||
__opts__,
|
||||
search_global=False,
|
||||
default="{}{}/{}/images".format(DEFAULT_LOCATION, JOYENT_API_HOST_SUFFIX, user),
|
||||
default=f"{DEFAULT_LOCATION}{JOYENT_API_HOST_SUFFIX}/{user}/images",
|
||||
)
|
||||
|
||||
if not img_url.startswith("http://") and not img_url.startswith("https://"):
|
||||
img_url = "{}://{}".format(_get_proto(), img_url)
|
||||
img_url = f"{_get_proto()}://{img_url}"
|
||||
|
||||
rcode, data = query(command="my/images", method="GET")
|
||||
log.debug(data)
|
||||
|
@ -1077,7 +1073,7 @@ def get_location_path(
|
|||
:param location: joyent data center location
|
||||
:return: url
|
||||
"""
|
||||
return "{}://{}{}".format(_get_proto(), location, api_host_suffix)
|
||||
return f"{_get_proto()}://{location}{api_host_suffix}"
|
||||
|
||||
|
||||
def query(action=None, command=None, args=None, method="GET", location=None, data=None):
|
||||
|
@ -1151,7 +1147,7 @@ def query(action=None, command=None, args=None, method="GET", location=None, dat
|
|||
path += action
|
||||
|
||||
if command:
|
||||
path += "/{}".format(command)
|
||||
path += f"/{command}"
|
||||
|
||||
log.debug("User: '%s' on PATH: %s", user, path)
|
||||
|
||||
|
@ -1174,9 +1170,9 @@ def query(action=None, command=None, args=None, method="GET", location=None, dat
|
|||
signed = base64.b64encode(signed)
|
||||
user_arr = user.split("/")
|
||||
if len(user_arr) == 1:
|
||||
keyid = "/{}/keys/{}".format(user_arr[0], ssh_keyname)
|
||||
keyid = f"/{user_arr[0]}/keys/{ssh_keyname}"
|
||||
elif len(user_arr) == 2:
|
||||
keyid = "/{}/users/{}/keys/{}".format(user_arr[0], user_arr[1], ssh_keyname)
|
||||
keyid = f"/{user_arr[0]}/users/{user_arr[1]}/keys/{ssh_keyname}"
|
||||
else:
|
||||
log.error("Malformed user string")
|
||||
|
||||
|
|
|
@@ -332,7 +332,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        f"salt/cloud/{name}/creating",
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -345,7 +345,7 @@ def create(vm_):
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(key_filename)
            f"The defined key_filename '{key_filename}' does not exist"
        )
    vm_["key_filename"] = key_filename
    # wait_for_instance requires private_key
@@ -374,7 +374,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(name),
        f"salt/cloud/{name}/requesting",
        args={
            "kwargs": __utils__["cloud.filter_event"](
                "requesting", kwargs, list(kwargs)
@@ -392,7 +392,7 @@ def create(vm_):
        description_elem = ElementTree.Element("description")
        domain_xml.insert(0, description_elem)
        description = domain_xml.find("./description")
        description.text = "Cloned from {}".format(base)
        description.text = f"Cloned from {base}"
        domain_xml.remove(domain_xml.find("./uuid"))

        for iface_xml in domain_xml.findall("./devices/interface"):
@@ -426,9 +426,7 @@ def create(vm_):
            # see if there is a path element that needs rewriting
            if source_element and "path" in source_element.attrib:
                path = source_element.attrib["path"]
                new_path = path.replace(
                    "/domain-{}/".format(base), "/domain-{}/".format(name)
                )
                new_path = path.replace(f"/domain-{base}/", f"/domain-{name}/")
                log.debug("Rewriting agent socket path to %s", new_path)
                source_element.attrib["path"] = new_path

@@ -471,7 +469,7 @@ def create(vm_):
                disk.find("./source").attrib["file"] = new_volume.path()
            else:
                raise SaltCloudExecutionFailure(
                    "Disk type '{}' not supported".format(disk_type)
                    f"Disk type '{disk_type}' not supported"
                )

        clone_xml = salt.utils.stringutils.to_str(ElementTree.tostring(domain_xml))
@@ -515,7 +513,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(name),
        f"salt/cloud/{name}/created",
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -617,15 +615,15 @@ def destroy(name, call=None):
            pass

    if not found:
        return "{} doesn't exist and can't be deleted".format(name)
        return f"{name} doesn't exist and can't be deleted"

    if len(found) > 1:
        return "{} doesn't identify a unique machine leaving things".format(name)
        return f"{name} doesn't identify a unique machine leaving things"

    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -636,7 +634,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -718,15 +716,15 @@ def find_pool_and_volume(conn, path):
        for v in sp.listAllVolumes():
            if v.path() == path:
                return sp, v
    raise SaltCloudNotFound("Could not find volume for path {}".format(path))
    raise SaltCloudNotFound(f"Could not find volume for path {path}")


def generate_new_name(orig_name):
    if "." not in orig_name:
        return "{}-{}".format(orig_name, uuid.uuid1())
        return f"{orig_name}-{uuid.uuid1()}"

    name, ext = orig_name.rsplit(".", 1)
    return "{}-{}.{}".format(name, uuid.uuid1(), ext)
    return f"{name}-{uuid.uuid1()}.{ext}"


def get_domain_volumes(conn, domain):

@@ -339,7 +339,7 @@ def _get_ssh_keys(vm_):
    key_files = _get_ssh_key_files(vm_)
    for file in map(lambda file: Path(file).resolve(), key_files):
        if not (file.exists() or file.is_file()):
            raise SaltCloudSystemExit("Invalid SSH key file: {}".format(str(file)))
            raise SaltCloudSystemExit(f"Invalid SSH key file: {str(file)}")
        ssh_keys.add(file.read_text())

    return list(ssh_keys)
@@ -513,11 +513,11 @@ class LinodeAPIv4(LinodeAPI):

        if headers is None:
            headers = {}
        headers["Authorization"] = "Bearer {}".format(api_key)
        headers["Authorization"] = f"Bearer {api_key}"
        headers["Content-Type"] = "application/json"
        headers["User-Agent"] = "salt-cloud-linode"

        url = "https://api.linode.com/{}{}".format(api_version, path)
        url = f"https://api.linode.com/{api_version}{path}"

        decode = method != "DELETE"
        result = None
@@ -578,7 +578,7 @@ class LinodeAPIv4(LinodeAPI):
            # If the response is not valid JSON or the error was not included, propagate the
            # human readable status representation.
            raise SaltCloudSystemExit(
                "Linode API error occurred: {}".format(err_response.reason)
                f"Linode API error occurred: {err_response.reason}"
            )
        if decode:
            return self._get_response_json(result)
@@ -623,7 +623,7 @@ class LinodeAPIv4(LinodeAPI):
        )

        response = self._query(
            "/linode/instances/{}/boot".format(linode_id),
            f"/linode/instances/{linode_id}/boot",
            method="POST",
            data={"config_id": config_id},
        )
@@ -656,7 +656,7 @@ class LinodeAPIv4(LinodeAPI):
        )

        return self._query(
            "/linode/instances/{}/clone".format(linode_id),
            f"/linode/instances/{linode_id}/clone",
            method="POST",
            data={"region": location, "type": size},
        )
@@ -688,7 +688,7 @@ class LinodeAPIv4(LinodeAPI):
        }

        return self._query(
            "/linode/instances/{}/configs".format(linode_id),
            f"/linode/instances/{linode_id}/configs",
            method="POST",
            data={"label": name, "devices": devices},
        )
@@ -702,7 +702,7 @@ class LinodeAPIv4(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "starting create",
            "salt/cloud/{}/creating".format(name),
            f"salt/cloud/{name}/creating",
            args=__utils__["cloud.filter_event"](
                "creating", vm_, ["name", "profile", "provider", "driver"]
            ),
@@ -795,7 +795,7 @@ class LinodeAPIv4(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "waiting for ssh",
            "salt/cloud/{}/waiting_for_ssh".format(name),
            f"salt/cloud/{name}/waiting_for_ssh",
            sock_dir=__opts__["sock_dir"],
            args={"ip_address": vm_["ssh_host"]},
            transport=__opts__["transport"],
@@ -810,7 +810,7 @@ class LinodeAPIv4(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            f"salt/cloud/{name}/created",
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]
            ),
@@ -824,7 +824,7 @@ class LinodeAPIv4(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "destroyed instance",
            "salt/cloud/{}/destroyed".format(name),
            f"salt/cloud/{name}/destroyed",
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
@@ -838,7 +838,7 @@ class LinodeAPIv4(LinodeAPI):
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)

        self._query("/linode/instances/{}".format(linode_id), method="DELETE")
        self._query(f"/linode/instances/{linode_id}", method="DELETE")

    def get_config_id(self, kwargs=None):
        name = kwargs.get("name", None)
@@ -853,7 +853,7 @@ class LinodeAPIv4(LinodeAPI):
        if linode_id is None:
            linode_id = self.get_linode(kwargs=kwargs).get("id", None)

        response = self._query("/linode/instances/{}/configs".format(linode_id))
        response = self._query(f"/linode/instances/{linode_id}/configs")
        configs = response.get("data", [])

        return {"config_id": configs[0]["id"]}
@@ -879,7 +879,7 @@ class LinodeAPIv4(LinodeAPI):
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)

        self._query("/linode/instances/{}/reboot".format(linode_id), method="POST")
        self._query(f"/linode/instances/{linode_id}/reboot", method="POST")
        return self._wait_for_linode_status(linode_id, "running")

    def show_instance(self, name):
@@ -939,7 +939,7 @@ class LinodeAPIv4(LinodeAPI):
                "msg": "Machine already running",
            }

        self._query("/linode/instances/{}/boot".format(linode_id), method="POST")
        self._query(f"/linode/instances/{linode_id}/boot", method="POST")

        self._wait_for_linode_status(linode_id, "running")
        return {
@@ -960,13 +960,13 @@ class LinodeAPIv4(LinodeAPI):
                "msg": "Machine already stopped",
            }

        self._query("/linode/instances/{}/shutdown".format(linode_id), method="POST")
        self._query(f"/linode/instances/{linode_id}/shutdown", method="POST")

        self._wait_for_linode_status(linode_id, "offline")
        return {"success": True, "state": "Stopped", "action": "stop"}

    def _get_linode_by_id(self, linode_id):
        return self._query("/linode/instances/{}".format(linode_id))
        return self._query(f"/linode/instances/{linode_id}")

    def _get_linode_by_name(self, name):
        result = self._query("/linode/instances")
@@ -976,9 +976,7 @@ class LinodeAPIv4(LinodeAPI):
            if instance["label"] == name:
                return instance

        raise SaltCloudNotFound(
            "The specified name, {}, could not be found.".format(name)
        )
        raise SaltCloudNotFound(f"The specified name, {name}, could not be found.")

    def _list_linodes(self, full=False):
        result = self._query("/linode/instances")
@@ -1005,7 +1003,7 @@ class LinodeAPIv4(LinodeAPI):
        return ret

    def _get_linode_type(self, linode_type):
        return self._query("/linode/types/{}".format(linode_type))
        return self._query(f"/linode/types/{linode_type}")

    def _get_ips(self, linode_id):
        instance = self._get_linode_by_id(linode_id)
@@ -1049,15 +1047,13 @@ class LinodeAPIv4(LinodeAPI):
                time.sleep(poll_interval / 1000)
                log.info("retrying: polling for %s...", description)
            else:
                raise SaltCloudException(
                    "timed out: polling for {}".format(description)
                )
                raise SaltCloudException(f"timed out: polling for {description}")

    def _wait_for_entity_status(
        self, getter, status, entity_name="item", identifier="some", timeout=None
    ):
        return self._poll(
            "{} (id={}) status to be '{}'".format(entity_name, identifier, status),
            f"{entity_name} (id={identifier}) status to be '{status}'",
            getter,
            lambda item: item.get("status") == status,
            timeout=timeout,
@@ -1126,8 +1122,8 @@ class LinodeAPIv4(LinodeAPI):
            return True

        return self._poll(
            "event {} to be '{}'".format(event_id, status),
            lambda: self._query("/account/events/{}".format(event_id)),
            f"event {event_id} to be '{status}'",
            lambda: self._query(f"/account/events/{event_id}"),
            condition,
            timeout=timeout,
        )
@@ -1170,7 +1166,7 @@ class LinodeAPIv3(LinodeAPI):
        if "api_key" not in args.keys():
            args["api_key"] = apikey
        if action and "api_action" not in args.keys():
            args["api_action"] = "{}.{}".format(action, command)
            args["api_action"] = f"{action}.{command}"
        if header_dict is None:
            header_dict = {}
        if method != "POST":
@@ -1266,7 +1262,7 @@ class LinodeAPIv3(LinodeAPI):
        if status == "1":
            raise SaltCloudSystemExit(
                "Cannot boot Linode {0}. "
                + "Linode {} is already running.".format(linode_item)
                + f"Linode {linode_item} is already running."
            )

        # Boot the VM and get the JobID from Linode
@@ -1311,7 +1307,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "starting create",
            "salt/cloud/{}/creating".format(name),
            f"salt/cloud/{name}/creating",
            args=__utils__["cloud.filter_event"](
                "creating", vm_, ["name", "profile", "provider", "driver"]
            ),
@@ -1348,7 +1344,7 @@ class LinodeAPIv3(LinodeAPI):

            kwargs = {
                "clonefrom": clonefrom_name,
                "image": "Clone of {}".format(clonefrom_name),
                "image": f"Clone of {clonefrom_name}",
            }

            if size is None:
@@ -1412,7 +1408,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "requesting instance",
            "salt/cloud/{}/requesting".format(name),
            f"salt/cloud/{name}/requesting",
            args=__utils__["cloud.filter_event"](
                "requesting", vm_, ["name", "profile", "provider", "driver"]
            ),
@@ -1505,7 +1501,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "waiting for ssh",
            "salt/cloud/{}/waiting_for_ssh".format(name),
            f"salt/cloud/{name}/waiting_for_ssh",
            sock_dir=__opts__["sock_dir"],
            args={"ip_address": vm_["ssh_host"]},
            transport=__opts__["transport"],
@@ -1522,7 +1518,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            f"salt/cloud/{name}/created",
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]
            ),
@@ -1560,9 +1556,9 @@ class LinodeAPIv3(LinodeAPI):
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)

        disklist = "{},{}".format(root_disk_id, swap_disk_id)
        disklist = f"{root_disk_id},{swap_disk_id}"
        if data_disk_id is not None:
            disklist = "{},{},{}".format(root_disk_id, swap_disk_id, data_disk_id)
            disklist = f"{root_disk_id},{swap_disk_id},{data_disk_id}"

        config_args = {
            "LinodeID": int(linode_id),
@@ -1663,7 +1659,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "destroying instance",
            "salt/cloud/{}/destroying".format(name),
            f"salt/cloud/{name}/destroying",
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
@@ -1678,7 +1674,7 @@ class LinodeAPIv3(LinodeAPI):
        __utils__["cloud.fire_event"](
            "event",
            "destroyed instance",
            "salt/cloud/{}/destroyed".format(name),
            f"salt/cloud/{name}/destroyed",
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
@@ -1732,7 +1728,7 @@ class LinodeAPIv3(LinodeAPI):
            plan_type = "Nanode"

        plan_size = plan_size / 1024
        new_label = "{} {}GB".format(plan_type, plan_size)
        new_label = f"{plan_type} {plan_size}GB"

        if new_label not in sizes:
            raise SaltCloudException(
@@ -2052,9 +2048,7 @@ class LinodeAPIv3(LinodeAPI):
            if name == node["LABEL"]:
                return node

        raise SaltCloudNotFound(
            "The specified name, {}, could not be found.".format(name)
        )
        raise SaltCloudNotFound(f"The specified name, {name}, could not be found.")

    def _get_linode_by_id(self, linode_id):
        result = self._query("linode", "list", args={"LinodeID": linode_id})

@@ -179,7 +179,7 @@ def _salt(fun, *args, **kw):
            ping_retries += 1
            log.error("%s unreachable, retrying", target)
    if not ping:
        raise SaltCloudSystemExit("Target {} unreachable".format(target))
        raise SaltCloudSystemExit(f"Target {target} unreachable")
    jid = conn.cmd_async(tgt=target, fun=fun, arg=args, kwarg=kw, **rkwargs)
    cret = conn.cmd(
        tgt=target, fun="saltutil.find_job", arg=[jid], timeout=10, **kwargs
@@ -224,9 +224,7 @@ def _salt(fun, *args, **kw):
            time.sleep(0.5)
            try:
                if "is not available." in ret:
                    raise SaltCloudSystemExit(
                        "module/function {} is not available".format(fun)
                    )
                    raise SaltCloudSystemExit(f"module/function {fun} is not available")
            except SaltCloudSystemExit:  # pylint: disable=try-except-raise
                raise
            except TypeError:
@@ -367,12 +365,12 @@ def destroy(vm_, call=None):
    )
    if not get_configured_provider():
        return
    ret = {"comment": "{} was not found".format(vm_), "result": False}
    ret = {"comment": f"{vm_} was not found", "result": False}
    if _salt("lxc.info", vm_, path=path):
        __utils__["cloud.fire_event"](
            "event",
            "destroying instance",
            "salt/cloud/{}/destroying".format(vm_),
            f"salt/cloud/{vm_}/destroying",
            args={"name": vm_, "instance_id": vm_},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
@@ -380,11 +378,11 @@ def destroy(vm_, call=None):
        cret = _salt("lxc.destroy", vm_, stop=True, path=path)
        ret["result"] = cret["result"]
        if ret["result"]:
            ret["comment"] = "{} was destroyed".format(vm_)
            ret["comment"] = f"{vm_} was destroyed"
            __utils__["cloud.fire_event"](
                "event",
                "destroyed instance",
                "salt/cloud/{}/destroyed".format(vm_),
                f"salt/cloud/{vm_}/destroyed",
                args={"name": vm_, "instance_id": vm_},
                sock_dir=__opts__["sock_dir"],
                transport=__opts__["transport"],
@@ -506,14 +504,14 @@ def get_configured_provider(vm_=None):
    matched = False
    # --list-images level
    if img_provider:
        tgt = "provider: {}".format(img_provider)
        tgt = f"provider: {img_provider}"
        if dalias == img_provider:
            data = get_provider(img_provider)
            matched = True
    # providers are set in configuration
    if not data and "profile" not in __opts__ and arg_providers:
        for name in arg_providers:
            tgt = "provider: {}".format(name)
            tgt = f"provider: {name}"
            if dalias == name:
                data = get_provider(name)
                if data:
@@ -523,13 +521,13 @@ def get_configured_provider(vm_=None):
    elif "profile" in __opts__:
        curprof = __opts__["profile"]
        profs = __opts__["profiles"]
        tgt = "profile: {}".format(curprof)
        tgt = f"profile: {curprof}"
        if (
            curprof in profs
            and profs[curprof]["provider"] == _get_active_provider_name()
        ):
            prov, cdriver = profs[curprof]["provider"].split(":")
            tgt += " provider: {}".format(prov)
            tgt += f" provider: {prov}"
            data = get_provider(prov)
            matched = True
    # fallback if we have only __active_provider_name__

@@ -853,7 +853,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
            kwargs["service_name"],
            kwargs["deployment_name"],
            kwargs["role_name"],
            **volume
            **volume,
        )
        log.debug(attach)

@@ -954,7 +954,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
            kwargs["service_name"],
            kwargs["deployment_name"],
            kwargs["role_name"],
            **volume
            **volume,
        )
        _wait_for_async(conn, result.request_id)

@@ -1031,7 +1031,7 @@ def destroy(name, conn=None, call=None, kwargs=None):
            result = conn.delete_deployment(service_name, service_name)
        except AzureConflictHttpError as exc:
            log.error(exc.message)
            raise SaltCloudSystemExit("{}: {}".format(name, exc.message))
            raise SaltCloudSystemExit(f"{name}: {exc.message}")
        delete_type = "delete_deployment"
        _wait_for_async(conn, result.request_id)
        ret[name] = {

@@ -193,9 +193,7 @@ def get_size(vm_):
        if size:
            return size

    raise SaltCloudNotFound(
        "The specified size, '{}', could not be found.".format(vm_size)
    )
    raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")


def get_image(vm_):
@@ -211,9 +209,7 @@ def get_image(vm_):
        if vm_image and vm_image in (images[key]["id"], images[key]["name"]):
            return images[key]

    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(vm_image)
    )
    raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")


def avail_locations(conn=None, call=None):
@@ -735,7 +731,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -749,7 +745,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -846,7 +842,7 @@ def get_key_filename(vm_):
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                "The defined ssh_private_key '{}' does not exist".format(key_filename)
                f"The defined ssh_private_key '{key_filename}' does not exist"
            )

    return key_filename
@@ -897,11 +893,9 @@ def _wait_for_completion(conn, wait_timeout, server_id):
        if server_state == "powered_on":
            return
        elif server_state == "failed":
            raise Exception("Server creation failed for {}".format(server_id))
            raise Exception(f"Server creation failed for {server_id}")
        elif server_state in ("active", "enabled", "deploying", "configuring"):
            continue
        else:
            raise Exception("Unknown server state {}".format(server_state))
    raise Exception(
        "Timed out waiting for server create completion for {}".format(server_id)
    )
            raise Exception(f"Unknown server state {server_state}")
    raise Exception(f"Timed out waiting for server create completion for {server_id}")

@@ -558,7 +558,7 @@ def get_cluster_id(kwargs=None, call=None):
    try:
        ret = list_clusters()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The cluster '{}' could not be found".format(name))
        raise SaltCloudSystemExit(f"The cluster '{name}' could not be found")

    return ret

@@ -590,7 +590,7 @@ def get_datastore_id(kwargs=None, call=None):
    try:
        ret = list_datastores()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The datastore '{}' could not be found.".format(name))
        raise SaltCloudSystemExit(f"The datastore '{name}' could not be found.")

    return ret

@@ -622,7 +622,7 @@ def get_host_id(kwargs=None, call=None):
    try:
        ret = avail_locations()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The host '{}' could not be found".format(name))
        raise SaltCloudSystemExit(f"The host '{name}' could not be found")

    return ret

@@ -641,9 +641,7 @@ def get_image(vm_):
    for image in images:
        if vm_image in (images[image]["name"], images[image]["id"]):
            return images[image]["id"]
    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(vm_image)
    )
    raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")


def get_image_id(kwargs=None, call=None):
@@ -673,7 +671,7 @@ def get_image_id(kwargs=None, call=None):
    try:
        ret = avail_images()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The image '{}' could not be found".format(name))
        raise SaltCloudSystemExit(f"The image '{name}' could not be found")

    return ret

@@ -697,7 +695,7 @@ def get_location(vm_):
        if vm_location in (locations[location]["name"], locations[location]["id"]):
            return locations[location]["id"]
    raise SaltCloudNotFound(
        "The specified location, '{}', could not be found.".format(vm_location)
        f"The specified location, '{vm_location}', could not be found."
    )


@@ -728,9 +726,7 @@ def get_secgroup_id(kwargs=None, call=None):
    try:
        ret = list_security_groups()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit(
            "The security group '{}' could not be found.".format(name)
        )
        raise SaltCloudSystemExit(f"The security group '{name}' could not be found.")

    return ret

@@ -761,7 +757,7 @@ def get_template_image(kwargs=None, call=None):
        ret = list_templates()[name]["template"]["disk"]["image"]
    except KeyError:
        raise SaltCloudSystemExit(
            "The image for template '{}' could not be found.".format(name)
            f"The image for template '{name}' could not be found."
        )

    return ret
@@ -794,7 +790,7 @@ def get_template_id(kwargs=None, call=None):
    try:
        ret = list_templates()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The template '{}' could not be found.".format(name))
        raise SaltCloudSystemExit(f"The template '{name}' could not be found.")

    return ret

@@ -816,7 +812,7 @@ def get_template(vm_):
        return list_templates()[vm_template]["id"]
    except KeyError:
        raise SaltCloudNotFound(
            "The specified template, '{}', could not be found.".format(vm_template)
            f"The specified template, '{vm_template}', could not be found."
        )


@@ -847,7 +843,7 @@ def get_vm_id(kwargs=None, call=None):
    try:
        ret = list_nodes()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The VM '{}' could not be found.".format(name))
        raise SaltCloudSystemExit(f"The VM '{name}' could not be found.")

    return ret

@@ -879,7 +875,7 @@ def get_vn_id(kwargs=None, call=None):
    try:
        ret = list_vns()[name]["id"]
    except KeyError:
        raise SaltCloudSystemExit("The VN '{}' could not be found.".format(name))
        raise SaltCloudSystemExit(f"The VN '{name}' could not be found.")

    return ret

@@ -895,9 +891,7 @@ def _get_device_template(disk, disk_info, template=None):
    def _require_disk_opts(*args):
        for arg in args:
            if arg not in disk_info:
                raise SaltCloudSystemExit(
                    "The disk {} requires a {} argument".format(disk, arg)
                )
                raise SaltCloudSystemExit(f"The disk {disk} requires a {arg} argument")

    _require_disk_opts("disk_type", "size")

@@ -919,12 +913,12 @@ def _get_device_template(disk, disk_info, template=None):
    if disk_type == "volatile":
        _require_disk_opts("type")
        v_type = disk_info["type"]
        temp = "DISK=[TYPE={}, SIZE={}]".format(v_type, size)
        temp = f"DISK=[TYPE={v_type}, SIZE={size}]"

        if v_type == "fs":
            _require_disk_opts("format")
            format = disk_info["format"]
            temp = "DISK=[TYPE={}, SIZE={}, FORMAT={}]".format(v_type, size, format)
            temp = f"DISK=[TYPE={v_type}, SIZE={size}, FORMAT={format}]"
        return temp
    # TODO add persistant disk_type

@@ -1101,7 +1095,7 @@ def create(vm_):
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(key_filename)
            f"The defined key_filename '{key_filename}' does not exist"
        )

    if fqdn:
@@ -1178,7 +1172,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
    )
@@ -1192,7 +1186,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
    )
@@ -4474,7 +4468,7 @@ def _get_xml(xml_str):
    except etree.XMLSyntaxError as err:
        # opennebula returned invalid XML, which could be an error message, so
        # log it
        raise SaltCloudSystemExit("opennebula returned: {}".format(xml_str))
        raise SaltCloudSystemExit(f"opennebula returned: {xml_str}")
    return xml_data

@@ -737,7 +737,7 @@ def create(vm_):
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined ssh_key_file '{}' does not exist".format(key_filename)
            f"The defined ssh_key_file '{key_filename}' does not exist"
        )

    vm_["key_filename"] = key_filename
@@ -846,7 +846,7 @@ def destroy(name, conn=None, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -863,7 +863,7 @@ def destroy(name, conn=None, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -242,9 +242,7 @@ def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=True):
    manager = packet.Manager(auth_token=vm_["token"])

    for i in range(0, iterations):
        get_object = getattr(
            manager, "get_{status_type}".format(status_type=status_type)
        )
        get_object = getattr(manager, f"get_{status_type}")
        obj = get_object(object_id)

        if obj.state == status:
@@ -340,7 +338,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        f"salt/cloud/{name}/creating",
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -413,7 +411,7 @@ def create(vm_):

        volume = manager.create_volume(
            vm_["project_id"],
            "{}_storage".format(name),
            f"{name}_storage",
            vm_.get("storage_tier"),
            vm_.get("storage_size"),
            vm_.get("location"),
@@ -441,7 +439,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(name),
        f"salt/cloud/{name}/created",
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -580,7 +578,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -606,7 +604,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -310,11 +310,11 @@ def create(vm_):

    name = vm_["name"]
    if not wait_until(name, "CREATED"):
        return {"Error": "Unable to start {}, command timed out".format(name)}
        return {"Error": f"Unable to start {name}, command timed out"}
    start(vm_["name"], call="action")

    if not wait_until(name, "STARTED"):
        return {"Error": "Unable to start {}, command timed out".format(name)}
        return {"Error": f"Unable to start {name}, command timed out"}

    def __query_node_data(vm_name):
        data = show_instance(vm_name, call="action")
@@ -391,7 +391,7 @@ def query(action=None, command=None, args=None, method="GET", data=None):
        path += action

    if command:
        path += "/{}".format(command)
        path += f"/{command}"

    if not type(args, dict):
        args = {}
@@ -404,7 +404,7 @@ def query(action=None, command=None, args=None, method="GET", data=None):

    if args:
        params = urllib.parse.urlencode(args)
        req = urllib.request.Request(url="{}?{}".format(path, params), **kwargs)
        req = urllib.request.Request(url=f"{path}?{params}", **kwargs)
    else:
        req = urllib.request.Request(url=path, **kwargs)

@@ -526,7 +526,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -536,7 +536,7 @@ def destroy(name, call=None):
    if node["state"] == "STARTED":
        stop(name, call="action")
        if not wait_until(name, "STOPPED"):
            return {"Error": "Unable to destroy {}, command timed out".format(name)}
            return {"Error": f"Unable to destroy {name}, command timed out"}

    data = query(action="ve", command=name, method="DELETE")

@@ -546,7 +546,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -557,7 +557,7 @@ def destroy(name, call=None):
            name, _get_active_provider_name().split(":")[0], __opts__
        )

    return {"Destroyed": "{} was destroyed.".format(name)}
    return {"Destroyed": f"{name} was destroyed."}


def start(name, call=None):
@@ -575,12 +575,12 @@ def start(name, call=None):
            "The show_instance action must be called with -a or --action."
        )

    data = query(action="ve", command="{}/start".format(name), method="PUT")
    data = query(action="ve", command=f"{name}/start", method="PUT")

    if "error" in data:
        return data["error"]

    return {"Started": "{} was started.".format(name)}
    return {"Started": f"{name} was started."}


def stop(name, call=None):
@@ -598,9 +598,9 @@ def stop(name, call=None):
            "The show_instance action must be called with -a or --action."
        )

    data = query(action="ve", command="{}/stop".format(name), method="PUT")
    data = query(action="ve", command=f"{name}/stop", method="PUT")

    if "error" in data:
        return data["error"]

    return {"Stopped": "{} was stopped.".format(name)}
    return {"Stopped": f"{name} was stopped."}

@@ -328,9 +328,7 @@ def get_size(vm_):
        combinations = (str(sizes[size]["id"]), str(size))
        if vm_size and str(vm_size) in combinations:
            return sizes[size]
    raise SaltCloudNotFound(
        "The specified size, '{}', could not be found.".format(vm_size)
    )
    raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")


def get_datacenter_id():
@@ -415,7 +413,7 @@ def get_datacenter(conn):
            return item

    raise SaltCloudNotFound(
        "The specified datacenter '{}' could not be found.".format(datacenter_id)
        f"The specified datacenter '{datacenter_id}' could not be found."
    )


@@ -488,9 +486,7 @@ def get_image(vm_):
        if vm_image and vm_image in (images[key]["id"], images[key]["name"]):
            return images[key]

    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(vm_image)
    )
    raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")


def list_datacenters(conn=None, call=None):
@@ -725,7 +721,7 @@ def get_public_keys(vm_):
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                "The defined ssh_public_key '{}' does not exist".format(key_filename)
                f"The defined ssh_public_key '{key_filename}' does not exist"
            )
        ssh_keys = []
        with salt.utils.files.fopen(key_filename) as rfh:
@@ -746,7 +742,7 @@ def get_key_filename(vm_):
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                "The defined ssh_private_key '{}' does not exist".format(key_filename)
                f"The defined ssh_private_key '{key_filename}' does not exist"
            )

    return key_filename
@@ -941,7 +937,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -972,7 +968,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -1142,9 +1138,7 @@ def _get_data_volumes(vm_):
        # Verify the required 'disk_size' property is present in the cloud
        # profile config
        if "disk_size" not in volumes[key].keys():
            raise SaltCloudConfigError(
                "The volume '{}' is missing 'disk_size'".format(key)
            )
            raise SaltCloudConfigError(f"The volume '{key}' is missing 'disk_size'")
        # Use 'HDD' if no 'disk_type' property is present in cloud profile
        if "disk_type" not in volumes[key].keys():
            volumes[key]["disk_type"] = "HDD"
@@ -1187,7 +1181,7 @@ def _get_firewall_rules(firewall_rules):
        # profile config
        if "protocol" not in firewall_rules[key].keys():
            raise SaltCloudConfigError(
                "The firewall rule '{}' is missing 'protocol'".format(key)
                f"The firewall rule '{key}' is missing 'protocol'"
            )
        ret.append(
            FirewallRule(

@@ -135,7 +135,7 @@ def _authenticate():
    )

    connect_data = {"username": username, "password": passwd}
    full_url = "https://{}:{}/api2/json/access/ticket".format(url, port)
    full_url = f"https://{url}:{port}/api2/json/access/ticket"

    response = requests.post(
        full_url, verify=verify_ssl, data=connect_data, timeout=120
@@ -155,7 +155,7 @@ def query(conn_type, option, post_data=None):
        log.debug("Not authenticated yet, doing that now..")
        _authenticate()

    full_url = "https://{}:{}/api2/json/{}".format(url, port, option)
    full_url = f"https://{url}:{port}/api2/json/{option}"

    log.debug("%s: %s (%s)", conn_type, full_url, post_data)

@@ -450,9 +450,7 @@ def avail_images(call=None, location="local"):

    ret = {}
    for host_name, host_details in avail_locations().items():
        for item in query(
            "get", "nodes/{}/storage/{}/content".format(host_name, location)
        ):
        for item in query("get", f"nodes/{host_name}/storage/{location}/content"):
            ret[item["volid"]] = item
    return ret

@@ -559,7 +557,7 @@ def _dictionary_to_stringlist(input_dict):

        setting1=value1,setting2=value2
    """
    return ",".join("{}={}".format(k, input_dict[k]) for k in sorted(input_dict.keys()))
    return ",".join(f"{k}={input_dict[k]}" for k in sorted(input_dict.keys()))


def _reconfigure_clone(vm_, vmid):
@@ -715,7 +713,7 @@ def create(vm_):

    # wait until the vm has been created so we can start it
    if not wait_for_created(data["upid"], timeout=300):
        return {"Error": "Unable to create {}, command timed out".format(name)}
        return {"Error": f"Unable to create {name}, command timed out"}

    if vm_.get("clone") is True:
        _reconfigure_clone(vm_, vmid)
@@ -728,7 +726,7 @@ def create(vm_):
    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
    if not wait_for_state(vmid, "running"):
        return {"Error": "Unable to start {}, command timed out".format(name)}
        return {"Error": f"Unable to start {name}, command timed out"}

    if agent_get_ip is True:
        try:
@@ -868,7 +866,7 @@ def _import_api():
    Load this json content into global variable "api"
    """
    global api
    full_url = "https://{}:{}/pve-docs/api-viewer/apidoc.js".format(url, port)
    full_url = f"https://{url}:{port}/pve-docs/api-viewer/apidoc.js"
    returned_data = requests.get(full_url, verify=verify_ssl, timeout=120)

    re_filter = re.compile(" (?:pveapi|apiSchema) = (.*)^;", re.DOTALL | re.MULTILINE)
@@ -1102,12 +1100,12 @@ def get_vmconfig(vmid, node=None, node_type="openvz"):
    if node is None:
        # We need to figure out which node this VM is on.
        for host_name, host_details in avail_locations().items():
            for item in query("get", "nodes/{}/{}".format(host_name, node_type)):
            for item in query("get", f"nodes/{host_name}/{node_type}"):
                if item["vmid"] == vmid:
                    node = host_name

    # If we reached this point, we have all the information we need
    data = query("get", "nodes/{}/{}/{}/config".format(node, node_type, vmid))
    data = query("get", f"nodes/{node}/{node_type}/{vmid}/config")

    return data

@@ -1179,7 +1177,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -1193,7 +1191,7 @@ def destroy(name, call=None):

    # wait until stopped
    if not wait_for_state(vmobj["vmid"], "stopped"):
        return {"Error": "Unable to stop {}, command timed out".format(name)}
        return {"Error": f"Unable to stop {name}, command timed out"}

    # required to wait a bit here, otherwise the VM is sometimes
    # still locked and destroy fails.
@@ -1203,7 +1201,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -1213,7 +1211,7 @@ def destroy(name, call=None):
            name, _get_active_provider_name().split(":")[0], __opts__
        )

    return {"Destroyed": "{} was destroyed.".format(name)}
    return {"Destroyed": f"{name} was destroyed."}


def set_vm_status(status, name=None, vmid=None):
@@ -1302,7 +1300,7 @@ def start(name, vmid=None, call=None):

    # xxx: TBD: Check here whether the status was actually changed to 'started'

    return {"Started": "{} was started.".format(name)}
    return {"Started": f"{name} was started."}


def stop(name, vmid=None, call=None):
@@ -1324,7 +1322,7 @@ def stop(name, vmid=None, call=None):

    # xxx: TBD: Check here whether the status was actually changed to 'stopped'

    return {"Stopped": "{} was stopped.".format(name)}
    return {"Stopped": f"{name} was stopped."}


def shutdown(name=None, vmid=None, call=None):
@@ -1348,4 +1346,4 @@ def shutdown(name=None, vmid=None, call=None):

    # xxx: TBD: Check here whether the status was actually changed to 'stopped'

    return {"Shutdown": "{} was shutdown.".format(name)}
    return {"Shutdown": f"{name} was shutdown."}

@@ -108,7 +108,7 @@ def _compute_signature(parameters, access_key_secret, method, path):
    """
    parameters["signature_method"] = "HmacSHA256"

    string_to_sign = "{}\n{}\n".format(method.upper(), path)
    string_to_sign = f"{method.upper()}\n{path}\n"

    keys = sorted(parameters.keys())
    pairs = []
@@ -166,9 +166,9 @@ def query(params=None):
                    for sk, sv in value[i - 1].items():
                        if isinstance(sv, dict) or isinstance(sv, list):
                            sv = salt.utils.json.dumps(sv, separators=(",", ":"))
                        real_parameters["{}.{}.{}".format(key, i, sk)] = sv
                        real_parameters[f"{key}.{i}.{sk}"] = sv
                else:
                    real_parameters["{}.{}".format(key, i)] = value[i - 1]
                    real_parameters[f"{key}.{i}"] = value[i - 1]
        else:
            real_parameters[key] = value

@@ -252,7 +252,7 @@ def _get_location(vm_=None):
        return vm_location

    raise SaltCloudNotFound(
        "The specified location, '{}', could not be found.".format(vm_location)
        f"The specified location, '{vm_location}', could not be found."
    )


@@ -320,9 +320,7 @@ def _get_image(vm_):
    if vm_image in images:
        return vm_image

    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(vm_image)
    )
    raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")


def show_image(kwargs, call=None):
@@ -442,9 +440,7 @@ def _get_size(vm_):
    if vm_size in sizes:
        return vm_size

    raise SaltCloudNotFound(
        "The specified size, '{}', could not be found.".format(vm_size)
    )
    raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")


def _show_normalized_node(full_node):
@@ -626,7 +622,7 @@ def show_instance(instance_id, call=None, kwargs=None):

    if items["total_count"] == 0:
        raise SaltCloudNotFound(
            "The specified instance, '{}', could not be found.".format(instance_id)
            f"The specified instance, '{instance_id}', could not be found."
        )

    full_node = items["instance_set"][0]
@@ -878,7 +874,7 @@ def destroy(instance_id, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -894,7 +890,7 @@ def destroy(instance_id, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -289,7 +289,7 @@ def create(vm_):
        if ssh_host:
            log.info("trying to ping %s", ssh_host)
            count = "n" if salt.utils.platform.is_windows() else "c"
            cmd = "ping -{} 1 {}".format(count, ssh_host)
            cmd = f"ping -{count} 1 {ssh_host}"
            good_ping = local.cmd(wol_host, "cmd.retcode", [cmd]) == 0
            if good_ping:
                log.info("successful ping.")
@@ -464,7 +464,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
@@ -510,13 +510,13 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
    )

    return {"Destroyed": "{} was destroyed.".format(name)}
    return {"Destroyed": f"{name} was destroyed."}


def reboot(name, call=None):

@@ -160,7 +160,7 @@ def get_image(server_):
        if server_image in (images[image]["name"], images[image]["id"]):
            return images[image]["id"]
    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(server_image)
        f"The specified image, '{server_image}', could not be found."
    )


@@ -225,7 +225,7 @@ def create(server_):

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(key_filename)
            f"The defined key_filename '{key_filename}' does not exist"
        )

    ssh_password = config.get_cloud_config_value("ssh_password", server_, __opts__)
@@ -346,10 +346,10 @@ def query(
        )
    )

    path = "{}/{}/".format(base_path, method)
    path = f"{base_path}/{method}/"

    if server_id:
        path += "{}/".format(server_id)
        path += f"{server_id}/"

    if command:
        path += command
@@ -439,7 +439,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -457,7 +457,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -269,7 +269,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        f"salt/cloud/{name}/creating",
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -395,7 +395,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(name),
        f"salt/cloud/{name}/requesting",
        args={
            "kwargs": __utils__["cloud.filter_event"](
                "requesting", kwargs, list(kwargs)
@@ -513,7 +513,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(name),
        f"salt/cloud/{name}/created",
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -620,7 +620,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -633,7 +633,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -241,7 +241,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        f"salt/cloud/{name}/creating",
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -311,7 +311,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(name),
        f"salt/cloud/{name}/requesting",
        args={
            "kwargs": __utils__["cloud.filter_event"](
                "requesting", kwargs, list(kwargs)
@@ -406,7 +406,7 @@ def create(vm_):
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(name),
        f"salt/cloud/{name}/created",
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]
        ),
@@ -514,7 +514,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -535,7 +535,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],

@@ -123,7 +123,7 @@ def get_provider_client(name=None):
    elif name == "vpc_client":
        client = vpc_client.VpcClient(crd, region, cpf)
    else:
        raise SaltCloudSystemExit("Client name {} is not supported".format(name))
        raise SaltCloudSystemExit(f"Client name {name} is not supported")

    return client

@@ -206,11 +206,11 @@ def avail_sizes(call=None):
        ret[typeConfig.InstanceType] = {
            "Zone": typeConfig.Zone,
            "InstanceFamily": typeConfig.InstanceFamily,
            "Memory": "{}GB".format(typeConfig.Memory),
            "CPU": "{}-Core".format(typeConfig.CPU),
            "Memory": f"{typeConfig.Memory}GB",
            "CPU": f"{typeConfig.CPU}-Core",
        }
        if typeConfig.GPU:
            ret[typeConfig.InstanceType]["GPU"] = "{}-Core".format(typeConfig.GPU)
            ret[typeConfig.InstanceType]["GPU"] = f"{typeConfig.GPU}-Core"

    return ret

@@ -714,7 +714,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -730,7 +730,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        f"salt/cloud/{name}/destroyed",
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
@@ -782,9 +782,7 @@ def show_image(kwargs, call=None):
    resp = client.DescribeImages(req)

    if not resp.ImageSet:
        raise SaltCloudNotFound(
            "The specified image '{}' could not be found.".format(image)
        )
        raise SaltCloudNotFound(f"The specified image '{image}' could not be found.")

    ret = {}
    for image in resp.ImageSet:
@@ -794,7 +792,7 @@ def show_image(kwargs, call=None):
            "ImageSource": image.ImageSource,
            "Platform": image.Platform,
            "Architecture": image.Architecture,
            "ImageSize": "{}GB".format(image.ImageSize),
            "ImageSize": f"{image.ImageSize}GB",
            "ImageState": image.ImageState,
        }

@@ -893,7 +891,7 @@ def _get_node(name):
            )
            time.sleep(0.5)

    raise SaltCloudNotFound("Failed to get instance info {}".format(name))
    raise SaltCloudNotFound(f"Failed to get instance info {name}")


def _get_nodes():
@@ -940,7 +938,7 @@ def _get_images(image_type):
            "ImageSource": image.ImageSource,
            "Platform": image.Platform,
            "Architecture": image.Architecture,
            "ImageSize": "{}GB".format(image.ImageSize),
            "ImageSize": f"{image.ImageSize}GB",
        }

    return ret
@@ -958,9 +956,7 @@ def __get_image(vm_):
    if vm_image in images:
        return vm_image

    raise SaltCloudNotFound(
        "The specified image '{}' could not be found.".format(vm_image)
    )
    raise SaltCloudNotFound(f"The specified image '{vm_image}' could not be found.")


def __get_size(vm_):
@@ -975,9 +971,7 @@ def __get_size(vm_):
    if vm_size in sizes:
        return vm_size

    raise SaltCloudNotFound(
        "The specified size '{}' could not be found.".format(vm_size)
    )
    raise SaltCloudNotFound(f"The specified size '{vm_size}' could not be found.")


def __get_securitygroups(vm_):

@@ -256,7 +256,7 @@ def create(vm_):
        vm_.setdefault("ssh_port", ret["ssh_port"])
    except (KeyError, TypeError):
        raise SaltInvocationError(
            "Insufficient SSH addressing information for {}".format(name)
            f"Insufficient SSH addressing information for {name}"
        )

    log.info(
@@ -300,7 +300,7 @@ def destroy(name, call=None):
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        f"salt/cloud/{name}/destroying",
        args={"name": name},
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
@@ -317,7 +317,7 @@ def destroy(name, call=None):
            __utils__["cloud.fire_event"](
                "event",
                "destroyed instance",
                "salt/cloud/{}/destroyed".format(name),
                f"salt/cloud/{name}/destroyed",
                args={"name": name},
                sock_dir=opts["sock_dir"],
                transport=opts["transport"],
@@ -328,11 +328,11 @@ def destroy(name, call=None):
                    name, _get_active_provider_name().split(":")[0], opts
                )

            return {"Destroyed": "{} was destroyed.".format(name)}
            return {"Destroyed": f"{name} was destroyed."}
        else:
            return {"Error": "Error destroying {}".format(name)}
            return {"Error": f"Error destroying {name}"}
    else:
        return {"Error": "No response from {}. Cannot destroy.".format(name)}
        return {"Error": f"No response from {name}. Cannot destroy."}


# noinspection PyTypeChecker


@@ -368,12 +368,12 @@ def destroy(name, call=None):
"""
log.info("Attempting to delete instance %s", name)
if not vb_machine_exists(name):
return "{} doesn't exist and can't be deleted".format(name)
return f"{name} doesn't exist and can't be deleted"

__utils__["cloud.fire_event"](
"event",
"destroying instance",
"salt/cloud/{}/destroying".format(name),
f"salt/cloud/{name}/destroying",
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
@@ -384,7 +384,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"](
"event",
"destroyed instance",
"salt/cloud/{}/destroyed".format(name),
f"salt/cloud/{name}/destroyed",
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],

@@ -306,7 +306,7 @@ def _add_new_hard_disk_helper(
disk_spec.device.key = random_key
disk_spec.device.deviceInfo = vim.Description()
disk_spec.device.deviceInfo.label = disk_label
disk_spec.device.deviceInfo.summary = "{} GB".format(size_gb)
disk_spec.device.deviceInfo.summary = f"{size_gb} GB"

disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.thinProvisioned = thin_provision
@@ -662,7 +662,7 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path):
if datastore_ref:
drive.backing.datastore = datastore_ref

drive.deviceInfo.summary = "ISO {}".format(iso_path)
drive.deviceInfo.summary = f"ISO {iso_path}"

elif device_type == "client_device":
if mode == "passthrough":
@@ -917,7 +917,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
else None
)
if bus_sharing and bus_sharing in ["virtual", "physical", "no"]:
bus_sharing = "{}Sharing".format(bus_sharing)
bus_sharing = f"{bus_sharing}Sharing"
if bus_sharing != device.sharedBus:
# Only edit the SCSI controller if bus_sharing is different
scsi_spec = _edit_existing_scsi_controller(
@@ -1327,7 +1327,7 @@ def _format_instance_info_select(vm, selection):
if "size" in selection:
cpu = defaultto(vm, "config.hardware.numCPU")
ram = "{} MB".format(defaultto(vm, "config.hardware.memoryMB"))
vm_select_info["size"] = "cpu: {}\nram: {}".format(cpu, ram)
vm_select_info["size"] = f"cpu: {cpu}\nram: {ram}"
vm_select_info["size_dict"] = {
"cpu": cpu,
"memory": ram,
@@ -1610,7 +1610,7 @@ def _format_instance_info(vm):
if "config.guestFullName" in vm
else "N/A"
),
"size": "cpu: {}\nram: {}".format(cpu, ram),
"size": f"cpu: {cpu}\nram: {ram}",
"size_dict": {"cpu": cpu, "memory": ram},
"state": (
str(vm["summary.runtime.powerState"])
@@ -1642,7 +1642,7 @@ def _format_instance_info(vm):
def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""):
snapshots = {}
for snapshot in snapshot_list:
snapshot_path = "{}/{}".format(parent_snapshot_path, snapshot.name)
snapshot_path = f"{parent_snapshot_path}/{snapshot.name}"
snapshots[snapshot_path] = {
"name": snapshot.name,
"description": snapshot.description,
@@ -1777,7 +1777,7 @@ def test_vcenter_connection(kwargs=None, call=None):
# Get the service instance object
_get_si()
except Exception as exc: # pylint: disable=broad-except
return "failed to connect: {}".format(exc)
return f"failed to connect: {exc}"

return "connection successful"

@@ -2027,7 +2027,7 @@ def list_nodes(kwargs=None, call=None):
if "config.guestFullName" in vm
else "N/A"
),
"size": "cpu: {}\nram: {}".format(cpu, ram),
"size": f"cpu: {cpu}\nram: {ram}",
"size_dict": {"cpu": cpu, "memory": ram},
"state": (
str(vm["summary.runtime.powerState"])
@@ -2684,7 +2684,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"](
"event",
"destroying instance",
"salt/cloud/{}/destroying".format(name),
f"salt/cloud/{name}/destroying",
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
@@ -2730,7 +2730,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"](
"event",
"destroyed instance",
"salt/cloud/{}/destroyed".format(name),
f"salt/cloud/{name}/destroyed",
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
@@ -3135,7 +3135,7 @@ def create(vm_):
)
if not datastore_ref:
raise SaltCloudSystemExit(
"Specified datastore: '{}' does not exist".format(datastore)
f"Specified datastore: '{datastore}' does not exist"
)

if host:
@@ -3151,7 +3151,7 @@ def create(vm_):
# If the hardware version is specified and if it is different from the current
# hardware version, then schedule a hardware version upgrade
if hardware_version and object_ref is not None:
hardware_version = "vmx-{:02}".format(hardware_version)
hardware_version = f"vmx-{hardware_version:02}"
if hardware_version != object_ref.config.version:
log.debug(
"Scheduling hardware version upgrade from %s to %s",
@@ -3181,7 +3181,7 @@ def create(vm_):
elif memory_unit.lower() == "gb":
memory_mb = int(float(memory_num) * 1024.0)
else:
err_msg = "Invalid memory type specified: '{}'".format(memory_unit)
err_msg = f"Invalid memory type specified: '{memory_unit}'"
log.error(err_msg)
return {"Error": err_msg}
except (TypeError, ValueError):
@@ -3629,7 +3629,7 @@ def rescan_hba(kwargs=None, call=None):
if hba:
log.info("Rescanning HBA %s on host %s", hba, host_name)
host_ref.configManager.storageSystem.RescanHba(hba)
ret = "rescanned HBA {}".format(hba)
ret = f"rescanned HBA {hba}"
else:
log.info("Rescanning all HBAs on host %s", host_name)
host_ref.configManager.storageSystem.RescanAllHba()
@@ -3907,7 +3907,7 @@ def list_hbas(kwargs=None, call=None):

if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]:
raise SaltCloudSystemExit(
"Specified hba type {} currently not supported.".format(hba_type)
f"Specified hba type {hba_type} currently not supported."
)

host_list = salt.utils.vmware.get_mors_with_properties(
@@ -4280,10 +4280,10 @@ def revert_to_snapshot(name, kwargs=None, call=None):
task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on)
else:
log.debug("Reverting VM %s to snapshot %s", name, snapshot_name)
msg = "reverted to snapshot {}".format(snapshot_name)
msg = f"reverted to snapshot {snapshot_name}"
snapshot_ref = _get_snapshot_ref_by_name(vm_ref, snapshot_name)
if snapshot_ref is None:
return "specified snapshot '{}' does not exist".format(snapshot_name)
return f"specified snapshot '{snapshot_name}' does not exist"
task = snapshot_ref.snapshot.Revert(suppressPowerOn=suppress_power_on)

salt.utils.vmware.wait_for_task(task, name, "revert to snapshot", 5, "info")
@@ -4421,7 +4421,7 @@ def convert_to_template(name, kwargs=None, call=None):
vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name)

if vm_ref.config.template:
raise SaltCloudSystemExit("{} already a template".format(name))
raise SaltCloudSystemExit(f"{name} already a template")

try:
vm_ref.MarkAsTemplate()
@@ -4435,7 +4435,7 @@ def convert_to_template(name, kwargs=None, call=None):
)
return "failed to convert to teamplate"

return "{} converted to template".format(name)
return f"{name} converted to template"


def add_host(kwargs=None, call=None):
@@ -4557,7 +4557,7 @@ def add_host(kwargs=None, call=None):
("echo", "-n"), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p2 = subprocess.Popen(
("openssl", "s_client", "-connect", "{}:443".format(host_name)),
("openssl", "s_client", "-connect", f"{host_name}:443"),
stdin=p1.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@@ -4587,12 +4587,12 @@ def add_host(kwargs=None, call=None):
try:
if cluster_name:
task = cluster_ref.AddHost(spec=spec, asConnected=True)
ret = "added host system to cluster {}".format(cluster_name)
ret = f"added host system to cluster {cluster_name}"
if datacenter_name:
task = datacenter_ref.hostFolder.AddStandaloneHost(
spec=spec, addConnected=True
)
ret = "added host system to datacenter {}".format(datacenter_name)
ret = f"added host system to datacenter {datacenter_name}"
salt.utils.vmware.wait_for_task(task, host_name, "add host system", 5, "info")
except Exception as exc: # pylint: disable=broad-except
if isinstance(exc, vim.fault.SSLVerifyFault):
@ -124,7 +124,7 @@ def _get_session():
|
|||
Get a connection to the XenServer host
|
||||
"""
|
||||
api_version = "1.0"
|
||||
originator = "salt_cloud_{}_driver".format(__virtualname__)
|
||||
originator = f"salt_cloud_{__virtualname__}_driver"
|
||||
url = config.get_cloud_config_value(
|
||||
"url", get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
|
@ -550,7 +550,7 @@ def create(vm_):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"starting create",
|
||||
"salt/cloud/{}/creating".format(name),
|
||||
f"salt/cloud/{name}/creating",
|
||||
args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -580,7 +580,7 @@ def create(vm_):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"requesting instance",
|
||||
"salt/cloud/{}/requesting".format(name),
|
||||
f"salt/cloud/{name}/requesting",
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
)
|
||||
|
@ -623,7 +623,7 @@ def create(vm_):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"created instance",
|
||||
"salt/cloud/{}/created".format(name),
|
||||
f"salt/cloud/{name}/created",
|
||||
args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -923,7 +923,7 @@ def reboot(name, call=None, session=None):
|
|||
_run_async_task(task, session)
|
||||
return show_instance(name)
|
||||
else:
|
||||
return "{} is not running to be rebooted".format(name)
|
||||
return f"{name} is not running to be rebooted"
|
||||
|
||||
|
||||
def _get_vm(name=None, session=None):
|
||||
|
@ -984,7 +984,7 @@ def destroy(name=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -1009,7 +1009,7 @@ def destroy(name=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -1134,7 +1134,7 @@ def vif_list(name, call=None, kwargs=None):
|
|||
x = 0
|
||||
for vif in vifs:
|
||||
vif_record = session.xenapi.VIF.get_record(vif)
|
||||
data["vif-{}".format(x)] = vif_record
|
||||
data[f"vif-{x}"] = vif_record
|
||||
x += 1
|
||||
ret[name] = data
|
||||
return ret
|
||||
|
@ -1168,7 +1168,7 @@ def vbd_list(name=None, call=None):
|
|||
x = 0
|
||||
for vbd in vbds:
|
||||
vbd_record = session.xenapi.VBD.get_record(vbd)
|
||||
data["vbd-{}".format(x)] = vbd_record
|
||||
data[f"vbd-{x}"] = vbd_record
|
||||
x += 1
|
||||
ret = data
|
||||
return ret
|
||||
|
@ -1219,7 +1219,7 @@ def destroy_vm_vdis(name=None, session=None, call=None):
|
|||
vdi_record = session.xenapi.VDI.get_record(vbd_record["VDI"])
|
||||
if "iso" not in vdi_record["name_label"]:
|
||||
session.xenapi.VDI.destroy(vbd_record["VDI"])
|
||||
ret["vdi-{}".format(x)] = vdi_record["name_label"]
|
||||
ret[f"vdi-{x}"] = vdi_record["name_label"]
|
||||
x += 1
|
||||
return ret
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
|
|||
)
|
||||
)
|
||||
if why:
|
||||
errormsg += " for {}".format(why)
|
||||
errormsg += f" for {why}"
|
||||
errormsg += ". Please upgrade."
|
||||
raise ImportError(errormsg)
|
||||
|
||||
|
@ -186,7 +186,7 @@ def get_location(conn, vm_):
|
|||
return img
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
"The specified location, '{}', could not be found.".format(vm_location)
|
||||
f"The specified location, '{vm_location}', could not be found."
|
||||
)
|
||||
|
||||
|
||||
|
@ -204,9 +204,7 @@ def get_image(conn, vm_):
|
|||
if vm_image and vm_image in (img_id, img_name):
|
||||
return img
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
"The specified image, '{}', could not be found.".format(vm_image)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
|
||||
|
||||
|
||||
def get_size(conn, vm_):
|
||||
|
@ -224,9 +222,7 @@ def get_size(conn, vm_):
|
|||
str(size.name),
|
||||
):
|
||||
return size
|
||||
raise SaltCloudNotFound(
|
||||
"The specified size, '{}', could not be found.".format(vm_size)
|
||||
)
|
||||
raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
|
||||
|
||||
|
||||
def script(vm_):
|
||||
|
@ -257,7 +253,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroying instance",
|
||||
"salt/cloud/{}/destroying".format(name),
|
||||
f"salt/cloud/{name}/destroying",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -296,7 +292,7 @@ def destroy(name, conn=None, call=None):
|
|||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"destroyed instance",
|
||||
"salt/cloud/{}/destroyed".format(name),
|
||||
f"salt/cloud/{name}/destroyed",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
@ -338,8 +334,8 @@ def reboot(name, conn=None):
|
|||
# Fire reboot action
|
||||
__utils__["cloud.fire_event"](
|
||||
"event",
|
||||
"{} has been rebooted".format(name),
|
||||
"salt/cloud/{}/rebooting".format(name),
|
||||
f"{name} has been rebooted",
|
||||
f"salt/cloud/{name}/rebooting",
|
||||
args={"name": name},
|
||||
sock_dir=__opts__["sock_dir"],
|
||||
transport=__opts__["transport"],
|
||||
|
|
|
@ -2026,7 +2026,7 @@ def _read_conf_file(path):
|
|||
try:
|
||||
conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
|
||||
except salt.utils.yaml.YAMLError as err:
|
||||
message = "Error parsing configuration file: {} - {}".format(path, err)
|
||||
message = f"Error parsing configuration file: {path} - {err}"
|
||||
log.error(message)
|
||||
if path.endswith("_schedule.conf"):
|
||||
# Create empty dictionary of config options
|
||||
|
@ -2123,7 +2123,7 @@ def load_config(path, env_var, default_path=None, exit_on_config_errors=True):
|
|||
# If the configuration file is missing, attempt to copy the template,
|
||||
# after removing the first header line.
|
||||
if not os.path.isfile(path):
|
||||
template = "{}.template".format(path)
|
||||
template = f"{path}.template"
|
||||
if os.path.isfile(template):
|
||||
log.debug("Writing %s based on %s", path, template)
|
||||
with salt.utils.files.fopen(path, "w") as out:
|
||||
|
@ -2800,7 +2800,7 @@ def apply_cloud_config(overrides, defaults=None):
|
|||
if alias not in config["providers"]:
|
||||
config["providers"][alias] = {}
|
||||
|
||||
detail["provider"] = "{}:{}".format(alias, driver)
|
||||
detail["provider"] = f"{alias}:{driver}"
|
||||
config["providers"][alias][driver] = detail
|
||||
elif isinstance(details, dict):
|
||||
if "driver" not in details:
|
||||
|
@ -2817,7 +2817,7 @@ def apply_cloud_config(overrides, defaults=None):
|
|||
if alias not in config["providers"]:
|
||||
config["providers"][alias] = {}
|
||||
|
||||
details["provider"] = "{}:{}".format(alias, driver)
|
||||
details["provider"] = f"{alias}:{driver}"
|
||||
config["providers"][alias][driver] = details
|
||||
|
||||
# Migrate old configuration
|
||||
|
@ -3088,7 +3088,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
|
|||
for entry in val:
|
||||
|
||||
if "driver" not in entry:
|
||||
entry["driver"] = "-only-extendable-{}".format(ext_count)
|
||||
entry["driver"] = f"-only-extendable-{ext_count}"
|
||||
ext_count += 1
|
||||
|
||||
if key not in providers:
|
||||
|
@ -3131,7 +3131,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
|
|||
details["driver"], provider_alias, alias, provider
|
||||
)
|
||||
)
|
||||
details["extends"] = "{}:{}".format(alias, provider)
|
||||
details["extends"] = f"{alias}:{provider}"
|
||||
# change provider details '-only-extendable-' to extended
|
||||
# provider name
|
||||
details["driver"] = provider
|
||||
|
@ -3152,10 +3152,10 @@ def apply_cloud_providers_config(overrides, defaults=None):
|
|||
)
|
||||
else:
|
||||
if driver in providers.get(extends):
|
||||
details["extends"] = "{}:{}".format(extends, driver)
|
||||
details["extends"] = f"{extends}:{driver}"
|
||||
elif "-only-extendable-" in providers.get(extends):
|
||||
details["extends"] = "{}:{}".format(
|
||||
extends, "-only-extendable-{}".format(ext_count)
|
||||
extends, f"-only-extendable-{ext_count}"
|
||||
)
|
||||
else:
|
||||
# We're still not aware of what we're trying to extend
|
||||
|
@ -3869,7 +3869,7 @@ def _update_discovery_config(opts):
|
|||
for key in opts["discovery"]:
|
||||
if key not in discovery_config:
|
||||
raise salt.exceptions.SaltConfigurationError(
|
||||
"Unknown discovery option: {}".format(key)
|
||||
f"Unknown discovery option: {key}"
|
||||
)
|
||||
if opts.get("__role") != "minion":
|
||||
for key in ["attempts", "pause", "match"]:
|
||||
|
|
|
@ -25,7 +25,7 @@ class DefaultIncludeConfig(StringItem):
|
|||
description = __doc__
|
||||
|
||||
def __init__(self, default=None, pattern=None, **kwargs):
|
||||
default = "{}/*.conf".format(self.__confd_directory__)
|
||||
default = f"{self.__confd_directory__}/*.conf"
|
||||
pattern = r"(?:.*)/\*\.conf"
|
||||
super().__init__(default=default, pattern=pattern, **kwargs)
|
||||
|
||||
|
|
|
@ -128,8 +128,8 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
|
|||
:return: Path on the filesystem to the RSA private key
|
||||
"""
|
||||
base = os.path.join(keydir, keyname)
|
||||
priv = "{}.pem".format(base)
|
||||
pub = "{}.pub".format(base)
|
||||
priv = f"{base}.pem"
|
||||
pub = f"{base}.pub"
|
||||
|
||||
if HAS_M2:
|
||||
gen = RSA.gen_key(keysize, 65537, lambda: None)
|
||||
|
@ -449,7 +449,7 @@ class MasterKeys(dict):
|
|||
try:
|
||||
key = get_rsa_key(path, passphrase)
|
||||
except key_error as e:
|
||||
message = "Unable to read key: {}; passphrase may be incorrect".format(path)
|
||||
message = f"Unable to read key: {path}; passphrase may be incorrect"
|
||||
log.error(message)
|
||||
raise MasterExit(message)
|
||||
log.debug("Loaded %s key: %s", name, path)
|
||||
|
|
|
@ -140,7 +140,7 @@ def extract_masters(opts, masters="master", port=None, raise_if_empty=True):
|
|||
entries = opts.get(masters, [])
|
||||
|
||||
if not entries:
|
||||
emsg = "Invalid or missing opts['{}'].".format(masters)
|
||||
emsg = f"Invalid or missing opts['{masters}']."
|
||||
log.error(emsg)
|
||||
if raise_if_empty:
|
||||
raise ValueError(emsg)
|
||||
|
|
|
@ -88,7 +88,7 @@ def clean_fsbackend(opts):
|
|||
# Clear remote fileserver backend caches so they get recreated
|
||||
for backend in ("git", "hg", "svn"):
|
||||
if backend in opts["fileserver_backend"]:
|
||||
env_cache = os.path.join(opts["cachedir"], "{}fs".format(backend), "envs.p")
|
||||
env_cache = os.path.join(opts["cachedir"], f"{backend}fs", "envs.p")
|
||||
if os.path.isfile(env_cache):
|
||||
log.debug("Clearing %sfs env cache", backend)
|
||||
try:
|
||||
|
@ -99,7 +99,7 @@ def clean_fsbackend(opts):
|
|||
)
|
||||
|
||||
file_lists_dir = os.path.join(
|
||||
opts["cachedir"], "file_lists", "{}fs".format(backend)
|
||||
opts["cachedir"], "file_lists", f"{backend}fs"
|
||||
)
|
||||
try:
|
||||
file_lists_caches = os.listdir(file_lists_dir)
|
||||
|
@ -177,7 +177,7 @@ def mk_key(opts, user):
|
|||
opts["cachedir"], ".{}_key".format(user.replace("\\", "_"))
|
||||
)
|
||||
else:
|
||||
keyfile = os.path.join(opts["cachedir"], ".{}_key".format(user))
|
||||
keyfile = os.path.join(opts["cachedir"], f".{user}_key")
|
||||
|
||||
if os.path.exists(keyfile):
|
||||
log.debug("Removing stale keyfile: %s", keyfile)
|
||||
|
@ -589,7 +589,7 @@ class RemoteFuncs:
|
|||
minions = _res["minions"]
|
||||
minion_side_acl = {} # Cache minion-side ACL
|
||||
for minion in minions:
|
||||
mine_data = self.cache.fetch("minions/{}".format(minion), "mine")
|
||||
mine_data = self.cache.fetch(f"minions/{minion}", "mine")
|
||||
if not isinstance(mine_data, dict):
|
||||
continue
|
||||
for function in functions_allowed:
|
||||
|
@ -616,7 +616,7 @@ class RemoteFuncs:
|
|||
continue
|
||||
salt.utils.dictupdate.set_dict_key_value(
|
||||
minion_side_acl,
|
||||
"{}:{}".format(minion, function),
|
||||
f"{minion}:{function}",
|
||||
get_minion,
|
||||
)
|
||||
if salt.utils.mine.minion_side_acl_denied(
|
||||
|
@ -1176,7 +1176,7 @@ class LocalFuncs:
|
|||
fun = load.pop("fun")
|
||||
tag = salt.utils.event.tagify(jid, prefix="wheel")
|
||||
data = {
|
||||
"fun": "wheel.{}".format(fun),
|
||||
"fun": f"wheel.{fun}",
|
||||
"jid": jid,
|
||||
"tag": tag,
|
||||
"user": username,
|
||||
|
@ -1260,7 +1260,7 @@ class LocalFuncs:
|
|||
# Setup authorization list variable and error information
|
||||
auth_list = auth_check.get("auth_list", [])
|
||||
error = auth_check.get("error")
|
||||
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type)
|
||||
err_msg = f'Authentication failure of type "{auth_type}" occurred.'
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
|
|
|
@ -45,8 +45,8 @@ class _Constant:
|
|||
|
||||
def __repr__(self):
|
||||
if self.value:
|
||||
return "<Constant.{} value={}>".format(self.name, self.value)
|
||||
return "<Constant.{}>".format(self.name)
|
||||
return f"<Constant.{self.name} value={self.value}>"
|
||||
return f"<Constant.{self.name}>"
|
||||
|
||||
|
||||
# Default delimiter for multi-level traversal in targeting
|
||||
|
|
|
@ -48,13 +48,13 @@ def start_engines(opts, proc_mgr, proxy=None):
|
|||
engine_name = engine
|
||||
del engine_opts["engine_module"]
|
||||
else:
|
||||
fun = "{}.start".format(engine)
|
||||
fun = f"{engine}.start"
|
||||
if fun in engines:
|
||||
start_func = engines[fun]
|
||||
if engine_name:
|
||||
name = "Engine({}, name={})".format(start_func.__module__, engine_name)
|
||||
name = f"Engine({start_func.__module__}, name={engine_name})"
|
||||
else:
|
||||
name = "Engine({})".format(start_func.__module__)
|
||||
name = f"Engine({start_func.__module__})"
|
||||
log.info("Starting %s", name)
|
||||
proc_mgr.add_process(
|
||||
Engine,
|
||||
|
|
|
@ -173,16 +173,16 @@ class IRCClient:
|
|||
event.source, nick, user, host, event.code, channel, command, line
|
||||
)
|
||||
if (self._allow_nick(nick) or self._allow_host(host)) and hasattr(
|
||||
self, "_command_{}".format(command)
|
||||
self, f"_command_{command}"
|
||||
):
|
||||
getattr(self, "_command_{}".format(command))(privevent)
|
||||
getattr(self, f"_command_{command}")(privevent)
|
||||
|
||||
def _command_echo(self, event):
|
||||
message = "PRIVMSG {} :{}".format(event.channel, event.line)
|
||||
message = f"PRIVMSG {event.channel} :{event.line}"
|
||||
self.send_message(message)
|
||||
|
||||
def _command_ping(self, event):
|
||||
message = "PRIVMSG {} :{}: pong".format(event.channel, event.nick)
|
||||
message = f"PRIVMSG {event.channel} :{event.nick}: pong"
|
||||
self.send_message(message)
|
||||
|
||||
def _command_event(self, event):
|
||||
|
@ -210,7 +210,7 @@ class IRCClient:
|
|||
payload = {"data": []}
|
||||
|
||||
fire("salt/engines/ircbot/" + tag, payload)
|
||||
message = "PRIVMSG {} :{}: TaDa!".format(event.channel, event.nick)
|
||||
message = f"PRIVMSG {event.channel} :{event.nick}: TaDa!"
|
||||
self.send_message(message)
|
||||
|
||||
def _message(self, raw):
|
||||
|
@ -219,7 +219,7 @@ class IRCClient:
|
|||
|
||||
if event.code == "PING":
|
||||
salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
|
||||
self.send_message, "PONG {}".format(event.line)
|
||||
self.send_message, f"PONG {event.line}"
|
||||
)
|
||||
elif event.code == "PRIVMSG":
|
||||
salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
|
||||
|
@ -230,13 +230,13 @@ class IRCClient:
|
|||
def join_channel(self, channel):
|
||||
if not channel.startswith("#"):
|
||||
channel = "#" + channel
|
||||
self.send_message("JOIN {}".format(channel))
|
||||
self.send_message(f"JOIN {channel}")
|
||||
|
||||
def on_connect(self):
|
||||
logging.info("on_connect")
|
||||
if self.sasl is True:
|
||||
self.send_message("CAP REQ :sasl")
|
||||
self.send_message("NICK {}".format(self.nick))
|
||||
self.send_message(f"NICK {self.nick}")
|
||||
self.send_message("USER saltstack 0 * :saltstack")
|
||||
if self.password:
|
||||
if self.sasl is True:
|
||||
|
@ -244,7 +244,7 @@ class IRCClient:
|
|||
"{0}\x00{0}\x00{1}".format(self.username, self.password).encode()
|
||||
)
|
||||
self.send_message("AUTHENTICATE PLAIN")
|
||||
self.send_message("AUTHENTICATE {}".format(authstring))
|
||||
self.send_message(f"AUTHENTICATE {authstring}")
|
||||
self.send_message("CAP END")
|
||||
else:
|
||||
self.send_message(
|
||||
|
|
|
@ -189,7 +189,7 @@ def _get_domain_event_detail(event, detail):
|
|||
if event_name == "unknown":
|
||||
return event_name, "unknown"
|
||||
|
||||
prefix = "VIR_DOMAIN_EVENT_{}_".format(event_name.upper())
|
||||
prefix = f"VIR_DOMAIN_EVENT_{event_name.upper()}_"
|
||||
detail_name = _get_libvirt_enum_string(prefix, detail)
|
||||
|
||||
return event_name, detail_name
|
||||
|
@ -333,9 +333,7 @@ def _domain_event_graphics_cb(
|
|||
transform address structure into event data piece
|
||||
"""
|
||||
return {
|
||||
"family": _get_libvirt_enum_string(
|
||||
"{}_ADDRESS_".format(prefix), addr["family"]
|
||||
),
|
||||
"family": _get_libvirt_enum_string(f"{prefix}_ADDRESS_", addr["family"]),
|
||||
"node": addr["node"],
|
||||
"service": addr["service"],
|
||||
}
|
||||
|
@ -680,14 +678,14 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
|
|||
"""
|
||||
libvirt_name = real_id
|
||||
if real_id is None:
|
||||
libvirt_name = "VIR_{}_EVENT_ID_{}".format(obj, event).upper()
|
||||
libvirt_name = f"VIR_{obj}_EVENT_ID_{event}".upper()
|
||||
|
||||
if not hasattr(libvirt, libvirt_name):
|
||||
log.warning('Skipping "%s/%s" events: libvirt too old', obj, event)
|
||||
return None
|
||||
|
||||
libvirt_id = getattr(libvirt, libvirt_name)
|
||||
callback_name = "_{}_event_{}_cb".format(obj, event)
|
||||
callback_name = f"_{obj}_event_{event}_cb"
|
||||
callback = globals().get(callback_name, None)
|
||||
if callback is None:
|
||||
log.error("Missing function %s in engine", callback_name)
|
||||
|
|
|
@ -209,7 +209,7 @@ def _zmq(address, port, **kwargs):
|
|||
socket = context.socket(zmq.SUB)
|
||||
if salt.utils.network.is_ipv6(address):
|
||||
socket.ipv6 = True
|
||||
socket.connect("tcp://{addr}:{port}".format(addr=address, port=port))
|
||||
socket.connect(f"tcp://{address}:{port}")
|
||||
socket.setsockopt(zmq.SUBSCRIBE, b"")
|
||||
return socket.recv
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ def _get_serializer(output):
|
|||
return getattr(serializers, output)
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
"Unknown serializer `{}` found for output option".format(output)
|
||||
f"Unknown serializer `{output}` found for output option"
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -747,7 +747,7 @@ class SlackClient:
|
|||
results = {}
|
||||
for jid in outstanding_jids:
|
||||
# results[jid] = runner.cmd('jobs.lookup_jid', [jid])
|
||||
if self.master_minion.returners["{}.get_jid".format(source)](jid):
|
||||
if self.master_minion.returners[f"{source}.get_jid"](jid):
|
||||
job_result = runner.cmd("jobs.list_job", [jid])
|
||||
jid_result = job_result.get("Result", {})
|
||||
jid_function = job_result.get("Function", {})
|
||||
|
@ -838,7 +838,7 @@ class SlackClient:
|
|||
channel.send_message(return_prefix)
|
||||
ts = time.time()
|
||||
st = datetime.datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S%f")
|
||||
filename = "salt-results-{}.yaml".format(st)
|
||||
filename = f"salt-results-{st}.yaml"
|
||||
r = self.sc.api_call(
|
||||
"files.upload",
|
||||
channels=channel.id,
|
||||
|
@ -944,4 +944,4 @@ def start(
|
|||
)
|
||||
client.run_commands_from_slack_async(message_generator, fire_all, tag, control)
|
||||
except Exception: # pylint: disable=broad-except
|
||||
raise Exception("{}".format(traceback.format_exc()))
|
||||
raise Exception(f"{traceback.format_exc()}")
|
||||
|
|
|
@ -284,7 +284,7 @@ class SaltRenderError(SaltException):
|
|||
self.buffer = buf
|
||||
self.context = ""
|
||||
if trace:
|
||||
exc_str += "\n{}\n".format(trace)
|
||||
exc_str += f"\n{trace}\n"
|
||||
if self.line_num and self.buffer:
|
||||
# Avoid circular import
|
||||
import salt.utils.templates
|
||||
|
|
|
@ -22,7 +22,7 @@ def __virtual__():
|
|||
"Docker executor is only meant to be used with Docker Proxy Minions",
|
||||
)
|
||||
if __opts__.get("proxy", {}).get("proxytype") != __virtualname__:
|
||||
return False, "Proxytype does not match: {}".format(__virtualname__)
|
||||
return False, f"Proxytype does not match: {__virtualname__}"
|
||||
return True
|
||||
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ def execute(opts, data, func, args, kwargs):
|
|||
for arg in args:
|
||||
cmd.append(shlex.quote(str(arg)))
|
||||
for key in kwargs:
|
||||
cmd.append(shlex.quote("{}={}".format(key, kwargs[key])))
|
||||
cmd.append(shlex.quote(f"{key}={kwargs[key]}"))
|
||||
|
||||
cmd_ret = __salt__["cmd.run_all"](cmd, use_vt=True, python_shell=False)
|
||||
|
||||
|
|
|
@ -107,7 +107,7 @@ class Client:
|
|||
Make sure that this path is intended for the salt master and trim it
|
||||
"""
|
||||
if not path.startswith("salt://"):
|
||||
raise MinionError("Unsupported path: {}".format(path))
|
||||
raise MinionError(f"Unsupported path: {path}")
|
||||
file_path, saltenv = salt.utils.url.parse(path)
|
||||
return file_path
|
||||
|
||||
|
@ -273,7 +273,7 @@ class Client:
|
|||
for fn_ in self.file_list_emptydirs(saltenv):
|
||||
fn_ = salt.utils.data.decode(fn_)
|
||||
if fn_.startswith(path):
|
||||
minion_dir = "{}/{}".format(dest, fn_)
|
||||
minion_dir = f"{dest}/{fn_}"
|
||||
if not os.path.isdir(minion_dir):
|
||||
os.makedirs(minion_dir)
|
||||
ret.append(minion_dir)
|
||||
|
@ -438,7 +438,7 @@ class Client:
|
|||
ret.append(
|
||||
self.get_file(
|
||||
salt.utils.url.create(fn_),
|
||||
"{}/{}".format(dest, minion_relpath),
|
||||
f"{dest}/{minion_relpath}",
|
||||
True,
|
||||
saltenv,
|
||||
gzip,
|
||||
|
@ -457,7 +457,7 @@ class Client:
|
|||
# Remove the leading directories from path to derive
|
||||
# the relative path on the minion.
|
||||
minion_relpath = fn_[len(prefix) :].lstrip("/")
|
||||
minion_mkdir = "{}/{}".format(dest, minion_relpath)
|
||||
minion_mkdir = f"{dest}/{minion_relpath}"
|
||||
if not os.path.isdir(minion_mkdir):
|
||||
os.makedirs(minion_mkdir)
|
||||
ret.append(minion_mkdir)
|
||||
|
@ -508,9 +508,7 @@ class Client:
|
|||
if url_scheme in ("file", ""):
|
||||
# Local filesystem
|
||||
if not os.path.isabs(url_path):
|
||||
raise CommandExecutionError(
|
||||
"Path '{}' is not absolute".format(url_path)
|
||||
)
|
||||
raise CommandExecutionError(f"Path '{url_path}' is not absolute")
|
||||
if dest is None:
|
||||
with salt.utils.files.fopen(url_path, "rb") as fp_:
|
||||
data = fp_.read()
|
||||
|
@ -584,9 +582,7 @@ class Client:
|
|||
)
|
||||
return dest
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
raise MinionError(
|
||||
"Could not fetch from {}. Exception: {}".format(url, exc)
|
||||
)
|
||||
raise MinionError(f"Could not fetch from {url}. Exception: {exc}")
|
||||
if url_data.scheme == "ftp":
|
||||
try:
|
||||
ftp = ftplib.FTP() # nosec
|
||||
|
@ -597,7 +593,7 @@ class Client:
|
|||
ftp.login(url_data.username, url_data.password)
|
||||
remote_file_path = url_data.path.lstrip("/")
|
||||
with salt.utils.files.fopen(dest, "wb") as fp_:
|
||||
ftp.retrbinary("RETR {}".format(remote_file_path), fp_.write)
|
||||
ftp.retrbinary(f"RETR {remote_file_path}", fp_.write)
|
||||
ftp.quit()
|
||||
return dest
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
|
@ -631,7 +627,7 @@ class Client:
|
|||
swift_conn.get_object(url_data.netloc, url_data.path[1:], dest)
|
||||
return dest
|
||||
except Exception: # pylint: disable=broad-except
|
||||
raise MinionError("Could not fetch from {}".format(url))
|
||||
raise MinionError(f"Could not fetch from {url}")
|
||||
|
||||
get_kwargs = {}
|
||||
if url_data.username is not None and url_data.scheme in ("http", "https"):
|
||||
|
@ -654,7 +650,7 @@ class Client:
|
|||
fixed_url = url
|
||||
|
||||
destfp = None
|
||||
dest_etag = "{}.etag".format(dest)
|
||||
dest_etag = f"{dest}.etag"
|
||||
try:
|
||||
# Tornado calls streaming_callback on redirect response bodies.
|
||||
# But we need streaming to support fetching large files (> RAM
|
||||
|
@ -768,7 +764,7 @@ class Client:
|
|||
result.append(chunk)
|
||||
|
||||
else:
|
||||
dest_tmp = "{}.part".format(dest)
|
||||
dest_tmp = f"{dest}.part"
|
||||
# We need an open filehandle to use in the on_chunk callback,
|
||||
# that's why we're not using a with clause here.
|
||||
# pylint: disable=resource-leakage
|
||||
|
@ -830,7 +826,7 @@ class Client:
|
|||
)
|
||||
)
|
||||
except urllib.error.URLError as exc:
|
||||
raise MinionError("Error reading {}: {}".format(url, exc.reason))
|
||||
raise MinionError(f"Error reading {url}: {exc.reason}")
|
||||
finally:
|
||||
if destfp is not None:
|
||||
destfp.close()
|
||||
|
|
|
@ -320,9 +320,9 @@ def clear_lock(clear_func, role, remote=None, lock_type="update"):
|
|||
|
||||
Returns the return data from ``clear_func``.
|
||||
"""
|
||||
msg = "Clearing {} lock for {} remotes".format(lock_type, role)
|
||||
msg = f"Clearing {lock_type} lock for {role} remotes"
|
||||
if remote:
|
||||
msg += " matching {}".format(remote)
|
||||
msg += f" matching {remote}"
|
||||
log.debug(msg)
|
||||
return clear_func(remote=remote, lock_type=lock_type)
|
||||
|
||||
|
@ -375,12 +375,12 @@ class Fileserver:
|
|||
# Only subtracting backends from enabled ones
|
||||
ret = self.opts["fileserver_backend"]
|
||||
for sub in back:
|
||||
if "{}.envs".format(sub[1:]) in server_funcs:
|
||||
if f"{sub[1:]}.envs" in server_funcs:
|
||||
ret.remove(sub[1:])
|
||||
return ret
|
||||
|
||||
for sub in back:
|
||||
if "{}.envs".format(sub) in server_funcs:
|
||||
if f"{sub}.envs" in server_funcs:
|
||||
ret.append(sub)
|
||||
return ret
|
||||
|
||||
|
@ -408,7 +408,7 @@ class Fileserver:
|
|||
cleared = []
|
||||
errors = []
|
||||
for fsb in back:
|
||||
fstr = "{}.clear_cache".format(fsb)
|
||||
fstr = f"{fsb}.clear_cache"
|
||||
if fstr in self.servers:
|
||||
log.debug("Clearing %s fileserver cache", fsb)
|
||||
failed = self.servers[fstr]()
|
||||
|
@ -416,7 +416,7 @@ class Fileserver:
|
|||
errors.extend(failed)
|
||||
else:
|
||||
cleared.append(
|
||||
"The {} fileserver cache was successfully cleared".format(fsb)
|
||||
f"The {fsb} fileserver cache was successfully cleared"
|
||||
)
|
||||
return cleared, errors
|
||||
|
||||
|
@ -430,17 +430,15 @@ class Fileserver:
|
|||
locked = []
|
||||
errors = []
|
||||
for fsb in back:
|
||||
fstr = "{}.lock".format(fsb)
|
||||
fstr = f"{fsb}.lock"
|
||||
if fstr in self.servers:
|
||||
msg = "Setting update lock for {} remotes".format(fsb)
|
||||
msg = f"Setting update lock for {fsb} remotes"
|
||||
if remote:
|
||||
if not isinstance(remote, str):
|
||||
errors.append(
|
||||
"Badly formatted remote pattern '{}'".format(remote)
|
||||
)
|
||||
errors.append(f"Badly formatted remote pattern '{remote}'")
|
||||
continue
|
||||
else:
|
||||
msg += " matching {}".format(remote)
|
||||
msg += f" matching {remote}"
|
||||
log.debug(msg)
|
||||
good, bad = self.servers[fstr](remote=remote)
|
||||
locked.extend(good)
|
||||
|
@ -463,7 +461,7 @@ class Fileserver:
|
|||
cleared = []
|
||||
errors = []
|
||||
for fsb in back:
|
||||
fstr = "{}.clear_lock".format(fsb)
|
||||
fstr = f"{fsb}.clear_lock"
|
||||
if fstr in self.servers:
|
||||
good, bad = clear_lock(self.servers[fstr], fsb, remote=remote)
|
||||
cleared.extend(good)
|
||||
|
@ -477,7 +475,7 @@ class Fileserver:
|
|||
"""
|
||||
back = self.backends(back)
|
||||
for fsb in back:
|
||||
fstr = "{}.update".format(fsb)
|
||||
fstr = f"{fsb}.update"
|
||||
if fstr in self.servers:
|
||||
log.debug("Updating %s fileserver cache", fsb)
|
||||
self.servers[fstr](**kwargs)
|
||||
|
@ -490,7 +488,7 @@ class Fileserver:
|
|||
back = self.backends(back)
|
||||
ret = {}
|
||||
for fsb in back:
|
||||
fstr = "{}.update_intervals".format(fsb)
|
||||
fstr = f"{fsb}.update_intervals"
|
||||
if fstr in self.servers:
|
||||
ret[fsb] = self.servers[fstr]()
|
||||
return ret
|
||||
|
@ -504,7 +502,7 @@ class Fileserver:
|
|||
if sources:
|
||||
ret = {}
|
||||
for fsb in back:
|
||||
fstr = "{}.envs".format(fsb)
|
||||
fstr = f"{fsb}.envs"
|
||||
kwargs = (
|
||||
{"ignore_cache": True}
|
||||
if "ignore_cache" in _argspec(self.servers[fstr]).args
|
||||
|
@ -534,7 +532,7 @@ class Fileserver:
|
|||
"""
|
||||
back = self.backends(back)
|
||||
for fsb in back:
|
||||
fstr = "{}.init".format(fsb)
|
||||
fstr = f"{fsb}.init"
|
||||
if fstr in self.servers:
|
||||
self.servers[fstr]()
|
||||
|
||||
|
@ -596,7 +594,7 @@ class Fileserver:
|
|||
saltenv = str(saltenv)
|
||||
|
||||
for fsb in back:
|
||||
fstr = "{}.find_file".format(fsb)
|
||||
fstr = f"{fsb}.find_file"
|
||||
if fstr in self.servers:
|
||||
fnd = self.servers[fstr](path, saltenv, **kwargs)
|
||||
if fnd.get("path"):
|
||||
|
@ -766,7 +764,7 @@ class Fileserver:
|
|||
load["saltenv"] = str(load["saltenv"])
|
||||
|
||||
for fsb in self.backends(load.pop("fsbackend", None)):
|
||||
fstr = "{}.file_list".format(fsb)
|
||||
fstr = f"{fsb}.file_list"
|
||||
if fstr in self.servers:
|
||||
ret.update(self.servers[fstr](load))
|
||||
# some *fs do not handle prefix. Ensure it is filtered
|
||||
|
@ -791,7 +789,7 @@ class Fileserver:
|
|||
load["saltenv"] = str(load["saltenv"])
|
||||
|
||||
for fsb in self.backends(None):
|
||||
fstr = "{}.file_list_emptydirs".format(fsb)
|
||||
fstr = f"{fsb}.file_list_emptydirs"
|
||||
if fstr in self.servers:
|
||||
ret.update(self.servers[fstr](load))
|
||||
# some *fs do not handle prefix. Ensure it is filtered
|
||||
|
@ -816,7 +814,7 @@ class Fileserver:
|
|||
load["saltenv"] = str(load["saltenv"])
|
||||
|
||||
for fsb in self.backends(load.pop("fsbackend", None)):
|
||||
fstr = "{}.dir_list".format(fsb)
|
||||
fstr = f"{fsb}.dir_list"
|
||||
if fstr in self.servers:
|
||||
ret.update(self.servers[fstr](load))
|
||||
# some *fs do not handle prefix. Ensure it is filtered
|
||||
|
@ -841,7 +839,7 @@ class Fileserver:
|
|||
load["saltenv"] = str(load["saltenv"])
|
||||
|
||||
for fsb in self.backends(load.pop("fsbackend", None)):
|
||||
symlstr = "{}.symlink_list".format(fsb)
|
||||
symlstr = f"{fsb}.symlink_list"
|
||||
if symlstr in self.servers:
|
||||
ret = self.servers[symlstr](load)
|
||||
# some *fs do not handle prefix. Ensure it is filtered
|
||||
|
|
|
@ -239,7 +239,7 @@ def init():
|
|||
|
||||
per_remote_defaults = {}
|
||||
for param in PER_REMOTE_OVERRIDES:
|
||||
per_remote_defaults[param] = str(__opts__["hgfs_{}".format(param)])
|
||||
per_remote_defaults[param] = str(__opts__[f"hgfs_{param}"])
|
||||
|
||||
for remote in __opts__["hgfs_remotes"]:
|
||||
repo_conf = copy.deepcopy(per_remote_defaults)
|
||||
|
@ -355,7 +355,7 @@ def init():
|
|||
with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig:
|
||||
hgconfig.write("[paths]\n")
|
||||
hgconfig.write(
|
||||
salt.utils.stringutils.to_str("default = {}\n".format(repo_url))
|
||||
salt.utils.stringutils.to_str(f"default = {repo_url}\n")
|
||||
)
|
||||
|
||||
repo_conf.update(
|
||||
|
@ -365,7 +365,7 @@ def init():
|
|||
"hash": repo_hash,
|
||||
"cachedir": rp_,
|
||||
"lockfile": os.path.join(
|
||||
__opts__["cachedir"], "hgfs", "{}.update.lk".format(repo_hash)
|
||||
__opts__["cachedir"], "hgfs", f"{repo_hash}.update.lk"
|
||||
),
|
||||
}
|
||||
)
|
||||
|
@ -379,7 +379,7 @@ def init():
|
|||
try:
|
||||
with salt.utils.files.fopen(remote_map, "w+") as fp_:
|
||||
timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
|
||||
fp_.write("# hgfs_remote map as of {}\n".format(timestamp))
|
||||
fp_.write(f"# hgfs_remote map as of {timestamp}\n")
|
||||
for repo in repos:
|
||||
fp_.write(
|
||||
salt.utils.stringutils.to_str(
|
||||
|
@ -444,7 +444,7 @@ def clear_cache():
|
|||
try:
|
||||
shutil.rmtree(rdir)
|
||||
except OSError as exc:
|
||||
errors.append("Unable to delete {}: {}".format(rdir, exc))
|
||||
errors.append(f"Unable to delete {rdir}: {exc}")
|
||||
return errors
|
||||
|
||||
|
||||
|
@ -694,14 +694,12 @@ def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
|
|||
|
||||
dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path)
|
||||
hashes_glob = os.path.join(
|
||||
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.*".format(path)
|
||||
__opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.hash.*"
|
||||
)
|
||||
blobshadest = os.path.join(
|
||||
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.blob_sha1".format(path)
|
||||
)
|
||||
lk_fn = os.path.join(
|
||||
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.lk".format(path)
|
||||
__opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.hash.blob_sha1"
|
||||
)
|
||||
lk_fn = os.path.join(__opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.lk")
|
||||
destdir = os.path.dirname(dest)
|
||||
hashdir = os.path.dirname(blobshadest)
|
||||
if not os.path.isdir(destdir):
|
||||
|
@ -746,7 +744,7 @@ def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
|
|||
return fnd
|
||||
try:
|
||||
repo["repo"].cat(
|
||||
[salt.utils.stringutils.to_bytes("path:{}".format(repo_path))],
|
||||
[salt.utils.stringutils.to_bytes(f"path:{repo_path}")],
|
||||
rev=ref[2],
|
||||
output=dest,
|
||||
)
|
||||
|
|
|
@ -219,9 +219,7 @@ def update():
|
|||
os.makedirs(mtime_map_path_dir)
|
||||
with salt.utils.files.fopen(mtime_map_path, "wb") as fp_:
|
||||
for file_path, mtime in new_mtime_map.items():
|
||||
fp_.write(
|
||||
salt.utils.stringutils.to_bytes("{}:{}\n".format(file_path, mtime))
|
||||
)
|
||||
fp_.write(salt.utils.stringutils.to_bytes(f"{file_path}:{mtime}\n"))
|
||||
|
||||
if __opts__.get("fileserver_events", False):
|
||||
# if there is a change, fire an event
|
||||
|
@ -349,11 +347,11 @@ def _file_lists(load, form):
|
|||
return []
|
||||
list_cache = os.path.join(
|
||||
list_cachedir,
|
||||
"{}.p".format(salt.utils.files.safe_filename_leaf(actual_saltenv)),
|
||||
f"{salt.utils.files.safe_filename_leaf(actual_saltenv)}.p",
|
||||
)
|
||||
w_lock = os.path.join(
|
||||
list_cachedir,
|
||||
".{}.w".format(salt.utils.files.safe_filename_leaf(actual_saltenv)),
|
||||
f".{salt.utils.files.safe_filename_leaf(actual_saltenv)}.w",
|
||||
)
|
||||
cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
|
||||
__opts__, form, list_cache, w_lock
|
||||
|
|
|
@ -136,7 +136,7 @@ def init():
|
|||
|
||||
per_remote_defaults = {}
|
||||
for param in PER_REMOTE_OVERRIDES:
|
||||
per_remote_defaults[param] = str(__opts__["svnfs_{}".format(param)])
|
||||
per_remote_defaults[param] = str(__opts__[f"svnfs_{param}"])
|
||||
|
||||
for remote in __opts__["svnfs_remotes"]:
|
||||
repo_conf = copy.deepcopy(per_remote_defaults)
|
||||
|
@ -239,7 +239,7 @@ def init():
|
|||
try:
|
||||
with salt.utils.files.fopen(remote_map, "w+") as fp_:
|
||||
timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
|
||||
fp_.write("# svnfs_remote map as of {}\n".format(timestamp))
|
||||
fp_.write(f"# svnfs_remote map as of {timestamp}\n")
|
||||
for repo_conf in repos:
|
||||
fp_.write(
|
||||
salt.utils.stringutils.to_str(
|
||||
|
@ -306,7 +306,7 @@ def clear_cache():
|
|||
try:
|
||||
shutil.rmtree(rdir)
|
||||
except OSError as exc:
|
||||
errors.append("Unable to delete {}: {}".format(rdir, exc))
|
||||
errors.append(f"Unable to delete {rdir}: {exc}")
|
||||
return errors
|
||||
|
||||
|
||||
|
|
|
@ -289,7 +289,7 @@ def _linux_gpu_data():
|
|||
|
||||
devs = []
|
||||
try:
|
||||
lspci_out = __salt__["cmd.run"]("{} -vmm".format(lspci))
|
||||
lspci_out = __salt__["cmd.run"](f"{lspci} -vmm")
|
||||
|
||||
cur_dev = {}
|
||||
error = False
|
||||
|
@ -363,7 +363,7 @@ def _netbsd_gpu_data():
|
|||
for line in pcictl_out.splitlines():
|
||||
for vendor in known_vendors:
|
||||
vendor_match = re.match(
|
||||
r"[0-9:]+ ({}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE
|
||||
rf"[0-9:]+ ({vendor}) (.+) \(VGA .+\)", line, re.IGNORECASE
|
||||
)
|
||||
if vendor_match:
|
||||
gpus.append(
|
||||
|
@ -425,18 +425,18 @@ def _bsd_cpudata(osdata):
|
|||
if sysctl:
|
||||
cmds.update(
|
||||
{
|
||||
"num_cpus": "{} -n hw.ncpu".format(sysctl),
|
||||
"cpuarch": "{} -n hw.machine".format(sysctl),
|
||||
"cpu_model": "{} -n hw.model".format(sysctl),
|
||||
"num_cpus": f"{sysctl} -n hw.ncpu",
|
||||
"cpuarch": f"{sysctl} -n hw.machine",
|
||||
"cpu_model": f"{sysctl} -n hw.model",
|
||||
}
|
||||
)
|
||||
|
||||
if arch and osdata["kernel"] == "OpenBSD":
|
||||
cmds["cpuarch"] = "{} -s".format(arch)
|
||||
cmds["cpuarch"] = f"{arch} -s"
|
||||
|
||||
if osdata["kernel"] == "Darwin":
|
||||
cmds["cpu_model"] = "{} -n machdep.cpu.brand_string".format(sysctl)
|
||||
cmds["cpu_flags"] = "{} -n machdep.cpu.features".format(sysctl)
|
||||
cmds["cpu_model"] = f"{sysctl} -n machdep.cpu.brand_string"
|
||||
cmds["cpu_flags"] = f"{sysctl} -n machdep.cpu.features"
|
||||
|
||||
grains = {k: __salt__["cmd.run"](v) for k, v in cmds.items()}
|
||||
|
||||
|
@ -521,7 +521,7 @@ def _aix_cpudata(): # pragma: no cover
|
|||
grains = {}
|
||||
cmd = salt.utils.path.which("prtconf")
|
||||
if cmd:
|
||||
data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep
|
||||
data = __salt__["cmd.run"](f"{cmd}") + os.linesep
|
||||
for dest, regstring in (
|
||||
("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
|
||||
("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
|
||||
|
@ -567,9 +567,9 @@ def _osx_memdata():
|
|||
|
||||
sysctl = salt.utils.path.which("sysctl")
|
||||
if sysctl:
|
||||
mem = __salt__["cmd.run"]("{} -n hw.memsize".format(sysctl))
|
||||
mem = __salt__["cmd.run"](f"{sysctl} -n hw.memsize")
|
||||
swap_total = (
|
||||
__salt__["cmd.run"]("{} -n vm.swapusage".format(sysctl))
|
||||
__salt__["cmd.run"](f"{sysctl} -n vm.swapusage")
|
||||
.split()[2]
|
||||
.replace(",", ".")
|
||||
)
|
||||
|
@ -594,20 +594,20 @@ def _bsd_memdata(osdata):
|
|||
|
||||
sysctl = salt.utils.path.which("sysctl")
|
||||
if sysctl:
|
||||
mem = __salt__["cmd.run"]("{} -n hw.physmem".format(sysctl))
|
||||
mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem")
|
||||
if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
|
||||
mem = __salt__["cmd.run"]("{} -n hw.physmem64".format(sysctl))
|
||||
mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem64")
|
||||
grains["mem_total"] = int(mem) // 1024 // 1024
|
||||
|
||||
if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
|
||||
swapctl = salt.utils.path.which("swapctl")
|
||||
swap_data = __salt__["cmd.run"]("{} -sk".format(swapctl))
|
||||
swap_data = __salt__["cmd.run"](f"{swapctl} -sk")
|
||||
if swap_data == "no swap devices configured":
|
||||
swap_total = 0
|
||||
else:
|
||||
swap_total = swap_data.split(" ")[1]
|
||||
else:
|
||||
swap_total = __salt__["cmd.run"]("{} -n vm.swap_total".format(sysctl))
|
||||
swap_total = __salt__["cmd.run"](f"{sysctl} -n vm.swap_total")
|
||||
grains["swap_total"] = int(swap_total) // 1024 // 1024
|
||||
return grains
|
||||
|
||||
|
@ -625,7 +625,7 @@ def _sunos_memdata(): # pragma: no cover
|
|||
grains["mem_total"] = int(comps[2].strip())
|
||||
|
||||
swap_cmd = salt.utils.path.which("swap")
|
||||
swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split()
|
||||
swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
|
||||
try:
|
||||
swap_avail = int(swap_data[-2][:-1])
|
||||
swap_used = int(swap_data[-4][:-1])
|
||||
|
@ -653,7 +653,7 @@ def _aix_memdata(): # pragma: no cover
|
|||
|
||||
swap_cmd = salt.utils.path.which("swap")
|
||||
if swap_cmd:
|
||||
swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split()
|
||||
swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
|
||||
try:
|
||||
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
|
||||
except ValueError:
|
||||
|
@ -706,7 +706,7 @@ def _aix_get_machine_id(): # pragma: no cover
|
|||
grains = {}
|
||||
cmd = salt.utils.path.which("lsattr")
|
||||
if cmd:
|
||||
data = __salt__["cmd.run"]("{} -El sys0".format(cmd)) + os.linesep
|
||||
data = __salt__["cmd.run"](f"{cmd} -El sys0") + os.linesep
|
||||
uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
|
||||
for regex in uuid_regexes:
|
||||
res = regex.search(data)
|
||||
|
@ -1033,7 +1033,7 @@ def _virtual(osdata):
|
|||
subtype_cmd = "{} -c current get -H -o value {}-role".format(
|
||||
command, role
|
||||
)
|
||||
ret = __salt__["cmd.run"]("{}".format(subtype_cmd))
|
||||
ret = __salt__["cmd.run"](f"{subtype_cmd}")
|
||||
if ret == "true":
|
||||
roles.append(role)
|
||||
if roles:
|
||||
|
@ -1179,14 +1179,14 @@ def _virtual(osdata):
|
|||
elif osdata["kernel"] == "FreeBSD":
|
||||
kenv = salt.utils.path.which("kenv")
|
||||
if kenv:
|
||||
product = __salt__["cmd.run"]("{} smbios.system.product".format(kenv))
|
||||
maker = __salt__["cmd.run"]("{} smbios.system.maker".format(kenv))
|
||||
product = __salt__["cmd.run"](f"{kenv} smbios.system.product")
|
||||
maker = __salt__["cmd.run"](f"{kenv} smbios.system.maker")
|
||||
if product.startswith("VMware"):
|
||||
grains["virtual"] = "VMware"
|
||||
if product.startswith("VirtualBox"):
|
||||
grains["virtual"] = "VirtualBox"
|
||||
if maker.startswith("Xen"):
|
||||
grains["virtual_subtype"] = "{} {}".format(maker, product)
|
||||
grains["virtual_subtype"] = f"{maker} {product}"
|
||||
grains["virtual"] = "xen"
|
||||
if maker.startswith("Microsoft") and product.startswith("Virtual"):
|
||||
grains["virtual"] = "VirtualPC"
|
||||
|
@ -1197,9 +1197,9 @@ def _virtual(osdata):
|
|||
if maker.startswith("Amazon EC2"):
|
||||
grains["virtual"] = "Nitro"
|
||||
if sysctl:
|
||||
hv_vendor = __salt__["cmd.run"]("{} -n hw.hv_vendor".format(sysctl))
|
||||
model = __salt__["cmd.run"]("{} -n hw.model".format(sysctl))
|
||||
jail = __salt__["cmd.run"]("{} -n security.jail.jailed".format(sysctl))
|
||||
hv_vendor = __salt__["cmd.run"](f"{sysctl} -n hw.hv_vendor")
|
||||
model = __salt__["cmd.run"](f"{sysctl} -n hw.model")
|
||||
jail = __salt__["cmd.run"](f"{sysctl} -n security.jail.jailed")
|
||||
if "bhyve" in hv_vendor:
|
||||
grains["virtual"] = "bhyve"
|
||||
elif "QEMU Virtual CPU" in model:
|
||||
|
@ -1215,22 +1215,19 @@ def _virtual(osdata):
|
|||
elif osdata["kernel"] == "NetBSD":
|
||||
if sysctl:
|
||||
if "QEMU Virtual CPU" in __salt__["cmd.run"](
|
||||
"{} -n machdep.cpu_brand".format(sysctl)
|
||||
f"{sysctl} -n machdep.cpu_brand"
|
||||
):
|
||||
grains["virtual"] = "kvm"
|
||||
elif "invalid" not in __salt__["cmd.run"](
|
||||
"{} -n machdep.xen.suspend".format(sysctl)
|
||||
f"{sysctl} -n machdep.xen.suspend"
|
||||
):
|
||||
grains["virtual"] = "Xen PV DomU"
|
||||
elif "VMware" in __salt__["cmd.run"](
|
||||
"{} -n machdep.dmi.system-vendor".format(sysctl)
|
||||
f"{sysctl} -n machdep.dmi.system-vendor"
|
||||
):
|
||||
grains["virtual"] = "VMware"
|
||||
# NetBSD has Xen dom0 support
|
||||
elif (
|
||||
__salt__["cmd.run"]("{} -n machdep.idle-mechanism".format(sysctl))
|
||||
== "xen"
|
||||
):
|
||||
elif __salt__["cmd.run"](f"{sysctl} -n machdep.idle-mechanism") == "xen":
|
||||
if os.path.isfile("/var/run/xenconsoled.pid"):
|
||||
grains["virtual_subtype"] = "Xen Dom0"
|
||||
elif osdata["kernel"] == "SunOS":
|
||||
|
@@ -1238,7 +1235,7 @@ def _virtual(osdata):
         # check the zonename here as fallback
         zonename = salt.utils.path.which("zonename")
         if zonename:
-            zone = __salt__["cmd.run"]("{}".format(zonename))
+            zone = __salt__["cmd.run"](f"{zonename}")
             if zone != "global":
                 grains["virtual"] = "zone"

@@ -1267,7 +1264,7 @@ def _virtual(osdata):
                     r".*Product Name: ([^\r\n]*).*", output, flags=re.DOTALL
                 )
                 if product:
-                    grains["virtual_subtype"] = "Amazon EC2 ({})".format(product[1])
+                    grains["virtual_subtype"] = f"Amazon EC2 ({product[1]})"
             elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL):
                 grains["virtual_subtype"] = "Amazon EC2"

@@ -1299,9 +1296,7 @@ def _virtual_hv(osdata):
     try:
         version = {}
         for fn in ("major", "minor", "extra"):
-            with salt.utils.files.fopen(
-                "/sys/hypervisor/version/{}".format(fn), "r"
-            ) as fhr:
+            with salt.utils.files.fopen(f"/sys/hypervisor/version/{fn}", "r") as fhr:
                 version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
         grains["virtual_hv_version"] = "{}.{}{}".format(
             version["major"], version["minor"], version["extra"]

@@ -1457,7 +1452,7 @@ def _windows_os_release_grain(caption, product_type):
                 # ie: R2
                 if re.match(r"^R\d+$", item):
                     release = item
-        os_release = "{}Server{}".format(version, release)
+        os_release = f"{version}Server{release}"
     else:
         for item in caption.split(" "):
             # If it's a number, decimal number, Thin or Vista, then it's the

@@ -1703,7 +1698,7 @@ def _linux_devicetree_platform_data():
         try:
             # /proc/device-tree should be used instead of /sys/firmware/devicetree/base
             # see https://github.com/torvalds/linux/blob/v5.13/Documentation/ABI/testing/sysfs-firmware-ofw#L14
-            loc = "/proc/device-tree/{}".format(path)
+            loc = f"/proc/device-tree/{path}"
             if os.path.isfile(loc):
                 with salt.utils.files.fopen(loc, mode="r") as f:
                     return f.read().rstrip("\x00")  # all strings are null-terminated

@@ -1942,18 +1937,13 @@ def _linux_bin_exists(binary):
     """
     for search_cmd in ("which", "type -ap"):
         try:
-            return __salt__["cmd.retcode"]("{} {}".format(search_cmd, binary)) == 0
+            return __salt__["cmd.retcode"](f"{search_cmd} {binary}") == 0
         except salt.exceptions.CommandExecutionError:
             pass

     try:
         return (
-            len(
-                __salt__["cmd.run_all"]("whereis -b {}".format(binary))[
-                    "stdout"
-                ].split()
-            )
-            > 1
+            len(__salt__["cmd.run_all"](f"whereis -b {binary}")["stdout"].split()) > 1
         )
     except salt.exceptions.CommandExecutionError:
         return False

@@ -1971,7 +1961,7 @@ def _parse_lsb_release():
                     pass
                 else:
                     # Adds lsb_distrib_{id,release,codename,description}
-                    ret["lsb_{}".format(key.lower())] = value.rstrip()
+                    ret[f"lsb_{key.lower()}"] = value.rstrip()
     except OSError as exc:
         log.trace("Failed to parse /etc/lsb-release: %s", exc)
     return ret

@@ -2716,7 +2706,7 @@ def os_data():
         osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
         grains["os"] = "MacOS"
         grains["os_family"] = "MacOS"
-        grains["osfullname"] = "{} {}".format(osname, osrelease)
+        grains["osfullname"] = f"{osname} {osrelease}"
         grains["osrelease"] = osrelease
         grains["osbuild"] = osbuild
         grains["init"] = "launchd"

@@ -3257,7 +3247,7 @@ def _hw_data(osdata):
                 "productname": "DeviceDesc",
             }
             for grain_name, cmd_key in hwdata.items():
-                result = __salt__["cmd.run_all"]("fw_printenv {}".format(cmd_key))
+                result = __salt__["cmd.run_all"](f"fw_printenv {cmd_key}")
                 if result["retcode"] == 0:
                     uboot_keyval = result["stdout"].split("=")
                     grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])

@@ -3277,7 +3267,7 @@ def _hw_data(osdata):
                 "uuid": "smbios.system.uuid",
             }
             for key, val in fbsd_hwdata.items():
-                value = __salt__["cmd.run"]("{} {}".format(kenv, val))
+                value = __salt__["cmd.run"](f"{kenv} {val}")
                 grains[key] = _clean_value(key, value)
     elif osdata["kernel"] == "OpenBSD":
         sysctl = salt.utils.path.which("sysctl")

@@ -3289,7 +3279,7 @@ def _hw_data(osdata):
             "uuid": "hw.uuid",
         }
         for key, oid in hwdata.items():
-            value = __salt__["cmd.run"]("{} -n {}".format(sysctl, oid))
+            value = __salt__["cmd.run"](f"{sysctl} -n {oid}")
             if not value.endswith(" value is not available"):
                 grains[key] = _clean_value(key, value)
     elif osdata["kernel"] == "NetBSD":

@@ -3304,7 +3294,7 @@ def _hw_data(osdata):
             "uuid": "machdep.dmi.system-uuid",
         }
         for key, oid in nbsd_hwdata.items():
-            result = __salt__["cmd.run_all"]("{} -n {}".format(sysctl, oid))
+            result = __salt__["cmd.run_all"](f"{sysctl} -n {oid}")
             if result["retcode"] == 0:
                 grains[key] = _clean_value(key, result["stdout"])
     elif osdata["kernel"] == "Darwin":

@@ -3312,7 +3302,7 @@ def _hw_data(osdata):
         sysctl = salt.utils.path.which("sysctl")
         hwdata = {"productname": "hw.model"}
         for key, oid in hwdata.items():
-            value = __salt__["cmd.run"]("{} -b {}".format(sysctl, oid))
+            value = __salt__["cmd.run"](f"{sysctl} -b {oid}")
             if not value.endswith(" is invalid"):
                 grains[key] = _clean_value(key, value)
     elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):

@@ -3326,7 +3316,7 @@ def _hw_data(osdata):
             ("/usr/sbin/virtinfo", "-a"),
         ):
             if salt.utils.path.which(cmd):  # Also verifies that cmd is executable
-                data += __salt__["cmd.run"]("{} {}".format(cmd, args))
+                data += __salt__["cmd.run"](f"{cmd} {args}")
                 data += "\n"

         sn_regexes = [

@@ -3441,7 +3431,7 @@ def _hw_data(osdata):
     elif osdata["kernel"] == "AIX":
         cmd = salt.utils.path.which("prtconf")
         if cmd:
-            data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep
+            data = __salt__["cmd.run"](f"{cmd}") + os.linesep
             for dest, regstring in (
                 ("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
                 ("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),

@@ -3523,14 +3513,14 @@ def default_gateway():
             for line in out.splitlines():
                 if line.startswith("default"):
                     grains["ip_gw"] = True
-                    grains["ip{}_gw".format(ip_version)] = True
+                    grains[f"ip{ip_version}_gw"] = True
                     try:
                         via, gw_ip = line.split()[1:3]
                     except ValueError:
                         pass
                     else:
                         if via == "via":
-                            grains["ip{}_gw".format(ip_version)] = gw_ip
+                            grains[f"ip{ip_version}_gw"] = gw_ip
                     break
         except Exception:  # pylint: disable=broad-except
             continue
@@ -91,14 +91,14 @@ def _freebsd_geom():
     geom = salt.utils.path.which("geom")
     ret = {"disks": {}, "ssds": []}

-    devices = __salt__["cmd.run"]("{} disk list".format(geom))
+    devices = __salt__["cmd.run"](f"{geom} disk list")
     devices = devices.split("\n\n")

     def parse_geom_attribs(device):
         tmp = {}
         for line in device.split("\n"):
             for attrib in _geom_attribs:
-                search = re.search(r"{}:\s(.*)".format(attrib), line)
+                search = re.search(rf"{attrib}:\s(.*)", line)
                 if search:
                     value = _datavalue(
                         _geomconsts._datatypes.get(attrib), search.group(1)

@@ -174,7 +174,7 @@ def _windows_disks():
         info = line.split()
         if len(info) != 2 or not info[0].isdigit() or not info[1].isdigit():
             continue
-        device = r"\\.\PhysicalDrive{}".format(info[0])
+        device = rf"\\.\PhysicalDrive{info[0]}"
         mediatype = info[1]
         if mediatype == "3":
             log.trace("Device %s reports itself as an HDD", device)
@@ -33,14 +33,12 @@ def _linux_lvm():
     ret = {}
     cmd = salt.utils.path.which("lvm")
     if cmd:
-        vgs = __salt__["cmd.run_all"]("{} vgs -o vg_name --noheadings".format(cmd))
+        vgs = __salt__["cmd.run_all"](f"{cmd} vgs -o vg_name --noheadings")

         for vg in vgs["stdout"].splitlines():
             vg = vg.strip()
             ret[vg] = []
-            lvs = __salt__["cmd.run_all"](
-                "{} lvs -o lv_name --noheadings {}".format(cmd, vg)
-            )
+            lvs = __salt__["cmd.run_all"](f"{cmd} lvs -o lv_name --noheadings {vg}")
             for lv in lvs["stdout"].splitlines():
                 ret[vg].append(lv.strip())

@@ -52,11 +50,11 @@ def _linux_lvm():
 def _aix_lvm():
     ret = {}
     cmd = salt.utils.path.which("lsvg")
-    vgs = __salt__["cmd.run"]("{}".format(cmd))
+    vgs = __salt__["cmd.run"](f"{cmd}")

     for vg in vgs.splitlines():
         ret[vg] = []
-        lvs = __salt__["cmd.run"]("{} -l {}".format(cmd, vg))
+        lvs = __salt__["cmd.run"](f"{cmd} -l {vg}")
         for lvline in lvs.splitlines()[2:]:
             lv = lvline.split(" ", 1)[0]
             ret[vg].append(lv)
@@ -62,7 +62,7 @@ def _user_mdata(mdata_list=None, mdata_get=None):
             log.warning("mdata-list returned an error, skipping mdata grains.")
             continue
         mdata_value = __salt__["cmd.run"](
-            "{} {}".format(mdata_get, mdata_grain), ignore_retcode=True
+            f"{mdata_get} {mdata_grain}", ignore_retcode=True
         )

         if not mdata_grain.startswith("sdc:"):

@@ -108,7 +108,7 @@ def _sdc_mdata(mdata_list=None, mdata_get=None):

     for mdata_grain in sdc_text_keys + sdc_json_keys:
         mdata_value = __salt__["cmd.run"](
-            "{} sdc:{}".format(mdata_get, mdata_grain), ignore_retcode=True
+            f"{mdata_get} sdc:{mdata_grain}", ignore_retcode=True
         )
         if mdata_value.startswith("ERROR:"):
             log.warning(
@@ -24,7 +24,7 @@ import salt.utils.stringutils

 # metadata server information
 IP = "169.254.169.254"
-HOST = "http://{}/".format(IP)
+HOST = f"http://{IP}/"


 def __virtual__():
16
salt/key.py
@@ -177,7 +177,7 @@ class KeyCLI:

         if cmd in ("accept", "reject", "delete") and args is None:
             args = self.opts.get("match_dict", {}).get("minions")
-        fstr = "key.{}".format(cmd)
+        fstr = f"key.{cmd}"
         fun = self.client.functions[fstr]
         args, kwargs = self._get_args_kwargs(fun, args)

@@ -230,7 +230,7 @@ class KeyCLI:
             stat_str = statuses[0]
         else:
             stat_str = "{} or {}".format(", ".join(statuses[:-1]), statuses[-1])
-        msg = "The key glob '{}' does not match any {} keys.".format(match, stat_str)
+        msg = f"The key glob '{match}' does not match any {stat_str} keys."
         print(msg)

     def run(self):

@@ -291,7 +291,7 @@ class KeyCLI:
             else:
                 salt.output.display_output({"return": ret}, "key", opts=self.opts)
         except salt.exceptions.SaltException as exc:
-            ret = "{}".format(exc)
+            ret = f"{exc}"
             if not self.opts.get("quiet", False):
                 salt.output.display_output(ret, "nested", self.opts)
         return ret

@@ -311,7 +311,7 @@ class Key:
         self.opts = opts
         kind = self.opts.get("__role", "")  # application kind
         if kind not in salt.utils.kinds.APPL_KINDS:
-            emsg = "Invalid application kind = '{}'.".format(kind)
+            emsg = f"Invalid application kind = '{kind}'."
             log.error(emsg)
             raise ValueError(emsg)
         self.event = salt.utils.event.get_event(

@@ -377,7 +377,7 @@ class Key:
         # check given pub-key
         if pub:
             if not os.path.isfile(pub):
-                return "Public-key {} does not exist".format(pub)
+                return f"Public-key {pub} does not exist"
         # default to master.pub
         else:
             mpub = self.opts["pki_dir"] + "/" + "master.pub"

@@ -387,7 +387,7 @@ class Key:
         # check given priv-key
         if priv:
             if not os.path.isfile(priv):
-                return "Private-key {} does not exist".format(priv)
+                return f"Private-key {priv} does not exist"
         # default to master_sign.pem
         else:
             mpriv = self.opts["pki_dir"] + "/" + "master_sign.pem"

@@ -467,7 +467,7 @@ class Key:
         if clist:
             for minion in clist:
                 if minion not in minions and minion not in preserve_minions:
-                    cache.flush("{}/{}".format(self.ACC, minion))
+                    cache.flush(f"{self.ACC}/{minion}")

     def check_master(self):
         """

@@ -663,7 +663,7 @@ class Key:
                 pass
         for keydir, key in invalid_keys:
             matches[keydir].remove(key)
-            sys.stderr.write("Unable to accept invalid key for {}.\n".format(key))
+            sys.stderr.write(f"Unable to accept invalid key for {key}.\n")
         return self.name_match(match) if match is not None else self.dict_match(matches)

     def accept_all(self):
Some files were not shown because too many files have changed in this diff
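
The hunks above all apply the same mechanical rewrite: str.format() calls are replaced with the equivalent f-strings (available since Python 3.6). A minimal sketch of the before/after pattern, using hypothetical values that only echo the hunks above rather than code taken from them:

    # Illustrative only: names and values are assumptions, not Salt code.
    sysctl = "/sbin/sysctl"
    oid = "hw.model"

    old_style = "{} -n {}".format(sysctl, oid)  # spelling before this change
    new_style = f"{sysctl} -n {oid}"            # spelling after this change

    # Both spellings build the identical command string.
    assert old_style == new_style == "/sbin/sysctl -n hw.model"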