Update code to be Py3.7+ to reduce merge forward conflicts

Pedro Algarvio 2024-02-27 11:08:46 +00:00
parent 3dea2eb541
commit 03ad4c6337
1424 changed files with 11463 additions and 12874 deletions

.gitignore vendored

@@ -121,6 +121,7 @@ Session.vim
# Nox requirements archives
nox.*.tar.bzip2
+nox.*.tar.gz
nox.*.tar.xz
# Debian packages


@@ -1625,8 +1625,8 @@ repos:
rev: v3.15.1
hooks:
- id: pyupgrade
-name: Drop six usage and Py2 support
-args: [--py3-plus, --keep-mock]
+name: Upgrade code to Py3.7+
+args: [--py37-plus, --keep-mock]
exclude: >
(?x)^(
salt/client/ssh/ssh_py_shim.py
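
For context: the pyupgrade hook rewrites legacy string formatting and other pre-3.7 idioms into their Python 3.7+ equivalents, which is the change repeated throughout the rest of this commit. A minimal sketch of the rewrite, using an illustrative function that is not taken from the Salt code base:

    # Before: str.format() style
    def describe(name, value):
        return "{}={!r}".format(name, value)

    # After pyupgrade --py37-plus: f-string
    def describe(name, value):
        return f"{name}={value!r}"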


@@ -53,7 +53,7 @@ class LiterateCoding(Directive):
comment; False designates code.
"""
comment_char = "#" # TODO: move this into a directive option
-comment = re.compile(r"^\s*{}[ \n]".format(comment_char))
+comment = re.compile(rf"^\s*{comment_char}[ \n]")
section_test = lambda val: bool(comment.match(val))
sections = []
@@ -136,7 +136,7 @@ class LiterateFormula(LiterateCoding):
formulas_dirs = config.formulas_dirs
fpath = sls_path.replace(".", "/")
-name_options = ("{}.sls".format(fpath), os.path.join(fpath, "init.sls"))
+name_options = (f"{fpath}.sls", os.path.join(fpath, "init.sls"))
paths = [
os.path.join(fdir, fname)
@@ -151,7 +151,7 @@ class LiterateFormula(LiterateCoding):
except OSError:
pass
-raise OSError("Could not find sls file '{}'".format(sls_path))
+raise OSError(f"Could not find sls file '{sls_path}'")
class CurrentFormula(Directive):
@@ -196,7 +196,7 @@ class Formula(Directive):
targetnode = nodes.target("", "", ids=["module-" + formname], ismod=True)
self.state.document.note_explicit_target(targetnode)
-indextext = "{}-formula)".format(formname)
+indextext = f"{formname}-formula)"
inode = addnodes.index(
entries=[("single", indextext, "module-" + formname, "")]
)
@@ -221,9 +221,9 @@ class State(Directive):
formula = env.temp_data.get("salt:formula")
-indextext = "{1} ({0}-formula)".format(formula, statename)
+indextext = f"{statename} ({formula}-formula)"
inode = addnodes.index(
-entries=[("single", indextext, "module-{}".format(statename), "")]
+entries=[("single", indextext, f"module-{statename}", "")]
)
return [targetnode, inode]


@@ -107,7 +107,7 @@ def session_warn(session, message):
try:
session.warn(message)
except AttributeError:
-session.log("WARNING: {}".format(message))
+session.log(f"WARNING: {message}")
def session_run_always(session, *command, **kwargs):
@@ -132,15 +132,15 @@ def session_run_always(session, *command, **kwargs):
def find_session_runner(session, name, python_version, onedir=False, **kwargs):
if onedir:
-name += "-onedir-{}".format(ONEDIR_PYTHON_PATH)
+name += f"-onedir-{ONEDIR_PYTHON_PATH}"
else:
-name += "-{}".format(python_version)
+name += f"-{python_version}"
for s, _ in session._runner.manifest.list_all_sessions():
if name not in s.signatures:
continue
for signature in s.signatures:
for key, value in kwargs.items():
-param = "{}={!r}".format(key, value)
+param = f"{key}={value!r}"
if param not in signature:
break
else:
@@ -211,7 +211,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
-session.error("Could not find a windows requirements file for {}".format(pydir))
+session.error(f"Could not find a windows requirements file for {pydir}")
elif IS_DARWIN:
if crypto is None:
_requirements_file = os.path.join(
@@ -224,7 +224,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
-session.error("Could not find a darwin requirements file for {}".format(pydir))
+session.error(f"Could not find a darwin requirements file for {pydir}")
elif IS_FREEBSD:
if crypto is None:
_requirements_file = os.path.join(
@@ -237,7 +237,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
-session.error("Could not find a freebsd requirements file for {}".format(pydir))
+session.error(f"Could not find a freebsd requirements file for {pydir}")
else:
if crypto is None:
_requirements_file = os.path.join(
@@ -250,7 +250,7 @@ def _get_pip_requirements_file(session, crypto=None, requirements_type="ci"):
)
if os.path.exists(_requirements_file):
return _requirements_file
-session.error("Could not find a linux requirements file for {}".format(pydir))
+session.error(f"Could not find a linux requirements file for {pydir}")
def _upgrade_pip_setuptools_and_wheel(session, upgrade=True):
@@ -569,7 +569,7 @@ def test_parametrized(session, coverage, transport, crypto):
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
-"--transport={}".format(transport),
+f"--transport={transport}",
] + session.posargs
_pytest(session, coverage=coverage, cmd_args=cmd_args)
@@ -1014,7 +1014,7 @@ def _pytest(session, coverage, cmd_args, env=None, on_rerun=False):
if arg == "--log-file" or arg.startswith("--log-file="):
break
else:
-args.append("--log-file={}".format(RUNTESTS_LOGFILE))
+args.append(f"--log-file={RUNTESTS_LOGFILE}")
args.extend(cmd_args)
if PRINT_SYSTEM_INFO_ONLY and "--sys-info-and-exit" not in args:
@@ -1487,7 +1487,7 @@ def _lint(session, rcfile, flags, paths, upgrade_setuptools_and_pip=True):
]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
-cmd_args = ["pylint", "--rcfile={}".format(rcfile)] + list(flags) + list(paths)
+cmd_args = ["pylint", f"--rcfile={rcfile}"] + list(flags) + list(paths)
cmd_kwargs = {"env": {"PYTHONUNBUFFERED": "1"}}
session.run(*cmd_args, **cmd_kwargs)
@@ -1528,8 +1528,8 @@ def lint(session):
"""
Run PyLint against Salt and it's test suite.
"""
-session.notify("lint-salt-{}".format(session.python))
-session.notify("lint-tests-{}".format(session.python))
+session.notify(f"lint-salt-{session.python}")
+session.notify(f"lint-tests-{session.python}")
@nox.session(python="3", name="lint-salt")
@@ -1593,7 +1593,7 @@ def docs(session, compress, update, clean):
"""
Build Salt's Documentation
"""
-session.notify("docs-html-{}(compress={})".format(session.python, compress))
+session.notify(f"docs-html-{session.python}(compress={compress})")
session.notify(
find_session_runner(
session,


@@ -22,7 +22,7 @@ class TornadoImporter:
def create_module(self, spec):
if USE_VENDORED_TORNADO:
-mod = importlib.import_module("salt.ext.{}".format(spec.name))
+mod = importlib.import_module(f"salt.ext.{spec.name}")
else: # pragma: no cover
# Remove 'salt.ext.' from the module
mod = importlib.import_module(spec.name[9:])


@@ -108,9 +108,9 @@ DFLT_LOG_FMT_LOGFILE = "%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(lev
class SaltLogRecord(logging.LogRecord):
def __init__(self, *args, **kwargs):
logging.LogRecord.__init__(self, *args, **kwargs)
-self.bracketname = "[{:<17}]".format(str(self.name))
-self.bracketlevel = "[{:<8}]".format(str(self.levelname))
-self.bracketprocess = "[{:>5}]".format(str(self.process))
+self.bracketname = f"[{str(self.name):<17}]"
+self.bracketlevel = f"[{str(self.levelname):<8}]"
+self.bracketprocess = f"[{str(self.process):>5}]"
class SaltColorLogRecord(SaltLogRecord):
@@ -124,11 +124,11 @@ class SaltColorLogRecord(SaltLogRecord):
self.colorname = "{}[{:<17}]{}".format(
LOG_COLORS["name"], str(self.name), reset
)
-self.colorlevel = "{}[{:<8}]{}".format(clevel, str(self.levelname), reset)
+self.colorlevel = f"{clevel}[{str(self.levelname):<8}]{reset}"
self.colorprocess = "{}[{:>5}]{}".format(
LOG_COLORS["process"], str(self.process), reset
)
-self.colormsg = "{}{}{}".format(cmsg, self.getMessage(), reset)
+self.colormsg = f"{cmsg}{self.getMessage()}{reset}"
def get_log_record_factory():
@@ -726,7 +726,7 @@ def setup_logfile_handler(
syslog_opts["address"] = str(path.resolve().parent)
except OSError as exc:
raise LoggingRuntimeError(
-"Failed to setup the Syslog logging handler: {}".format(exc)
+f"Failed to setup the Syslog logging handler: {exc}"
) from exc
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
@@ -736,7 +736,7 @@ def setup_logfile_handler(
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise LoggingRuntimeError(
-"The syslog facility '{}' is not known".format(facility_name)
+f"The syslog facility '{facility_name}' is not known"
)
else:
# This is the case of udp or tcp without a facility specified
@@ -747,7 +747,7 @@ def setup_logfile_handler(
# This python syslog version does not know about the user provided
# facility name
raise LoggingRuntimeError(
-"The syslog facility '{}' is not known".format(facility_name)
+f"The syslog facility '{facility_name}' is not known"
)
syslog_opts["facility"] = facility
@@ -767,7 +767,7 @@ def setup_logfile_handler(
handler = SysLogHandler(**syslog_opts)
except OSError as exc:
raise LoggingRuntimeError(
-"Failed to setup the Syslog logging handler: {}".format(exc)
+f"Failed to setup the Syslog logging handler: {exc}"
) from exc
else:
# make sure, the logging directory exists and attempt to create it if necessary


@@ -137,7 +137,7 @@ class LoadAuth:
mod = self.opts["eauth_acl_module"]
if not mod:
mod = load["eauth"]
-fstr = "{}.acl".format(mod)
+fstr = f"{mod}.acl"
if fstr not in self.auth:
return None
fcall = salt.utils.args.format_call(
@@ -474,7 +474,7 @@ class LoadAuth:
msg = 'Authentication failure of type "user" occurred'
if not auth_ret: # auth_ret can be a boolean or the effective user id
if show_username:
-msg = "{} for user {}.".format(msg, username)
+msg = f"{msg} for user {username}."
ret["error"] = {"name": "UserAuthenticationError", "message": msg}
return ret
@@ -535,7 +535,7 @@ class Resolver:
if not eauth:
print("External authentication system has not been specified")
return ret
-fstr = "{}.auth".format(eauth)
+fstr = f"{eauth}.auth"
if fstr not in self.auth:
print(
'The specified external authentication system "{}" is not available'.format(
@@ -554,14 +554,14 @@
if arg in self.opts:
ret[arg] = self.opts[arg]
elif arg.startswith("pass"):
-ret[arg] = getpass.getpass("{}: ".format(arg))
+ret[arg] = getpass.getpass(f"{arg}: ")
else:
-ret[arg] = input("{}: ".format(arg))
+ret[arg] = input(f"{arg}: ")
for kwarg, default in list(args["kwargs"].items()):
if kwarg in self.opts:
ret["kwarg"] = self.opts[kwarg]
else:
-ret[kwarg] = input("{} [{}]: ".format(kwarg, default))
+ret[kwarg] = input(f"{kwarg} [{default}]: ")
# Use current user if empty
if "username" in ret and not ret["username"]:


@@ -111,7 +111,7 @@ def __django_auth_setup():
django_module_name, globals(), locals(), "SaltExternalAuthModel"
)
# pylint: enable=possibly-unused-variable
-DJANGO_AUTH_CLASS_str = "django_auth_module.{}".format(django_model_name)
+DJANGO_AUTH_CLASS_str = f"django_auth_module.{django_model_name}"
DJANGO_AUTH_CLASS = eval(DJANGO_AUTH_CLASS_str) # pylint: disable=W0123


@@ -54,15 +54,15 @@ def _config(key, mandatory=True, opts=None):
"""
try:
if opts:
-value = opts["auth.ldap.{}".format(key)]
+value = opts[f"auth.ldap.{key}"]
else:
-value = __opts__["auth.ldap.{}".format(key)]
+value = __opts__[f"auth.ldap.{key}"]
except KeyError:
try:
-value = __defopts__["auth.ldap.{}".format(key)]
+value = __defopts__[f"auth.ldap.{key}"]
except KeyError:
if mandatory:
-msg = "missing auth.ldap.{} in master config".format(key)
+msg = f"missing auth.ldap.{key} in master config"
raise SaltInvocationError(msg)
return False
return value
@@ -120,13 +120,13 @@ class _LDAPConnection:
schema = "ldaps" if tls else "ldap"
if self.uri == "":
-self.uri = "{}://{}:{}".format(schema, self.server, self.port)
+self.uri = f"{schema}://{self.server}:{self.port}"
try:
if no_verify:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
-self.ldap = ldap.initialize("{}".format(self.uri))
+self.ldap = ldap.initialize(f"{self.uri}")
self.ldap.protocol_version = 3 # ldap.VERSION3
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD


@@ -104,7 +104,7 @@ class PamMessage(Structure):
]
def __repr__(self):
-return "<PamMessage {} '{}'>".format(self.msg_style, self.msg)
+return f"<PamMessage {self.msg_style} '{self.msg}'>"
class PamResponse(Structure):
@@ -118,7 +118,7 @@ class PamResponse(Structure):
]
def __repr__(self):
-return "<PamResponse {} '{}'>".format(self.resp_retcode, self.resp)
+return f"<PamResponse {self.resp_retcode} '{self.resp}'>"
CONV_FUNC = CFUNCTYPE(
@@ -236,8 +236,7 @@ def authenticate(username, password):
ret = subprocess.run(
[str(pyexe), str(pyfile)],
env=env,
-stdout=subprocess.PIPE,
-stderr=subprocess.PIPE,
+capture_output=True,
check=False,
)
if ret.returncode == 0:
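
Worth noting: capture_output=True was added to subprocess.run() in Python 3.7 and is shorthand for passing both stdout=subprocess.PIPE and stderr=subprocess.PIPE, which is why the two keyword arguments above collapse into one. A minimal standalone sketch (the echoed command is illustrative, not from the Salt code base):

    import subprocess

    # Python 3.7+: capture_output=True implies stdout=PIPE and stderr=PIPE
    ret = subprocess.run(["echo", "hello"], capture_output=True, check=False)
    print(ret.returncode, ret.stdout, ret.stderr)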


@@ -75,7 +75,7 @@ class Beacon:
# Run the validate function if it's available,
# otherwise there is a warning about it being missing
-validate_str = "{}.validate".format(beacon_name)
+validate_str = f"{beacon_name}.validate"
if validate_str in self.beacons:
valid, vcomment = self.beacons[validate_str](b_config[mod])
@@ -96,7 +96,7 @@ class Beacon:
continue
b_config[mod].append({"_beacon_name": mod})
-fun_str = "{}.beacon".format(beacon_name)
+fun_str = f"{beacon_name}.beacon"
if fun_str in self.beacons:
runonce = self._determine_beacon_config(
current_beacon_config, "run_once"
@@ -125,7 +125,7 @@ class Beacon:
if re.match("state.*", job["fun"]):
is_running = True
if is_running:
-close_str = "{}.close".format(beacon_name)
+close_str = f"{beacon_name}.close"
if close_str in self.beacons:
log.info("Closing beacon %s. State run in progress.", mod)
self.beacons[close_str](b_config[mod])
@@ -140,7 +140,7 @@ class Beacon:
try:
raw = self.beacons[fun_str](b_config[mod])
except: # pylint: disable=bare-except
-error = "{}".format(sys.exc_info()[1])
+error = f"{sys.exc_info()[1]}"
log.error("Unable to start %s beacon, %s", mod, error)
# send beacon error event
tag = "salt/beacon/{}/{}/".format(self.opts["id"], mod)
@@ -309,7 +309,7 @@ class Beacon:
"""
beacon_name = next(item.get("beacon_module", name) for item in beacon_data)
-validate_str = "{}.validate".format(beacon_name)
+validate_str = f"{beacon_name}.validate"
# Run the validate function if it's available,
# otherwise there is a warning about it being missing
if validate_str in self.beacons:
@@ -348,9 +348,9 @@ class Beacon:
complete = False
else:
if name in self.opts["beacons"]:
-comment = "Updating settings for beacon item: {}".format(name)
+comment = f"Updating settings for beacon item: {name}"
else:
-comment = "Added new beacon item: {}".format(name)
+comment = f"Added new beacon item: {name}"
complete = True
self.opts["beacons"].update(data)
@@ -376,12 +376,10 @@ class Beacon:
data[name] = beacon_data
if name in self._get_beacons(include_opts=False):
-comment = (
-"Cannot modify beacon item {}, it is configured in pillar.".format(name)
-)
+comment = f"Cannot modify beacon item {name}, it is configured in pillar."
complete = False
else:
-comment = "Updating settings for beacon item: {}".format(name)
+comment = f"Updating settings for beacon item: {name}"
complete = True
self.opts["beacons"].update(data)
@@ -403,16 +401,14 @@ class Beacon:
"""
if name in self._get_beacons(include_opts=False):
-comment = (
-"Cannot delete beacon item {}, it is configured in pillar.".format(name)
-)
+comment = f"Cannot delete beacon item {name}, it is configured in pillar."
complete = False
else:
if name in self.opts["beacons"]:
del self.opts["beacons"][name]
-comment = "Deleting beacon item: {}".format(name)
+comment = f"Deleting beacon item: {name}"
else:
-comment = "Beacon item {} not found.".format(name)
+comment = f"Beacon item {name} not found."
complete = True
# Fire the complete event back along with updated list of beacons
@@ -466,13 +462,11 @@ class Beacon:
"""
if name in self._get_beacons(include_opts=False):
-comment = (
-"Cannot enable beacon item {}, it is configured in pillar.".format(name)
-)
+comment = f"Cannot enable beacon item {name}, it is configured in pillar."
complete = False
else:
self._update_enabled(name, True)
-comment = "Enabling beacon item {}".format(name)
+comment = f"Enabling beacon item {name}"
complete = True
# Fire the complete event back along with updated list of beacons
@@ -502,7 +496,7 @@ class Beacon:
complete = False
else:
self._update_enabled(name, False)
-comment = "Disabling beacon item {}".format(name)
+comment = f"Disabling beacon item {name}"
complete = True
# Fire the complete event back along with updated list of beacons


@@ -130,7 +130,7 @@ except ImportError:
def __virtual__():
if os.path.isfile(BTMP):
return __virtualname__
-err_msg = "{} does not exist.".format(BTMP)
+err_msg = f"{BTMP} does not exist."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg


@@ -95,7 +95,7 @@ def beacon(config):
# if our mount doesn't end with a $, insert one.
mount_re = mount
if not mount.endswith("$"):
-mount_re = "{}$".format(mount)
+mount_re = f"{mount}$"
if salt.utils.platform.is_windows():
# mount_re comes in formatted with a $ at the end


@@ -68,7 +68,7 @@ def _get_notifier(config):
Check the context for the notifier and construct it if not present
"""
beacon_name = config.get("_beacon_name", "inotify")
-notifier = "{}.notifier".format(beacon_name)
+notifier = f"{beacon_name}.notifier"
if notifier not in __context__:
__context__["inotify.queue"] = collections.deque()
wm = pyinotify.WatchManager()
@@ -353,7 +353,7 @@ def beacon(config):
def close(config):
config = salt.utils.beacons.list_to_dict(config)
beacon_name = config.get("_beacon_name", "inotify")
-notifier = "{}.notifier".format(beacon_name)
+notifier = f"{beacon_name}.notifier"
if notifier in __context__:
__context__[notifier].stop()
del __context__[notifier]


@@ -298,7 +298,7 @@ def validate(config):
" dictionary".format(fun),
)
if fun not in __salt__:
-return False, "Execution function {} is not availabe!".format(fun)
+return False, f"Execution function {fun} is not availabe!"
return True, "Valid configuration for the napal beacon!"


@@ -45,7 +45,7 @@ def validate(config):
# a simple str is taking as the single function with no args / kwargs
fun = config["salt_fun"]
if fun not in __salt__:
-return False, "{} not in __salt__".format(fun)
+return False, f"{fun} not in __salt__"
else:
for entry in config["salt_fun"]:
if isinstance(entry, dict):
@@ -56,7 +56,7 @@ def validate(config):
if not isinstance(args_kwargs_dict[key], list):
return (
False,
-"args key for fun {} must be list".format(fun),
+f"args key for fun {fun} must be list",
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
@@ -70,19 +70,19 @@ def validate(config):
if not isinstance(key_value, dict):
return (
False,
-"{} is not a key / value pair".format(key_value),
+f"{key_value} is not a key / value pair",
)
else:
return (
False,
-"key {} not allowed under fun {}".format(key, fun),
+f"key {key} not allowed under fun {fun}",
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
-return False, "{} not in __salt__".format(fun)
+return False, f"{fun} not in __salt__"
return True, "valid config"


@@ -23,9 +23,9 @@ def _run_proxy_processes(proxies):
result = {}
if not __salt__["salt_proxy.is_running"](proxy)["result"]:
__salt__["salt_proxy.configure_proxy"](proxy, start=True)
-result[proxy] = "Proxy {} was started".format(proxy)
+result[proxy] = f"Proxy {proxy} was started"
else:
-msg = "Proxy {} is already running".format(proxy)
+msg = f"Proxy {proxy} is already running"
result[proxy] = msg
log.debug(msg)
ret.append(result)


@@ -73,7 +73,7 @@ def beacon(config):
config = salt.utils.beacons.list_to_dict(config)
for sensor in config.get("sensors", {}):
-sensor_function = "sensehat.get_{}".format(sensor)
+sensor_function = f"sensehat.get_{sensor}"
if sensor_function not in __salt__:
log.error("No sensor for meassuring %s. Skipping.", sensor)
continue
@@ -95,6 +95,6 @@ def beacon(config):
current_value = __salt__[sensor_function]()
if not sensor_min <= current_value <= sensor_max:
-ret.append({"tag": "sensehat/{}".format(sensor), sensor: current_value})
+ret.append({"tag": f"sensehat/{sensor}", sensor: current_value})
return ret


@@ -73,7 +73,7 @@ def beacon(config):
__context__[pkey] = {}
for pid in track_pids:
if pid not in __context__[pkey]:
-cmd = ["strace", "-f", "-e", "execve", "-p", "{}".format(pid)]
+cmd = ["strace", "-f", "-e", "execve", "-p", f"{pid}"]
__context__[pkey][pid] = {}
__context__[pkey][pid]["vt"] = salt.utils.vt.Terminal(
cmd,


@@ -80,7 +80,7 @@ def beacon(config):
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE["images"]:
-event["tag"] = "imported/{}".format(uuid)
+event["tag"] = f"imported/{uuid}"
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
@@ -91,7 +91,7 @@ def beacon(config):
for uuid in IMGADM_STATE["images"]:
event = {}
if uuid not in current_images:
-event["tag"] = "deleted/{}".format(uuid)
+event["tag"] = f"deleted/{uuid}"
for label in IMGADM_STATE["images"][uuid]:
event[label] = IMGADM_STATE["images"][uuid][label]


@@ -83,7 +83,7 @@ def beacon(config):
for uuid in current_vms:
event = {}
if uuid not in VMADM_STATE["vms"]:
-event["tag"] = "created/{}".format(uuid)
+event["tag"] = f"created/{uuid}"
for label in current_vms[uuid]:
if label == "state":
continue
@@ -96,7 +96,7 @@ def beacon(config):
for uuid in VMADM_STATE["vms"]:
event = {}
if uuid not in current_vms:
-event["tag"] = "deleted/{}".format(uuid)
+event["tag"] = f"deleted/{uuid}"
for label in VMADM_STATE["vms"][uuid]:
if label == "state":
continue


@@ -143,7 +143,7 @@ def beacon(config):
for func in entry:
ret[func] = {}
try:
-data = __salt__["status.{}".format(func)]()
+data = __salt__[f"status.{func}"]()
except salt.exceptions.CommandExecutionError as exc:
log.debug(
"Status beacon attempted to process function %s "
@@ -166,8 +166,6 @@ def beacon(config):
except TypeError:
ret[func][item] = data[int(item)]
except KeyError as exc:
-ret[func] = (
-"Status beacon is incorrectly configured: {}".format(exc)
-)
+ret[func] = f"Status beacon is incorrectly configured: {exc}"
return [{"tag": ctime, "data": ret}]


@@ -159,7 +159,7 @@ except ImportError:
def __virtual__():
if os.path.isfile(WTMP):
return __virtualname__
-err_msg = "{} does not exist.".format(WTMP)
+err_msg = f"{WTMP} does not exist."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg


@@ -69,7 +69,7 @@ class Cache:
def __lazy_init(self):
self._modules = salt.loader.cache(self.opts)
-fun = "{}.init_kwargs".format(self.driver)
+fun = f"{self.driver}.init_kwargs"
if fun in self.modules:
self._kwargs = self.modules[fun](self._kwargs)
else:
@@ -140,7 +140,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.store".format(self.driver)
+fun = f"{self.driver}.store"
return self.modules[fun](bank, key, data, **self._kwargs)
def fetch(self, bank, key):
@@ -164,7 +164,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.fetch".format(self.driver)
+fun = f"{self.driver}.fetch"
return self.modules[fun](bank, key, **self._kwargs)
def updated(self, bank, key):
@@ -188,7 +188,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.updated".format(self.driver)
+fun = f"{self.driver}.updated"
return self.modules[fun](bank, key, **self._kwargs)
def flush(self, bank, key=None):
@@ -209,7 +209,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.flush".format(self.driver)
+fun = f"{self.driver}.flush"
return self.modules[fun](bank, key=key, **self._kwargs)
def list(self, bank):
@@ -228,7 +228,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.list".format(self.driver)
+fun = f"{self.driver}.list"
return self.modules[fun](bank, **self._kwargs)
def contains(self, bank, key=None):
@@ -253,7 +253,7 @@ class Cache:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
"""
-fun = "{}.contains".format(self.driver)
+fun = f"{self.driver}.contains"
return self.modules[fun](bank, key, **self._kwargs)
@@ -288,7 +288,7 @@ class MemCache(Cache):
break
def _get_storage_id(self):
-fun = "{}.storage_id".format(self.driver)
+fun = f"{self.driver}.storage_id"
if fun in self.modules:
return self.modules[fun](self.kwargs)
else:

salt/cache/consul.py vendored

@@ -119,33 +119,29 @@ def store(bank, key, data):
"""
Store a key value.
"""
-c_key = "{}/{}".format(bank, key)
-tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+c_key = f"{bank}/{key}"
+tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
c_data = salt.payload.dumps(data)
api.kv.put(c_key, c_data)
api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error writing the key, {}: {}".format(c_key, exc)
-)
+raise SaltCacheError(f"There was an error writing the key, {c_key}: {exc}")
def fetch(bank, key):
"""
Fetch a key value.
"""
-c_key = "{}/{}".format(bank, key)
+c_key = f"{bank}/{key}"
try:
_, value = api.kv.get(c_key)
if value is None:
return {}
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error reading the key, {}: {}".format(c_key, exc)
-)
+raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")
def flush(bank, key=None):
@@ -156,16 +152,14 @@ def flush(bank, key=None):
c_key = bank
tstamp_key = None
else:
-c_key = "{}/{}".format(bank, key)
-tstamp_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+c_key = f"{bank}/{key}"
+tstamp_key = f"{bank}/{key}{_tstamp_suffix}"
try:
if tstamp_key:
api.kv.delete(tstamp_key)
return api.kv.delete(c_key, recurse=key is None)
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error removing the key, {}: {}".format(c_key, exc)
-)
+raise SaltCacheError(f"There was an error removing the key, {c_key}: {exc}")
def list_(bank):
@@ -175,9 +169,7 @@ def list_(bank):
try:
_, keys = api.kv.get(bank + "/", keys=True, separator="/")
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-'There was an error getting the key "{}": {}'.format(bank, exc)
-)
+raise SaltCacheError(f'There was an error getting the key "{bank}": {exc}')
if keys is None:
keys = []
else:
@@ -198,9 +190,7 @@ def contains(bank, key):
c_key = "{}/{}".format(bank, key or "")
_, value = api.kv.get(c_key, keys=True)
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error getting the key, {}: {}".format(c_key, exc)
-)
+raise SaltCacheError(f"There was an error getting the key, {c_key}: {exc}")
return value is not None
@@ -209,13 +199,11 @@ def updated(bank, key):
Return the Unix Epoch timestamp of when the key was last updated. Return
None if key is not found.
"""
-c_key = "{}/{}{}".format(bank, key, _tstamp_suffix)
+c_key = f"{bank}/{key}{_tstamp_suffix}"
try:
_, value = api.kv.get(c_key)
if value is None:
return None
return salt.payload.loads(value["Value"])
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error reading the key, {}: {}".format(c_key, exc)
-)
+raise SaltCacheError(f"There was an error reading the key, {c_key}: {exc}")


@@ -141,16 +141,14 @@ def store(bank, key, data):
Store a key value.
"""
_init_client()
-etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
-etcd_tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
+etcd_key = f"{path_prefix}/{bank}/{key}"
+etcd_tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
value = salt.payload.dumps(data)
client.write(etcd_key, base64.b64encode(value))
client.write(etcd_tstamp_key, int(time.time()))
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error writing the key, {}: {}".format(etcd_key, exc)
-)
+raise SaltCacheError(f"There was an error writing the key, {etcd_key}: {exc}")
def fetch(bank, key):
@@ -158,16 +156,14 @@ def fetch(bank, key):
Fetch a key value.
"""
_init_client()
-etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
+etcd_key = f"{path_prefix}/{bank}/{key}"
try:
value = client.read(etcd_key).value
return salt.payload.loads(base64.b64decode(value))
except etcd.EtcdKeyNotFound:
return {}
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error reading the key, {}: {}".format(etcd_key, exc)
-)
+raise SaltCacheError(f"There was an error reading the key, {etcd_key}: {exc}")
def flush(bank, key=None):
@@ -176,11 +172,11 @@ def flush(bank, key=None):
"""
_init_client()
if key is None:
-etcd_key = "{}/{}".format(path_prefix, bank)
+etcd_key = f"{path_prefix}/{bank}"
tstamp_key = None
else:
-etcd_key = "{}/{}/{}".format(path_prefix, bank, key)
-tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
+etcd_key = f"{path_prefix}/{bank}/{key}"
+tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
client.read(etcd_key)
except etcd.EtcdKeyNotFound:
@@ -190,9 +186,7 @@
client.delete(tstamp_key)
client.delete(etcd_key, recursive=True)
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error removing the key, {}: {}".format(etcd_key, exc)
-)
+raise SaltCacheError(f"There was an error removing the key, {etcd_key}: {exc}")
def _walk(r):
@@ -218,14 +212,14 @@ def ls(bank):
bank.
"""
_init_client()
-path = "{}/{}".format(path_prefix, bank)
+path = f"{path_prefix}/{bank}"
try:
return _walk(client.read(path))
except etcd.EtcdKeyNotFound:
return []
except Exception as exc: # pylint: disable=broad-except
raise SaltCacheError(
-'There was an error getting the key "{}": {}'.format(bank, exc)
+f'There was an error getting the key "{bank}": {exc}'
) from exc
@@ -242,9 +236,7 @@ def contains(bank, key):
except etcd.EtcdKeyNotFound:
return False
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error getting the key, {}: {}".format(etcd_key, exc)
-)
+raise SaltCacheError(f"There was an error getting the key, {etcd_key}: {exc}")
def updated(bank, key):
@@ -252,13 +244,11 @@ def updated(bank, key):
Return Unix Epoch based timestamp of when the bank/key was updated.
"""
_init_client()
-tstamp_key = "{}/{}/{}".format(path_prefix, bank, key + _tstamp_suffix)
+tstamp_key = f"{path_prefix}/{bank}/{key + _tstamp_suffix}"
try:
value = client.read(tstamp_key).value
return int(value)
except etcd.EtcdKeyNotFound:
return None
except Exception as exc: # pylint: disable=broad-except
-raise SaltCacheError(
-"There was an error reading the key, {}: {}".format(tstamp_key, exc)
-)
+raise SaltCacheError(f"There was an error reading the key, {tstamp_key}: {exc}")

salt/cache/localfs.py vendored

@@ -51,10 +51,10 @@ def store(bank, key, data, cachedir):
except OSError as exc:
if exc.errno != errno.EEXIST:
raise SaltCacheError(
-"The cache directory, {}, could not be created: {}".format(base, exc)
+f"The cache directory, {base}, could not be created: {exc}"
)
-outfile = os.path.join(base, "{}.p".format(key))
+outfile = os.path.join(base, f"{key}.p")
tmpfh, tmpfname = tempfile.mkstemp(dir=base)
os.close(tmpfh)
try:
@@ -64,7 +64,7 @@ def store(bank, key, data, cachedir):
salt.utils.atomicfile.atomic_rename(tmpfname, outfile)
except OSError as exc:
raise SaltCacheError(
-"There was an error writing the cache file, {}: {}".format(base, exc)
+f"There was an error writing the cache file, {base}: {exc}"
)
@@ -73,7 +73,7 @@ def fetch(bank, key, cachedir):
Fetch information from a file.
"""
inkey = False
-key_file = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
+key_file = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(key_file):
# The bank includes the full filename, and the key is inside the file
key_file = os.path.join(cachedir, os.path.normpath(bank) + ".p")
@@ -90,7 +90,7 @@ def fetch(bank, key, cachedir):
return salt.payload.load(fh_)
except OSError as exc:
raise SaltCacheError(
-'There was an error reading the cache file "{}": {}'.format(key_file, exc)
+f'There was an error reading the cache file "{key_file}": {exc}'
)
@@ -98,7 +98,7 @@ def updated(bank, key, cachedir):
"""
Return the epoch of the mtime for this cache file
"""
-key_file = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
+key_file = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(key_file):
log.warning('Cache file "%s" does not exist', key_file)
return None
@@ -106,7 +106,7 @@ def updated(bank, key, cachedir):
return int(os.path.getmtime(key_file))
except OSError as exc:
raise SaltCacheError(
-'There was an error reading the mtime for "{}": {}'.format(key_file, exc)
+f'There was an error reading the mtime for "{key_file}": {exc}'
)
@@ -124,12 +124,12 @@ def flush(bank, key=None, cachedir=None):
return False
shutil.rmtree(target)
else:
-target = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
+target = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
if not os.path.isfile(target):
return False
os.remove(target)
except OSError as exc:
-raise SaltCacheError('There was an error removing "{}": {}'.format(target, exc))
+raise SaltCacheError(f'There was an error removing "{target}": {exc}')
return True
@@ -143,9 +143,7 @@ def list_(bank, cachedir):
try:
items = os.listdir(base)
except OSError as exc:
-raise SaltCacheError(
-'There was an error accessing directory "{}": {}'.format(base, exc)
-)
+raise SaltCacheError(f'There was an error accessing directory "{base}": {exc}')
ret = []
for item in items:
if item.endswith(".p"):
@@ -163,5 +161,5 @@ def contains(bank, key, cachedir):
base = os.path.join(cachedir, os.path.normpath(bank))
return os.path.isdir(base)
else:
-keyfile = os.path.join(cachedir, os.path.normpath(bank), "{}.p".format(key))
+keyfile = os.path.join(cachedir, os.path.normpath(bank), f"{key}.p")
return os.path.isfile(keyfile)


@@ -144,9 +144,7 @@ def run_query(conn, query, args=None, retries=3):
if len(query) > 150:
query = query[:150] + "<...>"
raise SaltCacheError(
-"Error running {}{}: {}".format(
-query, "- args: {}".format(args) if args else "", e
-)
+"Error running {}{}: {}".format(query, f"- args: {args}" if args else "", e)
)
@@ -266,7 +264,7 @@ def store(bank, key, data):
cur, cnt = run_query(__context__.get("mysql_client"), query, args=args)
cur.close()
if cnt not in (1, 2):
-raise SaltCacheError("Error storing {} {} returned {}".format(bank, key, cnt))
+raise SaltCacheError(f"Error storing {bank} {key} returned {cnt}")
def fetch(bank, key):


@@ -351,7 +351,7 @@ def _get_banks_to_remove(redis_server, bank, path=""):
A simple tree traversal algorithm that builds the list of banks to remove,
starting from an arbitrary node in the tree.
"""
-current_path = bank if not path else "{path}/{bank}".format(path=path, bank=bank)
+current_path = bank if not path else f"{path}/{bank}"
bank_paths_to_remove = [current_path]
# as you got here, you'll be removed


@@ -143,7 +143,7 @@ class AsyncReqChannel:
auth,
timeout=REQUEST_CHANNEL_TIMEOUT,
tries=REQUEST_CHANNEL_TRIES,
-**kwargs
+**kwargs,
):
self.opts = dict(opts)
self.transport = transport
@@ -446,7 +446,7 @@ class AsyncPubChannel:
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in str(exc):
raise salt.exceptions.SaltClientError(
-"Unable to sign_in to master: {}".format(exc)
+f"Unable to sign_in to master: {exc}"
) # TODO: better error message
def close(self):


@@ -144,9 +144,7 @@ class ReqServerChannel:
raise salt.ext.tornado.gen.Return("bad load: id contains a null byte")
except TypeError:
log.error("Payload contains non-string id: %s", payload)
-raise salt.ext.tornado.gen.Return(
-"bad load: id {} is not a string".format(id_)
-)
+raise salt.ext.tornado.gen.Return(f"bad load: id {id_} is not a string")
version = 0
if "version" in payload:


@@ -191,7 +191,7 @@ class Batch:
if next_:
if not self.quiet:
salt.utils.stringutils.print_cli(
-"\nExecuting run on {}\n".format(sorted(next_))
+f"\nExecuting run on {sorted(next_)}\n"
)
# create a new iterator for this batch of minions
return_value = self.opts.get("return", self.opts.get("ret", ""))


@@ -75,7 +75,7 @@ class BaseCaller:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get("fun", "")):
-salt.utils.stringutils.print_cli("{}:\n{}\n".format(name, docs[name]))
+salt.utils.stringutils.print_cli(f"{name}:\n{docs[name]}\n")
def print_grains(self):
"""
@@ -130,7 +130,7 @@ class BaseCaller:
salt.minion.get_proc_dir(self.opts["cachedir"]), ret["jid"]
)
if fun not in self.minion.functions:
-docs = self.minion.functions["sys.doc"]("{}*".format(fun))
+docs = self.minion.functions["sys.doc"](f"{fun}*")
if docs:
docs[fun] = self.minion.functions.missing_fun_string(fun)
ret["out"] = "nested"
@@ -194,20 +194,16 @@ class BaseCaller:
executors = [executors]
try:
for name in executors:
-fname = "{}.execute".format(name)
+fname = f"{name}.execute"
if fname not in self.minion.executors:
-raise SaltInvocationError(
-"Executor '{}' is not available".format(name)
-)
+raise SaltInvocationError(f"Executor '{name}' is not available")
ret["return"] = self.minion.executors[fname](
self.opts, data, func, args, kwargs
)
if ret["return"] is not None:
break
except TypeError as exc:
-sys.stderr.write(
-"\nPassed invalid arguments: {}.\n\nUsage:\n".format(exc)
-)
+sys.stderr.write(f"\nPassed invalid arguments: {exc}.\n\nUsage:\n")
salt.utils.stringutils.print_cli(func.__doc__)
active_level = LOG_LEVELS.get(
self.opts["log_level"].lower(), logging.ERROR
@@ -272,7 +268,7 @@ class BaseCaller:
continue
try:
ret["success"] = True
-self.minion.returners["{}.returner".format(returner)](ret)
+self.minion.returners[f"{returner}.returner"](ret)
except Exception: # pylint: disable=broad-except
pass


@@ -44,7 +44,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
auto_reconnect=True,
)
except SaltClientError as exc:
-self.exit(2, "{}\n".format(exc))
+self.exit(2, f"{exc}\n")
return
if self.options.batch or self.options.static:
@@ -146,9 +146,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
if self.config["async"]:
jid = self.local_client.cmd_async(**kwargs)
-salt.utils.stringutils.print_cli(
-"Executed command with job ID: {}".format(jid)
-)
+salt.utils.stringutils.print_cli(f"Executed command with job ID: {jid}")
return
# local will be None when there was an error
@@ -337,16 +335,14 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
salt.utils.stringutils.print_cli("Summary")
salt.utils.stringutils.print_cli("-------------------------------------------")
salt.utils.stringutils.print_cli(
-"# of minions targeted: {}".format(return_counter + not_return_counter)
-)
-salt.utils.stringutils.print_cli(
-"# of minions returned: {}".format(return_counter)
-)
-salt.utils.stringutils.print_cli(
-"# of minions that did not return: {}".format(not_return_counter)
-)
-salt.utils.stringutils.print_cli(
-"# of minions with errors: {}".format(len(failed_minions))
+f"# of minions targeted: {return_counter + not_return_counter}"
+)
+salt.utils.stringutils.print_cli(f"# of minions returned: {return_counter}")
+salt.utils.stringutils.print_cli(
+f"# of minions that did not return: {not_return_counter}"
+)
+salt.utils.stringutils.print_cli(
+f"# of minions with errors: {len(failed_minions)}"
)
if self.options.verbose:
if not_connected_minions:
@@ -449,7 +445,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
if not ret:
self.exit(2, "No minions found to gather docs from\n")
if isinstance(ret, str):
-self.exit(2, "{}\n".format(ret))
+self.exit(2, f"{ret}\n")
for host in ret:
if isinstance(ret[host], str) and (
ret[host].startswith("Minion did not return")
@@ -464,6 +460,6 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
salt.output.display_output({fun: docs[fun]}, "nested", self.config)
else:
for fun in sorted(docs):
-salt.utils.stringutils.print_cli("{}:".format(fun))
+salt.utils.stringutils.print_cli(f"{fun}:")
salt.utils.stringutils.print_cli(docs[fun])
salt.utils.stringutils.print_cli("")

View file

@ -245,7 +245,7 @@ class LocalClient:
# The username may contain '\' if it is in Windows # The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path. # 'DOMAIN\username' format. Fix this for the keyfile path.
key_user = key_user.replace("\\", "_") key_user = key_user.replace("\\", "_")
keyfile = os.path.join(self.opts["cachedir"], ".{}_key".format(key_user)) keyfile = os.path.join(self.opts["cachedir"], f".{key_user}_key")
try: try:
# Make sure all key parent directories are accessible # Make sure all key parent directories are accessible
salt.utils.verify.check_path_traversal( salt.utils.verify.check_path_traversal(
@ -265,7 +265,7 @@ class LocalClient:
try: try:
return range_.expand(tgt) return range_.expand(tgt)
except seco.range.RangeException as err: except seco.range.RangeException as err:
print("Range server exception: {}".format(err)) print(f"Range server exception: {err}")
return [] return []
def _get_timeout(self, timeout): def _get_timeout(self, timeout):
@ -1053,11 +1053,11 @@ class LocalClient:
:returns: all of the information for the JID :returns: all of the information for the JID
""" """
if verbose: if verbose:
msg = "Executing job with jid {}".format(jid) msg = f"Executing job with jid {jid}"
print(msg) print(msg)
print("-" * len(msg) + "\n") print("-" * len(msg) + "\n")
elif show_jid: elif show_jid:
print("jid: {}".format(jid)) print(f"jid: {jid}")
if timeout is None: if timeout is None:
timeout = self.opts["timeout"] timeout = self.opts["timeout"]
fret = {} fret = {}
@ -1163,11 +1163,9 @@ class LocalClient:
# iterator for this job's return # iterator for this job's return
if self.opts["order_masters"]: if self.opts["order_masters"]:
# If we are a MoM, we need to gather expected minions from downstreams masters. # If we are a MoM, we need to gather expected minions from downstreams masters.
ret_iter = self.get_returns_no_block( ret_iter = self.get_returns_no_block(f"(salt/job|syndic/.*)/{jid}", "regex")
"(salt/job|syndic/.*)/{}".format(jid), "regex"
)
else: else:
ret_iter = self.get_returns_no_block("salt/job/{}".format(jid)) ret_iter = self.get_returns_no_block(f"salt/job/{jid}")
# iterator for the info of this job # iterator for the info of this job
jinfo_iter = [] jinfo_iter = []
# open event jids that need to be un-subscribed from later # open event jids that need to be un-subscribed from later
@ -1547,11 +1545,11 @@ class LocalClient:
log.trace("entered - function get_cli_static_event_returns()") log.trace("entered - function get_cli_static_event_returns()")
minions = set(minions) minions = set(minions)
if verbose: if verbose:
msg = "Executing job with jid {}".format(jid) msg = f"Executing job with jid {jid}"
print(msg) print(msg)
print("-" * len(msg) + "\n") print("-" * len(msg) + "\n")
elif show_jid: elif show_jid:
print("jid: {}".format(jid)) print(f"jid: {jid}")
if timeout is None: if timeout is None:
timeout = self.opts["timeout"] timeout = self.opts["timeout"]
@ -1581,7 +1579,7 @@ class LocalClient:
time_left = timeout_at - int(time.time()) time_left = timeout_at - int(time.time())
# Wait 0 == forever, use a minimum of 1s # Wait 0 == forever, use a minimum of 1s
wait = max(1, time_left) wait = max(1, time_left)
jid_tag = "salt/job/{}".format(jid) jid_tag = f"salt/job/{jid}"
raw = self.event.get_event( raw = self.event.get_event(
wait, jid_tag, auto_reconnect=self.auto_reconnect wait, jid_tag, auto_reconnect=self.auto_reconnect
) )
@ -1641,11 +1639,11 @@ class LocalClient:
log.trace("func get_cli_event_returns()") log.trace("func get_cli_event_returns()")
if verbose: if verbose:
msg = "Executing job with jid {}".format(jid) msg = f"Executing job with jid {jid}"
print(msg) print(msg)
print("-" * len(msg) + "\n") print("-" * len(msg) + "\n")
elif show_jid: elif show_jid:
print("jid: {}".format(jid)) print(f"jid: {jid}")
# lazy load the connected minions # lazy load the connected minions
connected_minions = None connected_minions = None
@ -1684,7 +1682,7 @@ class LocalClient:
if ( if (
self.opts["minion_data_cache"] self.opts["minion_data_cache"]
and salt.cache.factory(self.opts).contains( and salt.cache.factory(self.opts).contains(
"minions/{}".format(id_), "data" f"minions/{id_}", "data"
) )
and connected_minions and connected_minions
and id_ not in connected_minions and id_ not in connected_minions
@ -1775,9 +1773,7 @@ class LocalClient:
""" """
if ng not in self.opts["nodegroups"]: if ng not in self.opts["nodegroups"]:
conf_file = self.opts.get("conf_file", "the master config file") conf_file = self.opts.get("conf_file", "the master config file")
raise SaltInvocationError( raise SaltInvocationError(f"Node group {ng} unavailable in {conf_file}")
"Node group {} unavailable in {}".format(ng, conf_file)
)
return salt.utils.minions.nodegroup_comp(ng, self.opts["nodegroups"]) return salt.utils.minions.nodegroup_comp(ng, self.opts["nodegroups"])
def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs):
@ -2062,8 +2058,8 @@ class LocalClient:
def _clean_up_subscriptions(self, job_id): def _clean_up_subscriptions(self, job_id):
if self.opts.get("order_masters"): if self.opts.get("order_masters"):
self.event.unsubscribe("syndic/.*/{}".format(job_id), "regex") self.event.unsubscribe(f"syndic/.*/{job_id}", "regex")
self.event.unsubscribe("salt/job/{}".format(job_id)) self.event.unsubscribe(f"salt/job/{job_id}")
def destroy(self): def destroy(self):
if self.event is not None: if self.event is not None:
@ -2122,7 +2118,7 @@ class FunctionWrapper(dict):
""" """
args = list(args) args = list(args)
for _key, _val in kwargs.items(): for _key, _val in kwargs.items():
args.append("{}={}".format(_key, _val)) args.append(f"{_key}={_val}")
return self.local.cmd(self.minion, key, args) return self.local.cmd(self.minion, key, args)
return func return func
@ -2272,9 +2268,9 @@ class ProxyCaller:
if isinstance(executors, str): if isinstance(executors, str):
executors = [executors] executors = [executors]
for name in executors: for name in executors:
fname = "{}.execute".format(name) fname = f"{name}.execute"
if fname not in self.sminion.executors: if fname not in self.sminion.executors:
raise SaltInvocationError("Executor '{}' is not available".format(name)) raise SaltInvocationError(f"Executor '{name}' is not available")
return_data = self.sminion.executors[fname]( return_data = self.sminion.executors[fname](
self.opts, data, func, args, kwargs self.opts, data, func, args, kwargs
) )

View file

@ -273,7 +273,7 @@ class SyncClientMixin(ClientStateMixin):
return True return True
try: try:
return self.opts["{}_returns".format(class_name)] return self.opts[f"{class_name}_returns"]
except KeyError: except KeyError:
# No such option, assume this isn't one we care about gating and # No such option, assume this isn't one we care about gating and
# just return True. # just return True.
@ -300,7 +300,7 @@ class SyncClientMixin(ClientStateMixin):
tag = low.get("__tag__", salt.utils.event.tagify(jid, prefix=self.tag_prefix)) tag = low.get("__tag__", salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = { data = {
"fun": "{}.{}".format(self.client, fun), "fun": f"{self.client}.{fun}",
"jid": jid, "jid": jid,
"user": low.get("__user__", "UNKNOWN"), "user": low.get("__user__", "UNKNOWN"),
} }
@ -523,7 +523,7 @@ class AsyncClientMixin(ClientStateMixin):
tag, tag,
jid, jid,
daemonize=True, daemonize=True,
full_return=False full_return=False,
): ):
""" """
Run this method in a multiprocess target to execute the function Run this method in a multiprocess target to execute the function

View file

@ -48,7 +48,7 @@ class NetapiClient:
for fun in self.netapi: for fun in self.netapi:
if fun.endswith(".start"): if fun.endswith(".start"):
name = "RunNetapi({})".format(self.netapi[fun].__module__) name = f"RunNetapi({self.netapi[fun].__module__})"
log.info("Starting %s", name) log.info("Starting %s", name)
self.process_manager.add_process( self.process_manager.add_process(
RunNetapi, args=(self.opts, fun), name=name RunNetapi, args=(self.opts, fun), name=name

View file

@ -129,26 +129,26 @@ class Shell:
options.append("PasswordAuthentication=no") options.append("PasswordAuthentication=no")
if self.opts.get("_ssh_version", (0,)) > (4, 9): if self.opts.get("_ssh_version", (0,)) > (4, 9):
options.append("GSSAPIAuthentication=no") options.append("GSSAPIAuthentication=no")
options.append("ConnectTimeout={}".format(self.timeout)) options.append(f"ConnectTimeout={self.timeout}")
if self.opts.get("ignore_host_keys"): if self.opts.get("ignore_host_keys"):
options.append("StrictHostKeyChecking=no") options.append("StrictHostKeyChecking=no")
if self.opts.get("no_host_keys"): if self.opts.get("no_host_keys"):
options.extend(["StrictHostKeyChecking=no", "UserKnownHostsFile=/dev/null"]) options.extend(["StrictHostKeyChecking=no", "UserKnownHostsFile=/dev/null"])
known_hosts = self.opts.get("known_hosts_file") known_hosts = self.opts.get("known_hosts_file")
if known_hosts and os.path.isfile(known_hosts): if known_hosts and os.path.isfile(known_hosts):
options.append("UserKnownHostsFile={}".format(known_hosts)) options.append(f"UserKnownHostsFile={known_hosts}")
if self.port: if self.port:
options.append("Port={}".format(self.port)) options.append(f"Port={self.port}")
if self.priv and self.priv != "agent-forwarding": if self.priv and self.priv != "agent-forwarding":
options.append("IdentityFile={}".format(self.priv)) options.append(f"IdentityFile={self.priv}")
if self.user: if self.user:
options.append("User={}".format(self.user)) options.append(f"User={self.user}")
if self.identities_only: if self.identities_only:
options.append("IdentitiesOnly=yes") options.append("IdentitiesOnly=yes")
ret = [] ret = []
for option in options: for option in options:
ret.append("-o {} ".format(option)) ret.append(f"-o {option} ")
return "".join(ret) return "".join(ret)
def _passwd_opts(self): def _passwd_opts(self):
@ -164,7 +164,7 @@ class Shell:
] ]
if self.opts["_ssh_version"] > (4, 9): if self.opts["_ssh_version"] > (4, 9):
options.append("GSSAPIAuthentication=no") options.append("GSSAPIAuthentication=no")
options.append("ConnectTimeout={}".format(self.timeout)) options.append(f"ConnectTimeout={self.timeout}")
if self.opts.get("ignore_host_keys"): if self.opts.get("ignore_host_keys"):
options.append("StrictHostKeyChecking=no") options.append("StrictHostKeyChecking=no")
if self.opts.get("no_host_keys"): if self.opts.get("no_host_keys"):
@ -183,19 +183,19 @@ class Shell:
] ]
) )
if self.port: if self.port:
options.append("Port={}".format(self.port)) options.append(f"Port={self.port}")
if self.user: if self.user:
options.append("User={}".format(self.user)) options.append(f"User={self.user}")
if self.identities_only: if self.identities_only:
options.append("IdentitiesOnly=yes") options.append("IdentitiesOnly=yes")
ret = [] ret = []
for option in options: for option in options:
ret.append("-o {} ".format(option)) ret.append(f"-o {option} ")
return "".join(ret) return "".join(ret)
def _ssh_opts(self): def _ssh_opts(self):
return " ".join(["-o {}".format(opt) for opt in self.ssh_options]) return " ".join([f"-o {opt}" for opt in self.ssh_options])
def _copy_id_str_old(self): def _copy_id_str_old(self):
""" """
@ -206,7 +206,7 @@ class Shell:
# passwords containing '$' # passwords containing '$'
return "{} {} '{} -p {} {} {}@{}'".format( return "{} {} '{} -p {} {} {}@{}'".format(
"ssh-copy-id", "ssh-copy-id",
"-i {}.pub".format(self.priv), f"-i {self.priv}.pub",
self._passwd_opts(), self._passwd_opts(),
self.port, self.port,
self._ssh_opts(), self._ssh_opts(),
@ -225,7 +225,7 @@ class Shell:
# passwords containing '$' # passwords containing '$'
return "{} {} {} -p {} {} {}@{}".format( return "{} {} {} -p {} {} {}@{}".format(
"ssh-copy-id", "ssh-copy-id",
"-i {}.pub".format(self.priv), f"-i {self.priv}.pub",
self._passwd_opts(), self._passwd_opts(),
self.port, self.port,
self._ssh_opts(), self._ssh_opts(),
@ -261,10 +261,7 @@ class Shell:
if ssh != "scp" and self.remote_port_forwards: if ssh != "scp" and self.remote_port_forwards:
command.append( command.append(
" ".join( " ".join(
[ [f"-R {item}" for item in self.remote_port_forwards.split(",")]
"-R {}".format(item)
for item in self.remote_port_forwards.split(",")
]
) )
) )
if self.ssh_options: if self.ssh_options:
@ -306,7 +303,7 @@ class Shell:
rcode = None rcode = None
cmd = self._cmd_str(cmd) cmd = self._cmd_str(cmd)
logmsg = "Executing non-blocking command: {}".format(cmd) logmsg = f"Executing non-blocking command: {cmd}"
if self.passwd: if self.passwd:
logmsg = logmsg.replace(self.passwd, ("*" * 6)) logmsg = logmsg.replace(self.passwd, ("*" * 6))
log.debug(logmsg) log.debug(logmsg)
@ -325,7 +322,7 @@ class Shell:
""" """
cmd = self._cmd_str(cmd) cmd = self._cmd_str(cmd)
logmsg = "Executing command: {}".format(cmd) logmsg = f"Executing command: {cmd}"
if self.passwd: if self.passwd:
logmsg = logmsg.replace(self.passwd, ("*" * 6)) logmsg = logmsg.replace(self.passwd, ("*" * 6))
if 'decode("base64")' in logmsg or "base64.b64decode(" in logmsg: if 'decode("base64")' in logmsg or "base64.b64decode(" in logmsg:
@ -342,17 +339,17 @@ class Shell:
scp a file or files to a remote system scp a file or files to a remote system
""" """
if makedirs: if makedirs:
self.exec_cmd("mkdir -p {}".format(os.path.dirname(remote))) self.exec_cmd(f"mkdir -p {os.path.dirname(remote)}")
# scp needs [<ipv6>] # scp needs [<ipv6>]
host = self.host host = self.host
if ":" in host: if ":" in host:
host = "[{}]".format(host) host = f"[{host}]"
cmd = "{} {}:{}".format(local, host, remote) cmd = f"{local} {host}:{remote}"
cmd = self._cmd_str(cmd, ssh="scp") cmd = self._cmd_str(cmd, ssh="scp")
logmsg = "Executing command: {}".format(cmd) logmsg = f"Executing command: {cmd}"
if self.passwd: if self.passwd:
logmsg = logmsg.replace(self.passwd, ("*" * 6)) logmsg = logmsg.replace(self.passwd, ("*" * 6))
log.debug(logmsg) log.debug(logmsg)
@ -371,7 +368,7 @@ class Shell:
cmd_lst = shlex.split(cmd) cmd_lst = shlex.split(cmd)
else: else:
cmd_lst = shlex.split(ssh_part) cmd_lst = shlex.split(ssh_part)
cmd_lst.append("/bin/sh {}".format(cmd_part)) cmd_lst.append(f"/bin/sh {cmd_part}")
return cmd_lst return cmd_lst
def _run_cmd(self, cmd, key_accept=False, passwd_retries=3): def _run_cmd(self, cmd, key_accept=False, passwd_retries=3):

View file

@ -30,7 +30,7 @@ class FunctionWrapper:
cmd_prefix=None, cmd_prefix=None,
aliases=None, aliases=None,
minion_opts=None, minion_opts=None,
**kwargs **kwargs,
): ):
super().__init__() super().__init__()
self.cmd_prefix = cmd_prefix self.cmd_prefix = cmd_prefix
@ -79,14 +79,14 @@ class FunctionWrapper:
cmd_prefix=cmd, cmd_prefix=cmd,
aliases=self.aliases, aliases=self.aliases,
minion_opts=self.minion_opts, minion_opts=self.minion_opts,
**kwargs **kwargs,
) )
if self.cmd_prefix: if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the code block # We're in an inner FunctionWrapper as created by the code block
# above. Reconstruct the original cmd in the form 'cmd.run' and # above. Reconstruct the original cmd in the form 'cmd.run' and
# then evaluate as normal # then evaluate as normal
cmd = "{}.{}".format(self.cmd_prefix, cmd) cmd = f"{self.cmd_prefix}.{cmd}"
if cmd in self.wfuncs: if cmd in self.wfuncs:
return self.wfuncs[cmd] return self.wfuncs[cmd]
@ -115,7 +115,7 @@ class FunctionWrapper:
disable_wipe=True, disable_wipe=True,
fsclient=self.fsclient, fsclient=self.fsclient,
minion_opts=self.minion_opts, minion_opts=self.minion_opts,
**self.kwargs **self.kwargs,
) )
stdout, stderr, retcode = single.cmd_block() stdout, stderr, retcode = single.cmd_block()
if stderr.count("Permission Denied"): if stderr.count("Permission Denied"):
@ -149,15 +149,13 @@ class FunctionWrapper:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary # Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. We don't # containing only 'cmd' module calls, in that case. We don't
# support assigning directly to prefixes in this way # support assigning directly to prefixes in this way
raise KeyError( raise KeyError(f"Cannot assign to module key {cmd} in the FunctionWrapper")
"Cannot assign to module key {} in the FunctionWrapper".format(cmd)
)
if self.cmd_prefix: if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the first code # We're in an inner FunctionWrapper as created by the first code
# block in __getitem__. Reconstruct the original cmd in the form # block in __getitem__. Reconstruct the original cmd in the form
# 'cmd.run' and then evaluate as normal # 'cmd.run' and then evaluate as normal
cmd = "{}.{}".format(self.cmd_prefix, cmd) cmd = f"{self.cmd_prefix}.{cmd}"
if cmd in self.wfuncs: if cmd in self.wfuncs:
self.wfuncs[cmd] = value self.wfuncs[cmd] = value
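
The `**kwargs,` hunks above add a trailing comma after a keyword-argument unpacking in a call. A small hedged sketch of why this is now safe: the trailing comma after `**kwargs` parses on modern Python 3 but was a SyntaxError on Python 2. The describe() function and its arguments are invented for the example.

    # Illustrative sketch -- describe() and its options are hypothetical.
    def describe(prefix, **kwargs):
        return prefix + ", ".join(f"{k}={v}" for k, v in sorted(kwargs.items()))

    opts = {"retries": 2, "timeout": 30}

    # The trailing comma after **opts is accepted by Python 3
    # (it would not parse on Python 2).
    print(
        describe(
            "options: ",
            **opts,
        )
    )
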

View file

@ -235,9 +235,9 @@ def dot_vals(value):
""" """
ret = {} ret = {}
for key, val in __pillar__.get("master", {}).items(): for key, val in __pillar__.get("master", {}).items():
if key.startswith("{}.".format(value)): if key.startswith(f"{value}."):
ret[key] = val ret[key] = val
for key, val in __opts__.items(): for key, val in __opts__.items():
if key.startswith("{}.".format(value)): if key.startswith(f"{value}."):
ret[key] = val ret[key] = val
return ret return ret

View file

@ -102,7 +102,7 @@ def _render_filenames(path, dest, saltenv, template):
# render the path as a template using path_template_engine as the engine # render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY: if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError( raise CommandExecutionError(
"Attempted to render file paths with unavailable engine {}".format(template) f"Attempted to render file paths with unavailable engine {template}"
) )
kwargs = {} kwargs = {}

View file

@ -60,17 +60,17 @@ def communicator(func):
trace = traceback.format_exc() trace = traceback.format_exc()
queue.put("KEYBOARDINT") queue.put("KEYBOARDINT")
queue.put("Keyboard interrupt") queue.put("Keyboard interrupt")
queue.put("{}\n{}\n".format(ex, trace)) queue.put(f"{ex}\n{trace}\n")
except Exception as ex: # pylint: disable=broad-except except Exception as ex: # pylint: disable=broad-except
trace = traceback.format_exc() trace = traceback.format_exc()
queue.put("ERROR") queue.put("ERROR")
queue.put("Exception") queue.put("Exception")
queue.put("{}\n{}\n".format(ex, trace)) queue.put(f"{ex}\n{trace}\n")
except SystemExit as ex: except SystemExit as ex:
trace = traceback.format_exc() trace = traceback.format_exc()
queue.put("ERROR") queue.put("ERROR")
queue.put("System exit") queue.put("System exit")
queue.put("{}\n{}\n".format(ex, trace)) queue.put(f"{ex}\n{trace}\n")
return ret return ret
return _call return _call
@ -150,7 +150,7 @@ def enter_mainloop(
" we bail out".format(target) " we bail out".format(target)
) )
log.error(msg) log.error(msg)
raise SaltCloudSystemExit("Exception caught\n{}".format(msg)) raise SaltCloudSystemExit(f"Exception caught\n{msg}")
elif mapped_args is not None: elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args] iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable) ret = pool.map(func=target, iterable=iterable)
@ -161,12 +161,12 @@ def enter_mainloop(
if test in ["ERROR", "KEYBOARDINT"]: if test in ["ERROR", "KEYBOARDINT"]:
type_ = queue.get() type_ = queue.get()
trace = queue.get() trace = queue.get()
msg = "Caught {}, terminating workers\n".format(type_) msg = f"Caught {type_}, terminating workers\n"
msg += "TRACE: {}\n".format(trace) msg += f"TRACE: {trace}\n"
log.error(msg) log.error(msg)
pool.terminate() pool.terminate()
pool.join() pool.join()
raise SaltCloudSystemExit("Exception caught\n{}".format(msg)) raise SaltCloudSystemExit(f"Exception caught\n{msg}")
elif test in ["END"] or (callback and callback(test)): elif test in ["END"] or (callback and callback(test)):
pool.close() pool.close()
pool.join() pool.join()
@ -199,7 +199,7 @@ class CloudClient:
for name, profile in pillars.pop("profiles", {}).items(): for name, profile in pillars.pop("profiles", {}).items():
provider = profile["provider"].split(":")[0] provider = profile["provider"].split(":")[0]
driver = next(iter(self.opts["providers"][provider].keys())) driver = next(iter(self.opts["providers"][provider].keys()))
profile["provider"] = "{}:{}".format(provider, driver) profile["provider"] = f"{provider}:{driver}"
profile["profile"] = name profile["profile"] = name
self.opts["profiles"].update({name: profile}) self.opts["profiles"].update({name: profile})
self.opts["providers"][provider][driver]["profiles"].update( self.opts["providers"][provider][driver]["profiles"].update(
@ -392,7 +392,7 @@ class CloudClient:
mapper = salt.cloud.Map(self._opts_defaults()) mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts["providers"] providers = self.opts["providers"]
if provider in providers: if provider in providers:
provider += ":{}".format(next(iter(providers[provider].keys()))) provider += f":{next(iter(providers[provider].keys()))}"
else: else:
return False return False
if isinstance(names, str): if isinstance(names, str):
@ -433,7 +433,7 @@ class CloudClient:
mapper = salt.cloud.Map(self._opts_defaults()) mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel() providers = mapper.map_providers_parallel()
if provider in providers: if provider in providers:
provider += ":{}".format(next(iter(providers[provider].keys()))) provider += f":{next(iter(providers[provider].keys()))}"
else: else:
return False return False
if isinstance(names, str): if isinstance(names, str):
@ -518,7 +518,7 @@ class Cloud:
for alias, drivers in self.opts["providers"].items(): for alias, drivers in self.opts["providers"].items():
if len(drivers) > 1: if len(drivers) > 1:
for driver in drivers: for driver in drivers:
providers.add("{}:{}".format(alias, driver)) providers.add(f"{alias}:{driver}")
continue continue
providers.add(alias) providers.add(alias)
return providers return providers
@ -609,7 +609,7 @@ class Cloud:
pmap = {} pmap = {}
for alias, drivers in self.opts["providers"].items(): for alias, drivers in self.opts["providers"].items():
for driver, details in drivers.items(): for driver, details in drivers.items():
fun = "{}.{}".format(driver, query) fun = f"{driver}.{query}"
if fun not in self.clouds: if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver) log.error("Public cloud provider %s is not available", driver)
continue continue
@ -659,11 +659,11 @@ class Cloud:
# for minimum information, Otherwise still use query param. # for minimum information, Otherwise still use query param.
if ( if (
opts.get("selected_query_option") is None opts.get("selected_query_option") is None
and "{}.list_nodes_min".format(driver) in self.clouds and f"{driver}.list_nodes_min" in self.clouds
): ):
this_query = "list_nodes_min" this_query = "list_nodes_min"
fun = "{}.{}".format(driver, this_query) fun = f"{driver}.{this_query}"
if fun not in self.clouds: if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver) log.error("Public cloud provider %s is not available", driver)
continue continue
@ -771,7 +771,7 @@ class Cloud:
provider_by_driver[name][alias] = data provider_by_driver[name][alias] = data
for driver, providers_data in provider_by_driver.items(): for driver, providers_data in provider_by_driver.items():
fun = "{}.optimize_providers".format(driver) fun = f"{driver}.optimize_providers"
if fun not in self.clouds: if fun not in self.clouds:
log.debug("The '%s' cloud driver is unable to be optimized.", driver) log.debug("The '%s' cloud driver is unable to be optimized.", driver)
@ -801,7 +801,7 @@ class Cloud:
return data return data
for alias, driver in lookups: for alias, driver in lookups:
fun = "{}.avail_locations".format(driver) fun = f"{driver}.avail_locations"
if fun not in self.clouds: if fun not in self.clouds:
# The capability to gather locations is not supported by this # The capability to gather locations is not supported by this
# cloud module # cloud module
@ -842,7 +842,7 @@ class Cloud:
return data return data
for alias, driver in lookups: for alias, driver in lookups:
fun = "{}.avail_images".format(driver) fun = f"{driver}.avail_images"
if fun not in self.clouds: if fun not in self.clouds:
# The capability to gather images is not supported by this # The capability to gather images is not supported by this
# cloud module # cloud module
@ -882,7 +882,7 @@ class Cloud:
return data return data
for alias, driver in lookups: for alias, driver in lookups:
fun = "{}.avail_sizes".format(driver) fun = f"{driver}.avail_sizes"
if fun not in self.clouds: if fun not in self.clouds:
# The capability to gather sizes is not supported by this # The capability to gather sizes is not supported by this
# cloud module # cloud module
@ -1017,7 +1017,7 @@ class Cloud:
else: else:
log.info("Destroying in non-parallel mode.") log.info("Destroying in non-parallel mode.")
for alias, driver, name in vms_to_destroy: for alias, driver, name in vms_to_destroy:
fun = "{}.destroy".format(driver) fun = f"{driver}.destroy"
with salt.utils.context.func_globals_inject( with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver]) self.clouds[fun], __active_provider_name__=":".join([alias, driver])
): ):
@ -1050,7 +1050,7 @@ class Cloud:
key_file = os.path.join( key_file = os.path.join(
self.opts["pki_dir"], "minions", minion_dict.get("id", name) self.opts["pki_dir"], "minions", minion_dict.get("id", name)
) )
globbed_key_file = glob.glob("{}.*".format(key_file)) globbed_key_file = glob.glob(f"{key_file}.*")
if not os.path.isfile(key_file) and not globbed_key_file: if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed # There's no such key file!? It might have been renamed
@ -1090,25 +1090,25 @@ class Cloud:
) )
while True: while True:
for idx, filename in enumerate(globbed_key_file): for idx, filename in enumerate(globbed_key_file):
print(" {}: {}".format(idx, os.path.basename(filename))) print(f" {idx}: {os.path.basename(filename)}")
selection = input("Which minion key should be deleted(number)? ") selection = input("Which minion key should be deleted(number)? ")
try: try:
selection = int(selection) selection = int(selection)
except ValueError: except ValueError:
print("'{}' is not a valid selection.".format(selection)) print(f"'{selection}' is not a valid selection.")
try: try:
filename = os.path.basename(globbed_key_file.pop(selection)) filename = os.path.basename(globbed_key_file.pop(selection))
except Exception: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
continue continue
delete = input("Delete '{}'? [Y/n]? ".format(filename)) delete = input(f"Delete '{filename}'? [Y/n]? ")
if delete == "" or delete.lower().startswith("y"): if delete == "" or delete.lower().startswith("y"):
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename) salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
print("Deleted '{}'".format(filename)) print(f"Deleted '{filename}'")
break break
print("Did not delete '{}'".format(filename)) print(f"Did not delete '{filename}'")
break break
if names and not processed: if names and not processed:
@ -1138,7 +1138,7 @@ class Cloud:
if node in names: if node in names:
acts[prov].append(node) acts[prov].append(node)
for prov, names_ in acts.items(): for prov, names_ in acts.items():
fun = "{}.reboot".format(prov) fun = f"{prov}.reboot"
for name in names_: for name in names_:
ret.append({name: self.clouds[fun](name)}) ret.append({name: self.clouds[fun](name)})
@ -1155,7 +1155,7 @@ class Cloud:
) )
alias, driver = vm_["provider"].split(":") alias, driver = vm_["provider"].split(":")
fun = "{}.create".format(driver) fun = f"{driver}.create"
if fun not in self.clouds: if fun not in self.clouds:
log.error( log.error(
"Creating '%s' using '%s' as the provider " "Creating '%s' using '%s' as the provider "
@ -1220,7 +1220,7 @@ class Cloud:
try: try:
alias, driver = vm_["provider"].split(":") alias, driver = vm_["provider"].split(":")
func = "{}.create".format(driver) func = f"{driver}.create"
with salt.utils.context.func_globals_inject( with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver]) self.clouds[fun], __active_provider_name__=":".join([alias, driver])
): ):
@ -1357,7 +1357,7 @@ class Cloud:
handle them handle them
""" """
if profile not in self.opts["profiles"]: if profile not in self.opts["profiles"]:
msg = "Profile {} is not defined".format(profile) msg = f"Profile {profile} is not defined"
log.error(msg) log.error(msg)
return {"Error": msg} return {"Error": msg}
@ -1396,7 +1396,7 @@ class Cloud:
if name in vms: if name in vms:
prov = vms[name]["provider"] prov = vms[name]["provider"]
driv = vms[name]["driver"] driv = vms[name]["driver"]
msg = "{} already exists under {}:{}".format(name, prov, driv) msg = f"{name} already exists under {prov}:{driv}"
log.error(msg) log.error(msg)
ret[name] = {"Error": msg} ret[name] = {"Error": msg}
continue continue
@ -1542,14 +1542,12 @@ class Cloud:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"More than one results matched '{}'. Please specify one of: {}".format( "More than one results matched '{}'. Please specify one of: {}".format(
prov, prov,
", ".join( ", ".join([f"{alias}:{driver}" for (alias, driver) in matches]),
["{}:{}".format(alias, driver) for (alias, driver) in matches]
),
) )
) )
alias, driver = matches.pop() alias, driver = matches.pop()
fun = "{}.{}".format(driver, func) fun = f"{driver}.{func}"
if fun not in self.clouds: if fun not in self.clouds:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"The '{}' cloud provider alias, for the '{}' driver, does " "The '{}' cloud provider alias, for the '{}' driver, does "
@ -1573,7 +1571,7 @@ class Cloud:
""" """
for alias, drivers in self.opts["providers"].copy().items(): for alias, drivers in self.opts["providers"].copy().items():
for driver in drivers.copy(): for driver in drivers.copy():
fun = "{}.get_configured_provider".format(driver) fun = f"{driver}.get_configured_provider"
if fun not in self.clouds: if fun not in self.clouds:
# Mis-configured provider that got removed? # Mis-configured provider that got removed?
log.warning( log.warning(
@ -1898,7 +1896,7 @@ class Map(Cloud):
"The required profile, '{}', defined in the map " "The required profile, '{}', defined in the map "
"does not exist. The defined nodes, {}, will not " "does not exist. The defined nodes, {}, will not "
"be created.".format( "be created.".format(
profile_name, ", ".join("'{}'".format(node) for node in nodes) profile_name, ", ".join(f"'{node}'" for node in nodes)
) )
) )
log.error(msg) log.error(msg)
@ -1931,7 +1929,7 @@ class Map(Cloud):
# Update profile data with the map overrides # Update profile data with the map overrides
for setting in ("grains", "master", "minion", "volumes", "requires"): for setting in ("grains", "master", "minion", "volumes", "requires"):
deprecated = "map_{}".format(setting) deprecated = f"map_{setting}"
if deprecated in overrides: if deprecated in overrides:
log.warning( log.warning(
"The use of '%s' on the '%s' mapping has " "The use of '%s' on the '%s' mapping has "

View file

@ -412,9 +412,7 @@ def get_image(vm_):
if vm_image and str(vm_image) in images: if vm_image and str(vm_image) in images:
return images[vm_image]["ImageId"] return images[vm_image]["ImageId"]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def get_securitygroup(vm_): def get_securitygroup(vm_):
@ -432,7 +430,7 @@ def get_securitygroup(vm_):
if securitygroup and str(securitygroup) in sgs: if securitygroup and str(securitygroup) in sgs:
return sgs[securitygroup]["SecurityGroupId"] return sgs[securitygroup]["SecurityGroupId"]
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified security group, '{}', could not be found.".format(securitygroup) f"The specified security group, '{securitygroup}', could not be found."
) )
@ -451,9 +449,7 @@ def get_size(vm_):
if vm_size and str(vm_size) in sizes: if vm_size and str(vm_size) in sizes:
return sizes[vm_size]["InstanceTypeId"] return sizes[vm_size]["InstanceTypeId"]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def __get_location(vm_): def __get_location(vm_):
@ -471,7 +467,7 @@ def __get_location(vm_):
if vm_location and str(vm_location) in locations: if vm_location and str(vm_location) in locations:
return locations[vm_location]["RegionId"] return locations[vm_location]["RegionId"]
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified location, '{}', could not be found.".format(vm_location) f"The specified location, '{vm_location}', could not be found."
) )
@ -920,7 +916,7 @@ def _get_node(name):
) )
# Just a little delay between attempts... # Just a little delay between attempts...
time.sleep(0.5) time.sleep(0.5)
raise SaltCloudNotFound("The specified instance {} not found".format(name)) raise SaltCloudNotFound(f"The specified instance {name} not found")
def show_image(kwargs, call=None): def show_image(kwargs, call=None):
@ -982,7 +978,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1001,7 +997,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -836,7 +836,7 @@ def create_network_interface(call=None, kwargs=None):
NetworkInterfaceIPConfiguration( NetworkInterfaceIPConfiguration(
name="{}-ip".format(kwargs["iface_name"]), name="{}-ip".format(kwargs["iface_name"]),
subnet=subnet_obj, subnet=subnet_obj,
**ip_kwargs **ip_kwargs,
) )
] ]
break break
@ -999,7 +999,7 @@ def request_instance(vm_, kwargs=None):
if not win_installer and ssh_publickeyfile_contents is not None: if not win_installer and ssh_publickeyfile_contents is not None:
sshpublickey = SshPublicKey( sshpublickey = SshPublicKey(
key_data=ssh_publickeyfile_contents, key_data=ssh_publickeyfile_contents,
path="/home/{}/.ssh/authorized_keys".format(vm_username), path=f"/home/{vm_username}/.ssh/authorized_keys",
) )
sshconfiguration = SshConfiguration( sshconfiguration = SshConfiguration(
public_keys=[sshpublickey], public_keys=[sshpublickey],
@ -1620,7 +1620,7 @@ def _get_cloud_environment():
cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD") cloud_env = getattr(cloud_env_module, cloud_environment or "AZURE_PUBLIC_CLOUD")
except (AttributeError, ImportError): except (AttributeError, ImportError):
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"The azure {} cloud environment is not available.".format(cloud_environment) f"The azure {cloud_environment} cloud environment is not available."
) )
return cloud_env return cloud_env
@ -1911,7 +1911,7 @@ def create_or_update_vmextension(
except CloudError as exc: except CloudError as exc:
salt.utils.azurearm.log_cloud_error( salt.utils.azurearm.log_cloud_error(
"compute", "compute",
"Error attempting to create the VM extension: {}".format(exc.message), f"Error attempting to create the VM extension: {exc.message}",
) )
ret = {"error": exc.message} ret = {"error": exc.message}
@ -1959,9 +1959,9 @@ def stop(name, call=None):
ret = {"error": exc.message} ret = {"error": exc.message}
if not ret: if not ret:
salt.utils.azurearm.log_cloud_error( salt.utils.azurearm.log_cloud_error(
"compute", "Unable to find virtual machine with name: {}".format(name) "compute", f"Unable to find virtual machine with name: {name}"
) )
ret = {"error": "Unable to find virtual machine with name: {}".format(name)} ret = {"error": f"Unable to find virtual machine with name: {name}"}
else: else:
try: try:
instance = compconn.virtual_machines.deallocate( instance = compconn.virtual_machines.deallocate(
@ -1972,7 +1972,7 @@ def stop(name, call=None):
ret = vm_result.as_dict() ret = vm_result.as_dict()
except CloudError as exc: except CloudError as exc:
salt.utils.azurearm.log_cloud_error( salt.utils.azurearm.log_cloud_error(
"compute", "Error attempting to stop {}: {}".format(name, exc.message) "compute", f"Error attempting to stop {name}: {exc.message}"
) )
ret = {"error": exc.message} ret = {"error": exc.message}
@ -2022,9 +2022,9 @@ def start(name, call=None):
ret = {"error": exc.message} ret = {"error": exc.message}
if not ret: if not ret:
salt.utils.azurearm.log_cloud_error( salt.utils.azurearm.log_cloud_error(
"compute", "Unable to find virtual machine with name: {}".format(name) "compute", f"Unable to find virtual machine with name: {name}"
) )
ret = {"error": "Unable to find virtual machine with name: {}".format(name)} ret = {"error": f"Unable to find virtual machine with name: {name}"}
else: else:
try: try:
instance = compconn.virtual_machines.start( instance = compconn.virtual_machines.start(
@ -2036,7 +2036,7 @@ def start(name, call=None):
except CloudError as exc: except CloudError as exc:
salt.utils.azurearm.log_cloud_error( salt.utils.azurearm.log_cloud_error(
"compute", "compute",
"Error attempting to start {}: {}".format(name, exc.message), f"Error attempting to start {name}: {exc.message}",
) )
ret = {"error": exc.message} ret = {"error": exc.message}

View file

@ -421,7 +421,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"waiting for ssh", "waiting for ssh",
"salt/cloud/{}/waiting_for_ssh".format(name), f"salt/cloud/{name}/waiting_for_ssh",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"ip_address": vm_["ssh_host"]}, args={"ip_address": vm_["ssh_host"]},
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -474,7 +474,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": name}, args={"name": name},
) )
@ -499,7 +499,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"detaching volume", "detaching volume",
"salt/cloud/{}/detaching".format(volume.name), f"salt/cloud/{volume.name}/detaching",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": volume.name}, args={"name": volume.name},
) )
@ -510,7 +510,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"detached volume", "detached volume",
"salt/cloud/{}/detached".format(volume.name), f"salt/cloud/{volume.name}/detached",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": volume.name}, args={"name": volume.name},
) )
@ -519,7 +519,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying volume", "destroying volume",
"salt/cloud/{}/destroying".format(volume.name), f"salt/cloud/{volume.name}/destroying",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": volume.name}, args={"name": volume.name},
) )
@ -530,7 +530,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed volume", "destroyed volume",
"salt/cloud/{}/destroyed".format(volume.name), f"salt/cloud/{volume.name}/destroyed",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": volume.name}, args={"name": volume.name},
) )
@ -545,7 +545,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"name": name}, args={"name": name},
) )

View file

@ -223,9 +223,7 @@ def get_image(vm_):
if images[image]["slug"] is not None: if images[image]["slug"] is not None:
return images[image]["slug"] return images[image]["slug"]
return int(images[image]["id"]) return int(images[image]["id"])
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def get_size(vm_): def get_size(vm_):
@ -239,9 +237,7 @@ def get_size(vm_):
for size in sizes: for size in sizes:
if vm_size.lower() == sizes[size]["slug"]: if vm_size.lower() == sizes[size]["slug"]:
return sizes[size]["slug"] return sizes[size]["slug"]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def get_location(vm_): def get_location(vm_):
@ -257,7 +253,7 @@ def get_location(vm_):
if vm_location in (locations[location]["name"], locations[location]["slug"]): if vm_location in (locations[location]["name"], locations[location]["slug"]):
return locations[location]["slug"] return locations[location]["slug"]
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified location, '{}', could not be found.".format(vm_location) f"The specified location, '{vm_location}', could not be found."
) )
@ -333,7 +329,7 @@ def create(vm_):
if key_filename is not None and not os.path.isfile(key_filename): if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined key_filename '{}' does not exist".format(key_filename) f"The defined key_filename '{key_filename}' does not exist"
) )
if not __opts__.get("ssh_agent", False) and key_filename is None: if not __opts__.get("ssh_agent", False) and key_filename is None:
@ -616,10 +612,10 @@ def query(
) )
) )
path = "{}/{}/".format(base_path, method) path = f"{base_path}/{method}/"
if droplet_id: if droplet_id:
path += "{}/".format(droplet_id) path += f"{droplet_id}/"
if command: if command:
path += command path += command
@ -875,7 +871,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -912,7 +908,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -987,7 +983,7 @@ def destroy_dns_records(fqdn):
ret = query( ret = query(
method="domains", method="domains",
droplet_id=domain, droplet_id=domain,
command="records/{}".format(id_), command=f"records/{id_}",
http_method="delete", http_method="delete",
) )
except SaltCloudSystemExit: except SaltCloudSystemExit:

View file

@ -296,11 +296,9 @@ def query(
location = get_location() location = get_location()
if not requesturl: if not requesturl:
endpoint = provider.get( endpoint = provider.get("endpoint", f"ec2.{location}.{service_url}")
"endpoint", "ec2.{}.{}".format(location, service_url)
)
requesturl = "https://{}/".format(endpoint) requesturl = f"https://{endpoint}/"
endpoint = urllib.parse.urlparse(requesturl).netloc endpoint = urllib.parse.urlparse(requesturl).netloc
endpoint_path = urllib.parse.urlparse(requesturl).path endpoint_path = urllib.parse.urlparse(requesturl).path
else: else:
@ -1480,7 +1478,7 @@ def _create_eni_if_necessary(interface, vm_):
eni_desc = result[1] eni_desc = result[1]
if not eni_desc or not eni_desc.get("networkInterfaceId"): if not eni_desc or not eni_desc.get("networkInterfaceId"):
raise SaltCloudException("Failed to create interface: {}".format(result)) raise SaltCloudException(f"Failed to create interface: {result}")
eni_id = eni_desc.get("networkInterfaceId") eni_id = eni_desc.get("networkInterfaceId")
log.debug("Created network interface %s inst %s", eni_id, interface["DeviceIndex"]) log.debug("Created network interface %s inst %s", eni_id, interface["DeviceIndex"])
@ -1751,11 +1749,11 @@ def _param_from_config(key, data):
if isinstance(data, dict): if isinstance(data, dict):
for k, v in data.items(): for k, v in data.items():
param.update(_param_from_config("{}.{}".format(key, k), v)) param.update(_param_from_config(f"{key}.{k}", v))
elif isinstance(data, list) or isinstance(data, tuple): elif isinstance(data, list) or isinstance(data, tuple):
for idx, conf_item in enumerate(data): for idx, conf_item in enumerate(data):
prefix = "{}.{}".format(key, idx) prefix = f"{key}.{idx}"
param.update(_param_from_config(prefix, conf_item)) param.update(_param_from_config(prefix, conf_item))
else: else:
@ -1870,7 +1868,7 @@ def request_instance(vm_=None, call=None):
params[spot_prefix + "SecurityGroup.1"] = ex_securitygroup params[spot_prefix + "SecurityGroup.1"] = ex_securitygroup
else: else:
for counter, sg_ in enumerate(ex_securitygroup): for counter, sg_ in enumerate(ex_securitygroup):
params[spot_prefix + "SecurityGroup.{}".format(counter)] = sg_ params[spot_prefix + f"SecurityGroup.{counter}"] = sg_
ex_iam_profile = iam_profile(vm_) ex_iam_profile = iam_profile(vm_)
if ex_iam_profile: if ex_iam_profile:
@ -1905,7 +1903,7 @@ def request_instance(vm_=None, call=None):
params[spot_prefix + "SecurityGroupId.1"] = ex_securitygroupid params[spot_prefix + "SecurityGroupId.1"] = ex_securitygroupid
else: else:
for counter, sg_ in enumerate(ex_securitygroupid): for counter, sg_ in enumerate(ex_securitygroupid):
params[spot_prefix + "SecurityGroupId.{}".format(counter)] = sg_ params[spot_prefix + f"SecurityGroupId.{counter}"] = sg_
placementgroup_ = get_placementgroup(vm_) placementgroup_ = get_placementgroup(vm_)
if placementgroup_ is not None: if placementgroup_ is not None:
@ -2044,9 +2042,9 @@ def request_instance(vm_=None, call=None):
else: else:
dev_index = len(dev_list) dev_index = len(dev_list)
# Add the device name in since it wasn't already there # Add the device name in since it wasn't already there
params[ params[f"{spot_prefix}BlockDeviceMapping.{dev_index}.DeviceName"] = (
"{}BlockDeviceMapping.{}.DeviceName".format(spot_prefix, dev_index) rd_name
] = rd_name )
# Set the termination value # Set the termination value
termination_key = "{}BlockDeviceMapping.{}.Ebs.DeleteOnTermination".format( termination_key = "{}BlockDeviceMapping.{}.Ebs.DeleteOnTermination".format(
@ -2509,7 +2507,7 @@ def wait_for_instance(
for line in comps[0].splitlines(): for line in comps[0].splitlines():
if not line: if not line:
continue continue
keys += "\n{} {}".format(ip_address, line) keys += f"\n{ip_address} {line}"
with salt.utils.files.fopen(known_hosts_file, "a") as fp_: with salt.utils.files.fopen(known_hosts_file, "a") as fp_:
fp_.write(salt.utils.stringutils.to_str(keys)) fp_.write(salt.utils.stringutils.to_str(keys))
@ -2563,7 +2561,7 @@ def _validate_key_path_and_mode(key_filename):
if not os.path.exists(key_filename): if not os.path.exists(key_filename):
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"The EC2 key file '{}' does not exist.\n".format(key_filename) f"The EC2 key file '{key_filename}' does not exist.\n"
) )
key_mode = stat.S_IMODE(os.stat(key_filename).st_mode) key_mode = stat.S_IMODE(os.stat(key_filename).st_mode)
@ -2752,7 +2750,7 @@ def create(vm_=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"setting tags", "setting tags",
"salt/cloud/spot_request_{}/tagging".format(sir_id), f"salt/cloud/spot_request_{sir_id}/tagging",
args={"tags": spot_request_tags}, args={"tags": spot_request_tags},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2924,7 +2922,7 @@ def create(vm_=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"setting tags", "setting tags",
"salt/cloud/block_volume_{}/tagging".format(str(volid)), f"salt/cloud/block_volume_{str(volid)}/tagging",
args={"tags": tags}, args={"tags": tags},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -3054,7 +3052,7 @@ def stop(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"stopping instance", "stopping instance",
"salt/cloud/{}/stopping".format(name), f"salt/cloud/{name}/stopping",
args={"name": name, "instance_id": instance_id}, args={"name": name, "instance_id": instance_id},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -3088,7 +3086,7 @@ def start(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting instance", "starting instance",
"salt/cloud/{}/starting".format(name), f"salt/cloud/{name}/starting",
args={"name": name, "instance_id": instance_id}, args={"name": name, "instance_id": instance_id},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -3163,8 +3161,8 @@ def set_tags(
tags = kwargs tags = kwargs
for idx, (tag_k, tag_v) in enumerate(tags.items()): for idx, (tag_k, tag_v) in enumerate(tags.items()):
params["Tag.{}.Key".format(idx)] = tag_k params[f"Tag.{idx}.Key"] = tag_k
params["Tag.{}.Value".format(idx)] = tag_v params[f"Tag.{idx}.Value"] = tag_v
attempts = 0 attempts = 0
while attempts < aws.AWS_MAX_RETRIES: while attempts < aws.AWS_MAX_RETRIES:
@ -3210,7 +3208,7 @@ def set_tags(
return settags return settags
raise SaltCloudSystemExit("Failed to set tags on {}!".format(name)) raise SaltCloudSystemExit(f"Failed to set tags on {name}!")
def get_tags( def get_tags(
@ -3292,7 +3290,7 @@ def del_tags(
params = {"Action": "DeleteTags", "ResourceId.1": instance_id} params = {"Action": "DeleteTags", "ResourceId.1": instance_id}
for idx, tag in enumerate(kwargs["tags"].split(",")): for idx, tag in enumerate(kwargs["tags"].split(",")):
params["Tag.{}.Key".format(idx)] = tag params[f"Tag.{idx}.Key"] = tag
aws.query( aws.query(
params, params,
@ -3356,7 +3354,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name, "instance_id": instance_id}, args={"name": name, "instance_id": instance_id},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -3377,7 +3375,7 @@ def destroy(name, call=None):
"rename_on_destroy", get_configured_provider(), __opts__, search_global=False "rename_on_destroy", get_configured_provider(), __opts__, search_global=False
) )
if rename_on_destroy is not False: if rename_on_destroy is not False:
newname = "{}-DEL{}".format(name, uuid.uuid4().hex) newname = f"{name}-DEL{uuid.uuid4().hex}"
rename(name, kwargs={"newname": newname}, call="action") rename(name, kwargs={"newname": newname}, call="action")
log.info( log.info(
"Machine will be identified as %s until it has been cleaned up.", newname "Machine will be identified as %s until it has been cleaned up.", newname
@ -3410,7 +3408,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name, "instance_id": instance_id}, args={"name": name, "instance_id": instance_id},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -4056,8 +4054,8 @@ def _toggle_delvol(
if volume_id is not None and volume_id != item["ebs"]["volumeId"]: if volume_id is not None and volume_id != item["ebs"]["volumeId"]:
continue continue
params["BlockDeviceMapping.{}.DeviceName".format(idx)] = device_name params[f"BlockDeviceMapping.{idx}.DeviceName"] = device_name
params["BlockDeviceMapping.{}.Ebs.DeleteOnTermination".format(idx)] = value params[f"BlockDeviceMapping.{idx}.Ebs.DeleteOnTermination"] = value
aws.query( aws.query(
params, params,
@ -4477,7 +4475,7 @@ def describe_volumes(kwargs=None, call=None):
if "volume_id" in kwargs: if "volume_id" in kwargs:
volume_id = kwargs["volume_id"].split(",") volume_id = kwargs["volume_id"].split(",")
for volume_index, volume_id in enumerate(volume_id): for volume_index, volume_id in enumerate(volume_id):
params["VolumeId.{}".format(volume_index)] = volume_id params[f"VolumeId.{volume_index}"] = volume_id
log.debug(params) log.debug(params)
@ -4796,17 +4794,17 @@ def describe_snapshots(kwargs=None, call=None):
if "snapshot_id" in kwargs: if "snapshot_id" in kwargs:
snapshot_ids = kwargs["snapshot_id"].split(",") snapshot_ids = kwargs["snapshot_id"].split(",")
for snapshot_index, snapshot_id in enumerate(snapshot_ids): for snapshot_index, snapshot_id in enumerate(snapshot_ids):
params["SnapshotId.{}".format(snapshot_index)] = snapshot_id params[f"SnapshotId.{snapshot_index}"] = snapshot_id
if "owner" in kwargs: if "owner" in kwargs:
owners = kwargs["owner"].split(",") owners = kwargs["owner"].split(",")
for owner_index, owner in enumerate(owners): for owner_index, owner in enumerate(owners):
params["Owner.{}".format(owner_index)] = owner params[f"Owner.{owner_index}"] = owner
if "restorable_by" in kwargs: if "restorable_by" in kwargs:
restorable_bys = kwargs["restorable_by"].split(",") restorable_bys = kwargs["restorable_by"].split(",")
for restorable_by_index, restorable_by in enumerate(restorable_bys): for restorable_by_index, restorable_by in enumerate(restorable_bys):
params["RestorableBy.{}".format(restorable_by_index)] = restorable_by params[f"RestorableBy.{restorable_by_index}"] = restorable_by
log.debug(params) log.debug(params)
@ -5013,11 +5011,11 @@ def _parse_pricing(url, name):
"storageGiB", "storageGiB",
"USD", "USD",
): ):
price_js = price_js.replace(keyword, '"{}"'.format(keyword)) price_js = price_js.replace(keyword, f'"{keyword}"')
for keyword in ("region", "price", "size"): for keyword in ("region", "price", "size"):
price_js = price_js.replace(keyword, '"{}"'.format(keyword)) price_js = price_js.replace(keyword, f'"{keyword}"')
price_js = price_js.replace('"{}"s'.format(keyword), '"{}s"'.format(keyword)) price_js = price_js.replace(f'"{keyword}"s', f'"{keyword}s"')
price_js = price_js.replace('""', '"') price_js = price_js.replace('""', '"')
@ -5031,7 +5029,7 @@ def _parse_pricing(url, name):
sizes[size["size"]] = size sizes[size["size"]] = size
regions[region["region"]] = sizes regions[region["region"]] = sizes
outfile = os.path.join(__opts__["cachedir"], "ec2-pricing-{}.p".format(name)) outfile = os.path.join(__opts__["cachedir"], f"ec2-pricing-{name}.p")
with salt.utils.files.fopen(outfile, "w") as fho: with salt.utils.files.fopen(outfile, "w") as fho:
salt.utils.msgpack.dump(regions, fho) salt.utils.msgpack.dump(regions, fho)
@ -5093,7 +5091,7 @@ def show_pricing(kwargs=None, call=None):
else: else:
name = "linux" name = "linux"
pricefile = os.path.join(__opts__["cachedir"], "ec2-pricing-{}.p".format(name)) pricefile = os.path.join(__opts__["cachedir"], f"ec2-pricing-{name}.p")
if not os.path.isfile(pricefile): if not os.path.isfile(pricefile):
update_pricing({"type": name}, "function") update_pricing({"type": name}, "function")


@ -186,7 +186,7 @@ def get_conn():
"service_account_private_key", provider, __opts__ "service_account_private_key", provider, __opts__
) )
gce = driver(email, private_key, project=project) gce = driver(email, private_key, project=project)
gce.connection.user_agent_append("{}/{}".format(_UA_PRODUCT, _UA_VERSION)) gce.connection.user_agent_append(f"{_UA_PRODUCT}/{_UA_VERSION}")
return gce return gce
@ -544,7 +544,7 @@ def _parse_allow(allow):
pairs = p.split(":") pairs = p.split(":")
if pairs[0].lower() not in ["tcp", "udp", "icmp"]: if pairs[0].lower() not in ["tcp", "udp", "icmp"]:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"Unsupported protocol {}. Must be tcp, udp, or icmp.".format(pairs[0]) f"Unsupported protocol {pairs[0]}. Must be tcp, udp, or icmp."
) )
if len(pairs) == 1 or pairs[0].lower() == "icmp": if len(pairs) == 1 or pairs[0].lower() == "icmp":
seen_protos[pairs[0]] = [] seen_protos[pairs[0]] = []
@ -2014,7 +2014,7 @@ def reboot(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"reboot instance", "reboot instance",
"salt/cloud/{}/rebooting".format(vm_name), f"salt/cloud/{vm_name}/rebooting",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2025,7 +2025,7 @@ def reboot(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"reboot instance", "reboot instance",
"salt/cloud/{}/rebooted".format(vm_name), f"salt/cloud/{vm_name}/rebooted",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2056,7 +2056,7 @@ def start(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"start instance", "start instance",
"salt/cloud/{}/starting".format(vm_name), f"salt/cloud/{vm_name}/starting",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2067,7 +2067,7 @@ def start(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"start instance", "start instance",
"salt/cloud/{}/started".format(vm_name), f"salt/cloud/{vm_name}/started",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2096,7 +2096,7 @@ def stop(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"stop instance", "stop instance",
"salt/cloud/{}/stopping".format(vm_name), f"salt/cloud/{vm_name}/stopping",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2107,7 +2107,7 @@ def stop(vm_name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"stop instance", "stop instance",
"salt/cloud/{}/stopped".format(vm_name), f"salt/cloud/{vm_name}/stopped",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2145,12 +2145,12 @@ def destroy(vm_name, call=None):
exc, exc,
exc_info_on_loglevel=logging.DEBUG, exc_info_on_loglevel=logging.DEBUG,
) )
raise SaltCloudSystemExit("Could not find instance {}.".format(vm_name)) raise SaltCloudSystemExit(f"Could not find instance {vm_name}.")
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"delete instance", "delete instance",
"salt/cloud/{}/deleting".format(vm_name), f"salt/cloud/{vm_name}/deleting",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2186,11 +2186,11 @@ def destroy(vm_name, call=None):
exc, exc,
exc_info_on_loglevel=logging.DEBUG, exc_info_on_loglevel=logging.DEBUG,
) )
raise SaltCloudSystemExit("Could not destroy instance {}.".format(vm_name)) raise SaltCloudSystemExit(f"Could not destroy instance {vm_name}.")
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"delete instance", "delete instance",
"salt/cloud/{}/deleted".format(vm_name), f"salt/cloud/{vm_name}/deleted",
args={"name": vm_name}, args={"name": vm_name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2279,7 +2279,7 @@ def create_attach_volumes(name, kwargs, call=None):
letter = ord("a") - 1 letter = ord("a") - 1
for idx, volume in enumerate(volumes): for idx, volume in enumerate(volumes):
volume_name = "{}-sd{}".format(name, chr(letter + 2 + idx)) volume_name = f"{name}-sd{chr(letter + 2 + idx)}"
volume_dict = { volume_dict = {
"disk_name": volume_name, "disk_name": volume_name,


@ -417,7 +417,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -428,7 +428,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -536,7 +536,7 @@ def _query(
path += action path += action
if command: if command:
path += "/{}".format(command) path += f"/{command}"
log.debug("GoGrid URL: %s", path) log.debug("GoGrid URL: %s", path)


@ -470,22 +470,22 @@ def start(name, call=None, wait=True):
client = _connect_client() client = _connect_client()
server = client.servers.get_by_name(name) server = client.servers.get_by_name(name)
if server is None: if server is None:
return "Instance {} doesn't exist.".format(name) return f"Instance {name} doesn't exist."
server.power_on() server.power_on()
if wait and not wait_until(name, "running"): if wait and not wait_until(name, "running"):
return "Instance {} doesn't start.".format(name) return f"Instance {name} doesn't start."
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"started instance", "started instance",
"salt/cloud/{}/started".format(name), f"salt/cloud/{name}/started",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
) )
return {"Started": "{} was started.".format(name)} return {"Started": f"{name} was started."}
def stop(name, call=None, wait=True): def stop(name, call=None, wait=True):
@ -504,22 +504,22 @@ def stop(name, call=None, wait=True):
client = _connect_client() client = _connect_client()
server = client.servers.get_by_name(name) server = client.servers.get_by_name(name)
if server is None: if server is None:
return "Instance {} doesn't exist.".format(name) return f"Instance {name} doesn't exist."
server.power_off() server.power_off()
if wait and not wait_until(name, "off"): if wait and not wait_until(name, "off"):
return "Instance {} doesn't stop.".format(name) return f"Instance {name} doesn't stop."
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"stopped instance", "stopped instance",
"salt/cloud/{}/stopped".format(name), f"salt/cloud/{name}/stopped",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
) )
return {"Stopped": "{} was stopped.".format(name)} return {"Stopped": f"{name} was stopped."}
def reboot(name, call=None, wait=True): def reboot(name, call=None, wait=True):
@ -540,14 +540,14 @@ def reboot(name, call=None, wait=True):
client = _connect_client() client = _connect_client()
server = client.servers.get_by_name(name) server = client.servers.get_by_name(name)
if server is None: if server is None:
return "Instance {} doesn't exist.".format(name) return f"Instance {name} doesn't exist."
server.reboot() server.reboot()
if wait and not wait_until(name, "running"): if wait and not wait_until(name, "running"):
return "Instance {} doesn't start.".format(name) return f"Instance {name} doesn't start."
return {"Rebooted": "{} was rebooted.".format(name)} return {"Rebooted": f"{name} was rebooted."}
def destroy(name, call=None): def destroy(name, call=None):
@ -568,12 +568,12 @@ def destroy(name, call=None):
client = _connect_client() client = _connect_client()
server = client.servers.get_by_name(name) server = client.servers.get_by_name(name)
if server is None: if server is None:
return "Instance {} doesn't exist.".format(name) return f"Instance {name} doesn't exist."
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -583,14 +583,14 @@ def destroy(name, call=None):
if node["state"] == "running": if node["state"] == "running":
stop(name, call="action", wait=False) stop(name, call="action", wait=False)
if not wait_until(name, "off"): if not wait_until(name, "off"):
return {"Error": "Unable to destroy {}, command timed out".format(name)} return {"Error": f"Unable to destroy {name}, command timed out"}
server.delete() server.delete()
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -603,7 +603,7 @@ def destroy(name, call=None):
__opts__, __opts__,
) )
return {"Destroyed": "{} was destroyed.".format(name)} return {"Destroyed": f"{name} was destroyed."}
def resize(name, kwargs, call=None): def resize(name, kwargs, call=None):
@ -624,7 +624,7 @@ def resize(name, kwargs, call=None):
client = _connect_client() client = _connect_client()
server = client.servers.get_by_name(name) server = client.servers.get_by_name(name)
if server is None: if server is None:
return "Instance {} doesn't exist.".format(name) return f"Instance {name} doesn't exist."
# Check the configuration # Check the configuration
size = kwargs.get("size", None) size = kwargs.get("size", None)
@ -638,7 +638,7 @@ def resize(name, kwargs, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"resizing instance", "resizing instance",
"salt/cloud/{}/resizing".format(name), f"salt/cloud/{name}/resizing",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -648,17 +648,17 @@ def resize(name, kwargs, call=None):
if node["state"] == "running": if node["state"] == "running":
stop(name, call="action", wait=False) stop(name, call="action", wait=False)
if not wait_until(name, "off"): if not wait_until(name, "off"):
return {"Error": "Unable to resize {}, command timed out".format(name)} return {"Error": f"Unable to resize {name}, command timed out"}
server.change_type(server_type, kwargs.get("upgrade_disk", False)) server.change_type(server_type, kwargs.get("upgrade_disk", False))
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"resizing instance", "resizing instance",
"salt/cloud/{}/resized".format(name), f"salt/cloud/{name}/resized",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
) )
return {"Resized": "{} was resized.".format(name)} return {"Resized": f"{name} was resized."}


@ -161,9 +161,7 @@ def get_image(vm_):
images[vm_image]["name"] = images[vm_image]["id"] images[vm_image]["name"] = images[vm_image]["id"]
return images[vm_image] return images[vm_image]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def get_size(vm_): def get_size(vm_):
@ -178,9 +176,7 @@ def get_size(vm_):
if vm_size and str(vm_size) in sizes: if vm_size and str(vm_size) in sizes:
return sizes[vm_size] return sizes[vm_size]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def query_instance(vm_=None, call=None): def query_instance(vm_=None, call=None):
@ -375,11 +371,11 @@ def create_node(**kwargs):
if metadata is not None: if metadata is not None:
for key, value in metadata.items(): for key, value in metadata.items():
create_data["metadata.{}".format(key)] = value create_data[f"metadata.{key}"] = value
if tag is not None: if tag is not None:
for key, value in tag.items(): for key, value in tag.items():
create_data["tag.{}".format(key)] = value create_data[f"tag.{key}"] = value
if firewall_enabled is not None: if firewall_enabled is not None:
create_data["firewall_enabled"] = firewall_enabled create_data["firewall_enabled"] = firewall_enabled
@ -419,7 +415,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -435,7 +431,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -920,11 +916,11 @@ def avail_images(call=None):
get_configured_provider(), get_configured_provider(),
__opts__, __opts__,
search_global=False, search_global=False,
default="{}{}/{}/images".format(DEFAULT_LOCATION, JOYENT_API_HOST_SUFFIX, user), default=f"{DEFAULT_LOCATION}{JOYENT_API_HOST_SUFFIX}/{user}/images",
) )
if not img_url.startswith("http://") and not img_url.startswith("https://"): if not img_url.startswith("http://") and not img_url.startswith("https://"):
img_url = "{}://{}".format(_get_proto(), img_url) img_url = f"{_get_proto()}://{img_url}"
rcode, data = query(command="my/images", method="GET") rcode, data = query(command="my/images", method="GET")
log.debug(data) log.debug(data)
@ -1077,7 +1073,7 @@ def get_location_path(
:param location: joyent data center location :param location: joyent data center location
:return: url :return: url
""" """
return "{}://{}{}".format(_get_proto(), location, api_host_suffix) return f"{_get_proto()}://{location}{api_host_suffix}"
def query(action=None, command=None, args=None, method="GET", location=None, data=None): def query(action=None, command=None, args=None, method="GET", location=None, data=None):
@ -1151,7 +1147,7 @@ def query(action=None, command=None, args=None, method="GET", location=None, dat
path += action path += action
if command: if command:
path += "/{}".format(command) path += f"/{command}"
log.debug("User: '%s' on PATH: %s", user, path) log.debug("User: '%s' on PATH: %s", user, path)
@ -1174,9 +1170,9 @@ def query(action=None, command=None, args=None, method="GET", location=None, dat
signed = base64.b64encode(signed) signed = base64.b64encode(signed)
user_arr = user.split("/") user_arr = user.split("/")
if len(user_arr) == 1: if len(user_arr) == 1:
keyid = "/{}/keys/{}".format(user_arr[0], ssh_keyname) keyid = f"/{user_arr[0]}/keys/{ssh_keyname}"
elif len(user_arr) == 2: elif len(user_arr) == 2:
keyid = "/{}/users/{}/keys/{}".format(user_arr[0], user_arr[1], ssh_keyname) keyid = f"/{user_arr[0]}/users/{user_arr[1]}/keys/{ssh_keyname}"
else: else:
log.error("Malformed user string") log.error("Malformed user string")


@ -332,7 +332,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -345,7 +345,7 @@ def create(vm_):
) )
if key_filename is not None and not os.path.isfile(key_filename): if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined key_filename '{}' does not exist".format(key_filename) f"The defined key_filename '{key_filename}' does not exist"
) )
vm_["key_filename"] = key_filename vm_["key_filename"] = key_filename
# wait_for_instance requires private_key # wait_for_instance requires private_key
@ -374,7 +374,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"requesting instance", "requesting instance",
"salt/cloud/{}/requesting".format(name), f"salt/cloud/{name}/requesting",
args={ args={
"kwargs": __utils__["cloud.filter_event"]( "kwargs": __utils__["cloud.filter_event"](
"requesting", kwargs, list(kwargs) "requesting", kwargs, list(kwargs)
@ -392,7 +392,7 @@ def create(vm_):
description_elem = ElementTree.Element("description") description_elem = ElementTree.Element("description")
domain_xml.insert(0, description_elem) domain_xml.insert(0, description_elem)
description = domain_xml.find("./description") description = domain_xml.find("./description")
description.text = "Cloned from {}".format(base) description.text = f"Cloned from {base}"
domain_xml.remove(domain_xml.find("./uuid")) domain_xml.remove(domain_xml.find("./uuid"))
for iface_xml in domain_xml.findall("./devices/interface"): for iface_xml in domain_xml.findall("./devices/interface"):
@ -426,9 +426,7 @@ def create(vm_):
# see if there is a path element that needs rewriting # see if there is a path element that needs rewriting
if source_element and "path" in source_element.attrib: if source_element and "path" in source_element.attrib:
path = source_element.attrib["path"] path = source_element.attrib["path"]
new_path = path.replace( new_path = path.replace(f"/domain-{base}/", f"/domain-{name}/")
"/domain-{}/".format(base), "/domain-{}/".format(name)
)
log.debug("Rewriting agent socket path to %s", new_path) log.debug("Rewriting agent socket path to %s", new_path)
source_element.attrib["path"] = new_path source_element.attrib["path"] = new_path
@ -471,7 +469,7 @@ def create(vm_):
disk.find("./source").attrib["file"] = new_volume.path() disk.find("./source").attrib["file"] = new_volume.path()
else: else:
raise SaltCloudExecutionFailure( raise SaltCloudExecutionFailure(
"Disk type '{}' not supported".format(disk_type) f"Disk type '{disk_type}' not supported"
) )
clone_xml = salt.utils.stringutils.to_str(ElementTree.tostring(domain_xml)) clone_xml = salt.utils.stringutils.to_str(ElementTree.tostring(domain_xml))
@ -515,7 +513,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -617,15 +615,15 @@ def destroy(name, call=None):
pass pass
if not found: if not found:
return "{} doesn't exist and can't be deleted".format(name) return f"{name} doesn't exist and can't be deleted"
if len(found) > 1: if len(found) > 1:
return "{} doesn't identify a unique machine leaving things".format(name) return f"{name} doesn't identify a unique machine leaving things"
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -636,7 +634,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -718,15 +716,15 @@ def find_pool_and_volume(conn, path):
for v in sp.listAllVolumes(): for v in sp.listAllVolumes():
if v.path() == path: if v.path() == path:
return sp, v return sp, v
raise SaltCloudNotFound("Could not find volume for path {}".format(path)) raise SaltCloudNotFound(f"Could not find volume for path {path}")
def generate_new_name(orig_name): def generate_new_name(orig_name):
if "." not in orig_name: if "." not in orig_name:
return "{}-{}".format(orig_name, uuid.uuid1()) return f"{orig_name}-{uuid.uuid1()}"
name, ext = orig_name.rsplit(".", 1) name, ext = orig_name.rsplit(".", 1)
return "{}-{}.{}".format(name, uuid.uuid1(), ext) return f"{name}-{uuid.uuid1()}.{ext}"
def get_domain_volumes(conn, domain): def get_domain_volumes(conn, domain):


@ -339,7 +339,7 @@ def _get_ssh_keys(vm_):
key_files = _get_ssh_key_files(vm_) key_files = _get_ssh_key_files(vm_)
for file in map(lambda file: Path(file).resolve(), key_files): for file in map(lambda file: Path(file).resolve(), key_files):
if not (file.exists() or file.is_file()): if not (file.exists() or file.is_file()):
raise SaltCloudSystemExit("Invalid SSH key file: {}".format(str(file))) raise SaltCloudSystemExit(f"Invalid SSH key file: {str(file)}")
ssh_keys.add(file.read_text()) ssh_keys.add(file.read_text())
return list(ssh_keys) return list(ssh_keys)
@ -513,11 +513,11 @@ class LinodeAPIv4(LinodeAPI):
if headers is None: if headers is None:
headers = {} headers = {}
headers["Authorization"] = "Bearer {}".format(api_key) headers["Authorization"] = f"Bearer {api_key}"
headers["Content-Type"] = "application/json" headers["Content-Type"] = "application/json"
headers["User-Agent"] = "salt-cloud-linode" headers["User-Agent"] = "salt-cloud-linode"
url = "https://api.linode.com/{}{}".format(api_version, path) url = f"https://api.linode.com/{api_version}{path}"
decode = method != "DELETE" decode = method != "DELETE"
result = None result = None
@ -578,7 +578,7 @@ class LinodeAPIv4(LinodeAPI):
# If the response is not valid JSON or the error was not included, propagate the # If the response is not valid JSON or the error was not included, propagate the
# human readable status representation. # human readable status representation.
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"Linode API error occurred: {}".format(err_response.reason) f"Linode API error occurred: {err_response.reason}"
) )
if decode: if decode:
return self._get_response_json(result) return self._get_response_json(result)
@ -623,7 +623,7 @@ class LinodeAPIv4(LinodeAPI):
) )
response = self._query( response = self._query(
"/linode/instances/{}/boot".format(linode_id), f"/linode/instances/{linode_id}/boot",
method="POST", method="POST",
data={"config_id": config_id}, data={"config_id": config_id},
) )
@ -656,7 +656,7 @@ class LinodeAPIv4(LinodeAPI):
) )
return self._query( return self._query(
"/linode/instances/{}/clone".format(linode_id), f"/linode/instances/{linode_id}/clone",
method="POST", method="POST",
data={"region": location, "type": size}, data={"region": location, "type": size},
) )
@ -688,7 +688,7 @@ class LinodeAPIv4(LinodeAPI):
} }
return self._query( return self._query(
"/linode/instances/{}/configs".format(linode_id), f"/linode/instances/{linode_id}/configs",
method="POST", method="POST",
data={"label": name, "devices": devices}, data={"label": name, "devices": devices},
) )
@ -702,7 +702,7 @@ class LinodeAPIv4(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -795,7 +795,7 @@ class LinodeAPIv4(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"waiting for ssh", "waiting for ssh",
"salt/cloud/{}/waiting_for_ssh".format(name), f"salt/cloud/{name}/waiting_for_ssh",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"ip_address": vm_["ssh_host"]}, args={"ip_address": vm_["ssh_host"]},
transport=__opts__["transport"], transport=__opts__["transport"],
@ -810,7 +810,7 @@ class LinodeAPIv4(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -824,7 +824,7 @@ class LinodeAPIv4(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -838,7 +838,7 @@ class LinodeAPIv4(LinodeAPI):
instance = self._get_linode_by_name(name) instance = self._get_linode_by_name(name)
linode_id = instance.get("id", None) linode_id = instance.get("id", None)
self._query("/linode/instances/{}".format(linode_id), method="DELETE") self._query(f"/linode/instances/{linode_id}", method="DELETE")
def get_config_id(self, kwargs=None): def get_config_id(self, kwargs=None):
name = kwargs.get("name", None) name = kwargs.get("name", None)
@ -853,7 +853,7 @@ class LinodeAPIv4(LinodeAPI):
if linode_id is None: if linode_id is None:
linode_id = self.get_linode(kwargs=kwargs).get("id", None) linode_id = self.get_linode(kwargs=kwargs).get("id", None)
response = self._query("/linode/instances/{}/configs".format(linode_id)) response = self._query(f"/linode/instances/{linode_id}/configs")
configs = response.get("data", []) configs = response.get("data", [])
return {"config_id": configs[0]["id"]} return {"config_id": configs[0]["id"]}
@ -879,7 +879,7 @@ class LinodeAPIv4(LinodeAPI):
instance = self._get_linode_by_name(name) instance = self._get_linode_by_name(name)
linode_id = instance.get("id", None) linode_id = instance.get("id", None)
self._query("/linode/instances/{}/reboot".format(linode_id), method="POST") self._query(f"/linode/instances/{linode_id}/reboot", method="POST")
return self._wait_for_linode_status(linode_id, "running") return self._wait_for_linode_status(linode_id, "running")
def show_instance(self, name): def show_instance(self, name):
@ -939,7 +939,7 @@ class LinodeAPIv4(LinodeAPI):
"msg": "Machine already running", "msg": "Machine already running",
} }
self._query("/linode/instances/{}/boot".format(linode_id), method="POST") self._query(f"/linode/instances/{linode_id}/boot", method="POST")
self._wait_for_linode_status(linode_id, "running") self._wait_for_linode_status(linode_id, "running")
return { return {
@ -960,13 +960,13 @@ class LinodeAPIv4(LinodeAPI):
"msg": "Machine already stopped", "msg": "Machine already stopped",
} }
self._query("/linode/instances/{}/shutdown".format(linode_id), method="POST") self._query(f"/linode/instances/{linode_id}/shutdown", method="POST")
self._wait_for_linode_status(linode_id, "offline") self._wait_for_linode_status(linode_id, "offline")
return {"success": True, "state": "Stopped", "action": "stop"} return {"success": True, "state": "Stopped", "action": "stop"}
def _get_linode_by_id(self, linode_id): def _get_linode_by_id(self, linode_id):
return self._query("/linode/instances/{}".format(linode_id)) return self._query(f"/linode/instances/{linode_id}")
def _get_linode_by_name(self, name): def _get_linode_by_name(self, name):
result = self._query("/linode/instances") result = self._query("/linode/instances")
@ -976,9 +976,7 @@ class LinodeAPIv4(LinodeAPI):
if instance["label"] == name: if instance["label"] == name:
return instance return instance
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified name, {name}, could not be found.")
"The specified name, {}, could not be found.".format(name)
)
def _list_linodes(self, full=False): def _list_linodes(self, full=False):
result = self._query("/linode/instances") result = self._query("/linode/instances")
@ -1005,7 +1003,7 @@ class LinodeAPIv4(LinodeAPI):
return ret return ret
def _get_linode_type(self, linode_type): def _get_linode_type(self, linode_type):
return self._query("/linode/types/{}".format(linode_type)) return self._query(f"/linode/types/{linode_type}")
def _get_ips(self, linode_id): def _get_ips(self, linode_id):
instance = self._get_linode_by_id(linode_id) instance = self._get_linode_by_id(linode_id)
@ -1049,15 +1047,13 @@ class LinodeAPIv4(LinodeAPI):
time.sleep(poll_interval / 1000) time.sleep(poll_interval / 1000)
log.info("retrying: polling for %s...", description) log.info("retrying: polling for %s...", description)
else: else:
raise SaltCloudException( raise SaltCloudException(f"timed out: polling for {description}")
"timed out: polling for {}".format(description)
)
def _wait_for_entity_status( def _wait_for_entity_status(
self, getter, status, entity_name="item", identifier="some", timeout=None self, getter, status, entity_name="item", identifier="some", timeout=None
): ):
return self._poll( return self._poll(
"{} (id={}) status to be '{}'".format(entity_name, identifier, status), f"{entity_name} (id={identifier}) status to be '{status}'",
getter, getter,
lambda item: item.get("status") == status, lambda item: item.get("status") == status,
timeout=timeout, timeout=timeout,
@ -1126,8 +1122,8 @@ class LinodeAPIv4(LinodeAPI):
return True return True
return self._poll( return self._poll(
"event {} to be '{}'".format(event_id, status), f"event {event_id} to be '{status}'",
lambda: self._query("/account/events/{}".format(event_id)), lambda: self._query(f"/account/events/{event_id}"),
condition, condition,
timeout=timeout, timeout=timeout,
) )
@ -1170,7 +1166,7 @@ class LinodeAPIv3(LinodeAPI):
if "api_key" not in args.keys(): if "api_key" not in args.keys():
args["api_key"] = apikey args["api_key"] = apikey
if action and "api_action" not in args.keys(): if action and "api_action" not in args.keys():
args["api_action"] = "{}.{}".format(action, command) args["api_action"] = f"{action}.{command}"
if header_dict is None: if header_dict is None:
header_dict = {} header_dict = {}
if method != "POST": if method != "POST":
@ -1266,7 +1262,7 @@ class LinodeAPIv3(LinodeAPI):
if status == "1": if status == "1":
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"Cannot boot Linode {0}. " "Cannot boot Linode {0}. "
+ "Linode {} is already running.".format(linode_item) + f"Linode {linode_item} is already running."
) )
# Boot the VM and get the JobID from Linode # Boot the VM and get the JobID from Linode
@ -1311,7 +1307,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -1348,7 +1344,7 @@ class LinodeAPIv3(LinodeAPI):
kwargs = { kwargs = {
"clonefrom": clonefrom_name, "clonefrom": clonefrom_name,
"image": "Clone of {}".format(clonefrom_name), "image": f"Clone of {clonefrom_name}",
} }
if size is None: if size is None:
@ -1412,7 +1408,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"requesting instance", "requesting instance",
"salt/cloud/{}/requesting".format(name), f"salt/cloud/{name}/requesting",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"requesting", vm_, ["name", "profile", "provider", "driver"] "requesting", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -1505,7 +1501,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"waiting for ssh", "waiting for ssh",
"salt/cloud/{}/waiting_for_ssh".format(name), f"salt/cloud/{name}/waiting_for_ssh",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
args={"ip_address": vm_["ssh_host"]}, args={"ip_address": vm_["ssh_host"]},
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1522,7 +1518,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -1560,9 +1556,9 @@ class LinodeAPIv3(LinodeAPI):
instance = self._get_linode_by_name(name) instance = self._get_linode_by_name(name)
linode_id = instance.get("id", None) linode_id = instance.get("id", None)
disklist = "{},{}".format(root_disk_id, swap_disk_id) disklist = f"{root_disk_id},{swap_disk_id}"
if data_disk_id is not None: if data_disk_id is not None:
disklist = "{},{},{}".format(root_disk_id, swap_disk_id, data_disk_id) disklist = f"{root_disk_id},{swap_disk_id},{data_disk_id}"
config_args = { config_args = {
"LinodeID": int(linode_id), "LinodeID": int(linode_id),
@ -1663,7 +1659,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1678,7 +1674,7 @@ class LinodeAPIv3(LinodeAPI):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1732,7 +1728,7 @@ class LinodeAPIv3(LinodeAPI):
plan_type = "Nanode" plan_type = "Nanode"
plan_size = plan_size / 1024 plan_size = plan_size / 1024
new_label = "{} {}GB".format(plan_type, plan_size) new_label = f"{plan_type} {plan_size}GB"
if new_label not in sizes: if new_label not in sizes:
raise SaltCloudException( raise SaltCloudException(
@ -2052,9 +2048,7 @@ class LinodeAPIv3(LinodeAPI):
if name == node["LABEL"]: if name == node["LABEL"]:
return node return node
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified name, {name}, could not be found.")
"The specified name, {}, could not be found.".format(name)
)
def _get_linode_by_id(self, linode_id): def _get_linode_by_id(self, linode_id):
result = self._query("linode", "list", args={"LinodeID": linode_id}) result = self._query("linode", "list", args={"LinodeID": linode_id})


@ -179,7 +179,7 @@ def _salt(fun, *args, **kw):
ping_retries += 1 ping_retries += 1
log.error("%s unreachable, retrying", target) log.error("%s unreachable, retrying", target)
if not ping: if not ping:
raise SaltCloudSystemExit("Target {} unreachable".format(target)) raise SaltCloudSystemExit(f"Target {target} unreachable")
jid = conn.cmd_async(tgt=target, fun=fun, arg=args, kwarg=kw, **rkwargs) jid = conn.cmd_async(tgt=target, fun=fun, arg=args, kwarg=kw, **rkwargs)
cret = conn.cmd( cret = conn.cmd(
tgt=target, fun="saltutil.find_job", arg=[jid], timeout=10, **kwargs tgt=target, fun="saltutil.find_job", arg=[jid], timeout=10, **kwargs
@ -224,9 +224,7 @@ def _salt(fun, *args, **kw):
time.sleep(0.5) time.sleep(0.5)
try: try:
if "is not available." in ret: if "is not available." in ret:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(f"module/function {fun} is not available")
"module/function {} is not available".format(fun)
)
except SaltCloudSystemExit: # pylint: disable=try-except-raise except SaltCloudSystemExit: # pylint: disable=try-except-raise
raise raise
except TypeError: except TypeError:
@ -367,12 +365,12 @@ def destroy(vm_, call=None):
) )
if not get_configured_provider(): if not get_configured_provider():
return return
ret = {"comment": "{} was not found".format(vm_), "result": False} ret = {"comment": f"{vm_} was not found", "result": False}
if _salt("lxc.info", vm_, path=path): if _salt("lxc.info", vm_, path=path):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(vm_), f"salt/cloud/{vm_}/destroying",
args={"name": vm_, "instance_id": vm_}, args={"name": vm_, "instance_id": vm_},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -380,11 +378,11 @@ def destroy(vm_, call=None):
cret = _salt("lxc.destroy", vm_, stop=True, path=path) cret = _salt("lxc.destroy", vm_, stop=True, path=path)
ret["result"] = cret["result"] ret["result"] = cret["result"]
if ret["result"]: if ret["result"]:
ret["comment"] = "{} was destroyed".format(vm_) ret["comment"] = f"{vm_} was destroyed"
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(vm_), f"salt/cloud/{vm_}/destroyed",
args={"name": vm_, "instance_id": vm_}, args={"name": vm_, "instance_id": vm_},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -506,14 +504,14 @@ def get_configured_provider(vm_=None):
matched = False matched = False
# --list-images level # --list-images level
if img_provider: if img_provider:
tgt = "provider: {}".format(img_provider) tgt = f"provider: {img_provider}"
if dalias == img_provider: if dalias == img_provider:
data = get_provider(img_provider) data = get_provider(img_provider)
matched = True matched = True
# providers are set in configuration # providers are set in configuration
if not data and "profile" not in __opts__ and arg_providers: if not data and "profile" not in __opts__ and arg_providers:
for name in arg_providers: for name in arg_providers:
tgt = "provider: {}".format(name) tgt = f"provider: {name}"
if dalias == name: if dalias == name:
data = get_provider(name) data = get_provider(name)
if data: if data:
@ -523,13 +521,13 @@ def get_configured_provider(vm_=None):
elif "profile" in __opts__: elif "profile" in __opts__:
curprof = __opts__["profile"] curprof = __opts__["profile"]
profs = __opts__["profiles"] profs = __opts__["profiles"]
tgt = "profile: {}".format(curprof) tgt = f"profile: {curprof}"
if ( if (
curprof in profs curprof in profs
and profs[curprof]["provider"] == _get_active_provider_name() and profs[curprof]["provider"] == _get_active_provider_name()
): ):
prov, cdriver = profs[curprof]["provider"].split(":") prov, cdriver = profs[curprof]["provider"].split(":")
tgt += " provider: {}".format(prov) tgt += f" provider: {prov}"
data = get_provider(prov) data = get_provider(prov)
matched = True matched = True
# fallback if we have only __active_provider_name__ # fallback if we have only __active_provider_name__


@ -853,7 +853,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
kwargs["service_name"], kwargs["service_name"],
kwargs["deployment_name"], kwargs["deployment_name"],
kwargs["role_name"], kwargs["role_name"],
**volume **volume,
) )
log.debug(attach) log.debug(attach)
@ -954,7 +954,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
kwargs["service_name"], kwargs["service_name"],
kwargs["deployment_name"], kwargs["deployment_name"],
kwargs["role_name"], kwargs["role_name"],
**volume **volume,
) )
_wait_for_async(conn, result.request_id) _wait_for_async(conn, result.request_id)
@ -1031,7 +1031,7 @@ def destroy(name, conn=None, call=None, kwargs=None):
result = conn.delete_deployment(service_name, service_name) result = conn.delete_deployment(service_name, service_name)
except AzureConflictHttpError as exc: except AzureConflictHttpError as exc:
log.error(exc.message) log.error(exc.message)
raise SaltCloudSystemExit("{}: {}".format(name, exc.message)) raise SaltCloudSystemExit(f"{name}: {exc.message}")
delete_type = "delete_deployment" delete_type = "delete_deployment"
_wait_for_async(conn, result.request_id) _wait_for_async(conn, result.request_id)
ret[name] = { ret[name] = {


@ -193,9 +193,7 @@ def get_size(vm_):
if size: if size:
return size return size
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def get_image(vm_): def get_image(vm_):
@ -211,9 +209,7 @@ def get_image(vm_):
if vm_image and vm_image in (images[key]["id"], images[key]["name"]): if vm_image and vm_image in (images[key]["id"], images[key]["name"]):
return images[key] return images[key]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def avail_locations(conn=None, call=None): def avail_locations(conn=None, call=None):
@ -735,7 +731,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -749,7 +745,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -846,7 +842,7 @@ def get_key_filename(vm_):
key_filename = os.path.expanduser(key_filename) key_filename = os.path.expanduser(key_filename)
if not os.path.isfile(key_filename): if not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined ssh_private_key '{}' does not exist".format(key_filename) f"The defined ssh_private_key '{key_filename}' does not exist"
) )
return key_filename return key_filename
@ -897,11 +893,9 @@ def _wait_for_completion(conn, wait_timeout, server_id):
if server_state == "powered_on": if server_state == "powered_on":
return return
elif server_state == "failed": elif server_state == "failed":
raise Exception("Server creation failed for {}".format(server_id)) raise Exception(f"Server creation failed for {server_id}")
elif server_state in ("active", "enabled", "deploying", "configuring"): elif server_state in ("active", "enabled", "deploying", "configuring"):
continue continue
else: else:
raise Exception("Unknown server state {}".format(server_state)) raise Exception(f"Unknown server state {server_state}")
raise Exception( raise Exception(f"Timed out waiting for server create completion for {server_id}")
"Timed out waiting for server create completion for {}".format(server_id)
)


@ -558,7 +558,7 @@ def get_cluster_id(kwargs=None, call=None):
try: try:
ret = list_clusters()[name]["id"] ret = list_clusters()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The cluster '{}' could not be found".format(name)) raise SaltCloudSystemExit(f"The cluster '{name}' could not be found")
return ret return ret
@ -590,7 +590,7 @@ def get_datastore_id(kwargs=None, call=None):
try: try:
ret = list_datastores()[name]["id"] ret = list_datastores()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The datastore '{}' could not be found.".format(name)) raise SaltCloudSystemExit(f"The datastore '{name}' could not be found.")
return ret return ret
@ -622,7 +622,7 @@ def get_host_id(kwargs=None, call=None):
try: try:
ret = avail_locations()[name]["id"] ret = avail_locations()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The host '{}' could not be found".format(name)) raise SaltCloudSystemExit(f"The host '{name}' could not be found")
return ret return ret
@ -641,9 +641,7 @@ def get_image(vm_):
for image in images: for image in images:
if vm_image in (images[image]["name"], images[image]["id"]): if vm_image in (images[image]["name"], images[image]["id"]):
return images[image]["id"] return images[image]["id"]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def get_image_id(kwargs=None, call=None): def get_image_id(kwargs=None, call=None):
@ -673,7 +671,7 @@ def get_image_id(kwargs=None, call=None):
try: try:
ret = avail_images()[name]["id"] ret = avail_images()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The image '{}' could not be found".format(name)) raise SaltCloudSystemExit(f"The image '{name}' could not be found")
return ret return ret
@ -697,7 +695,7 @@ def get_location(vm_):
if vm_location in (locations[location]["name"], locations[location]["id"]): if vm_location in (locations[location]["name"], locations[location]["id"]):
return locations[location]["id"] return locations[location]["id"]
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified location, '{}', could not be found.".format(vm_location) f"The specified location, '{vm_location}', could not be found."
) )
@ -728,9 +726,7 @@ def get_secgroup_id(kwargs=None, call=None):
try: try:
ret = list_security_groups()[name]["id"] ret = list_security_groups()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(f"The security group '{name}' could not be found.")
"The security group '{}' could not be found.".format(name)
)
return ret return ret
@ -761,7 +757,7 @@ def get_template_image(kwargs=None, call=None):
ret = list_templates()[name]["template"]["disk"]["image"] ret = list_templates()[name]["template"]["disk"]["image"]
except KeyError: except KeyError:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"The image for template '{}' could not be found.".format(name) f"The image for template '{name}' could not be found."
) )
return ret return ret
@ -794,7 +790,7 @@ def get_template_id(kwargs=None, call=None):
try: try:
ret = list_templates()[name]["id"] ret = list_templates()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The template '{}' could not be found.".format(name)) raise SaltCloudSystemExit(f"The template '{name}' could not be found.")
return ret return ret
@ -816,7 +812,7 @@ def get_template(vm_):
return list_templates()[vm_template]["id"] return list_templates()[vm_template]["id"]
except KeyError: except KeyError:
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified template, '{}', could not be found.".format(vm_template) f"The specified template, '{vm_template}', could not be found."
) )
@ -847,7 +843,7 @@ def get_vm_id(kwargs=None, call=None):
try: try:
ret = list_nodes()[name]["id"] ret = list_nodes()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The VM '{}' could not be found.".format(name)) raise SaltCloudSystemExit(f"The VM '{name}' could not be found.")
return ret return ret
@ -879,7 +875,7 @@ def get_vn_id(kwargs=None, call=None):
try: try:
ret = list_vns()[name]["id"] ret = list_vns()[name]["id"]
except KeyError: except KeyError:
raise SaltCloudSystemExit("The VN '{}' could not be found.".format(name)) raise SaltCloudSystemExit(f"The VN '{name}' could not be found.")
return ret return ret
@ -895,9 +891,7 @@ def _get_device_template(disk, disk_info, template=None):
def _require_disk_opts(*args): def _require_disk_opts(*args):
for arg in args: for arg in args:
if arg not in disk_info: if arg not in disk_info:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(f"The disk {disk} requires a {arg} argument")
"The disk {} requires a {} argument".format(disk, arg)
)
_require_disk_opts("disk_type", "size") _require_disk_opts("disk_type", "size")
@ -919,12 +913,12 @@ def _get_device_template(disk, disk_info, template=None):
if disk_type == "volatile": if disk_type == "volatile":
_require_disk_opts("type") _require_disk_opts("type")
v_type = disk_info["type"] v_type = disk_info["type"]
temp = "DISK=[TYPE={}, SIZE={}]".format(v_type, size) temp = f"DISK=[TYPE={v_type}, SIZE={size}]"
if v_type == "fs": if v_type == "fs":
_require_disk_opts("format") _require_disk_opts("format")
format = disk_info["format"] format = disk_info["format"]
temp = "DISK=[TYPE={}, SIZE={}, FORMAT={}]".format(v_type, size, format) temp = f"DISK=[TYPE={v_type}, SIZE={size}, FORMAT={format}]"
return temp return temp
# TODO add persistant disk_type # TODO add persistant disk_type
@ -1101,7 +1095,7 @@ def create(vm_):
) )
if key_filename is not None and not os.path.isfile(key_filename): if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined key_filename '{}' does not exist".format(key_filename) f"The defined key_filename '{key_filename}' does not exist"
) )
if fqdn: if fqdn:
@ -1178,7 +1172,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
) )
@ -1192,7 +1186,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
) )
@ -4474,7 +4468,7 @@ def _get_xml(xml_str):
except etree.XMLSyntaxError as err: except etree.XMLSyntaxError as err:
# opennebula returned invalid XML, which could be an error message, so # opennebula returned invalid XML, which could be an error message, so
# log it # log it
raise SaltCloudSystemExit("opennebula returned: {}".format(xml_str)) raise SaltCloudSystemExit(f"opennebula returned: {xml_str}")
return xml_data return xml_data


@ -737,7 +737,7 @@ def create(vm_):
) )
if key_filename is not None and not os.path.isfile(key_filename): if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined ssh_key_file '{}' does not exist".format(key_filename) f"The defined ssh_key_file '{key_filename}' does not exist"
) )
vm_["key_filename"] = key_filename vm_["key_filename"] = key_filename
@ -846,7 +846,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -863,7 +863,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],


@ -242,9 +242,7 @@ def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=Tru
manager = packet.Manager(auth_token=vm_["token"]) manager = packet.Manager(auth_token=vm_["token"])
for i in range(0, iterations): for i in range(0, iterations):
get_object = getattr( get_object = getattr(manager, f"get_{status_type}")
manager, "get_{status_type}".format(status_type=status_type)
)
obj = get_object(object_id) obj = get_object(object_id)
if obj.state == status: if obj.state == status:
@ -340,7 +338,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -413,7 +411,7 @@ def create(vm_):
volume = manager.create_volume( volume = manager.create_volume(
vm_["project_id"], vm_["project_id"],
"{}_storage".format(name), f"{name}_storage",
vm_.get("storage_tier"), vm_.get("storage_tier"),
vm_.get("storage_size"), vm_.get("storage_size"),
vm_.get("location"), vm_.get("location"),
@ -441,7 +439,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -580,7 +578,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -606,7 +604,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -310,11 +310,11 @@ def create(vm_):
name = vm_["name"] name = vm_["name"]
if not wait_until(name, "CREATED"): if not wait_until(name, "CREATED"):
return {"Error": "Unable to start {}, command timed out".format(name)} return {"Error": f"Unable to start {name}, command timed out"}
start(vm_["name"], call="action") start(vm_["name"], call="action")
if not wait_until(name, "STARTED"): if not wait_until(name, "STARTED"):
return {"Error": "Unable to start {}, command timed out".format(name)} return {"Error": f"Unable to start {name}, command timed out"}
def __query_node_data(vm_name): def __query_node_data(vm_name):
data = show_instance(vm_name, call="action") data = show_instance(vm_name, call="action")
@ -391,7 +391,7 @@ def query(action=None, command=None, args=None, method="GET", data=None):
path += action path += action
if command: if command:
path += "/{}".format(command) path += f"/{command}"
if not isinstance(args, dict): if not isinstance(args, dict):
args = {} args = {}
@ -404,7 +404,7 @@ def query(action=None, command=None, args=None, method="GET", data=None):
if args: if args:
params = urllib.parse.urlencode(args) params = urllib.parse.urlencode(args)
req = urllib.request.Request(url="{}?{}".format(path, params), **kwargs) req = urllib.request.Request(url=f"{path}?{params}", **kwargs)
else: else:
req = urllib.request.Request(url=path, **kwargs) req = urllib.request.Request(url=path, **kwargs)
@ -526,7 +526,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -536,7 +536,7 @@ def destroy(name, call=None):
if node["state"] == "STARTED": if node["state"] == "STARTED":
stop(name, call="action") stop(name, call="action")
if not wait_until(name, "STOPPED"): if not wait_until(name, "STOPPED"):
return {"Error": "Unable to destroy {}, command timed out".format(name)} return {"Error": f"Unable to destroy {name}, command timed out"}
data = query(action="ve", command=name, method="DELETE") data = query(action="ve", command=name, method="DELETE")
@ -546,7 +546,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -557,7 +557,7 @@ def destroy(name, call=None):
name, _get_active_provider_name().split(":")[0], __opts__ name, _get_active_provider_name().split(":")[0], __opts__
) )
return {"Destroyed": "{} was destroyed.".format(name)} return {"Destroyed": f"{name} was destroyed."}
def start(name, call=None): def start(name, call=None):
@ -575,12 +575,12 @@ def start(name, call=None):
"The show_instance action must be called with -a or --action." "The show_instance action must be called with -a or --action."
) )
data = query(action="ve", command="{}/start".format(name), method="PUT") data = query(action="ve", command=f"{name}/start", method="PUT")
if "error" in data: if "error" in data:
return data["error"] return data["error"]
return {"Started": "{} was started.".format(name)} return {"Started": f"{name} was started."}
def stop(name, call=None): def stop(name, call=None):
@ -598,9 +598,9 @@ def stop(name, call=None):
"The show_instance action must be called with -a or --action." "The show_instance action must be called with -a or --action."
) )
data = query(action="ve", command="{}/stop".format(name), method="PUT") data = query(action="ve", command=f"{name}/stop", method="PUT")
if "error" in data: if "error" in data:
return data["error"] return data["error"]
return {"Stopped": "{} was stopped.".format(name)} return {"Stopped": f"{name} was stopped."}

View file

@ -328,9 +328,7 @@ def get_size(vm_):
combinations = (str(sizes[size]["id"]), str(size)) combinations = (str(sizes[size]["id"]), str(size))
if vm_size and str(vm_size) in combinations: if vm_size and str(vm_size) in combinations:
return sizes[size] return sizes[size]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def get_datacenter_id(): def get_datacenter_id():
@ -415,7 +413,7 @@ def get_datacenter(conn):
return item return item
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified datacenter '{}' could not be found.".format(datacenter_id) f"The specified datacenter '{datacenter_id}' could not be found."
) )
@ -488,9 +486,7 @@ def get_image(vm_):
if vm_image and vm_image in (images[key]["id"], images[key]["name"]): if vm_image and vm_image in (images[key]["id"], images[key]["name"]):
return images[key] return images[key]
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def list_datacenters(conn=None, call=None): def list_datacenters(conn=None, call=None):
@ -725,7 +721,7 @@ def get_public_keys(vm_):
key_filename = os.path.expanduser(key_filename) key_filename = os.path.expanduser(key_filename)
if not os.path.isfile(key_filename): if not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined ssh_public_key '{}' does not exist".format(key_filename) f"The defined ssh_public_key '{key_filename}' does not exist"
) )
ssh_keys = [] ssh_keys = []
with salt.utils.files.fopen(key_filename) as rfh: with salt.utils.files.fopen(key_filename) as rfh:
@ -746,7 +742,7 @@ def get_key_filename(vm_):
key_filename = os.path.expanduser(key_filename) key_filename = os.path.expanduser(key_filename)
if not os.path.isfile(key_filename): if not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined ssh_private_key '{}' does not exist".format(key_filename) f"The defined ssh_private_key '{key_filename}' does not exist"
) )
return key_filename return key_filename
@ -941,7 +937,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -972,7 +968,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1142,9 +1138,7 @@ def _get_data_volumes(vm_):
# Verify the required 'disk_size' property is present in the cloud # Verify the required 'disk_size' property is present in the cloud
# profile config # profile config
if "disk_size" not in volumes[key].keys(): if "disk_size" not in volumes[key].keys():
raise SaltCloudConfigError( raise SaltCloudConfigError(f"The volume '{key}' is missing 'disk_size'")
"The volume '{}' is missing 'disk_size'".format(key)
)
# Use 'HDD' if no 'disk_type' property is present in cloud profile # Use 'HDD' if no 'disk_type' property is present in cloud profile
if "disk_type" not in volumes[key].keys(): if "disk_type" not in volumes[key].keys():
volumes[key]["disk_type"] = "HDD" volumes[key]["disk_type"] = "HDD"
@ -1187,7 +1181,7 @@ def _get_firewall_rules(firewall_rules):
# profile config # profile config
if "protocol" not in firewall_rules[key].keys(): if "protocol" not in firewall_rules[key].keys():
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The firewall rule '{}' is missing 'protocol'".format(key) f"The firewall rule '{key}' is missing 'protocol'"
) )
ret.append( ret.append(
FirewallRule( FirewallRule(

View file

@ -135,7 +135,7 @@ def _authenticate():
) )
connect_data = {"username": username, "password": passwd} connect_data = {"username": username, "password": passwd}
full_url = "https://{}:{}/api2/json/access/ticket".format(url, port) full_url = f"https://{url}:{port}/api2/json/access/ticket"
response = requests.post( response = requests.post(
full_url, verify=verify_ssl, data=connect_data, timeout=120 full_url, verify=verify_ssl, data=connect_data, timeout=120
@ -155,7 +155,7 @@ def query(conn_type, option, post_data=None):
log.debug("Not authenticated yet, doing that now..") log.debug("Not authenticated yet, doing that now..")
_authenticate() _authenticate()
full_url = "https://{}:{}/api2/json/{}".format(url, port, option) full_url = f"https://{url}:{port}/api2/json/{option}"
log.debug("%s: %s (%s)", conn_type, full_url, post_data) log.debug("%s: %s (%s)", conn_type, full_url, post_data)
@ -450,9 +450,7 @@ def avail_images(call=None, location="local"):
ret = {} ret = {}
for host_name, host_details in avail_locations().items(): for host_name, host_details in avail_locations().items():
for item in query( for item in query("get", f"nodes/{host_name}/storage/{location}/content"):
"get", "nodes/{}/storage/{}/content".format(host_name, location)
):
ret[item["volid"]] = item ret[item["volid"]] = item
return ret return ret
@ -559,7 +557,7 @@ def _dictionary_to_stringlist(input_dict):
setting1=value1,setting2=value2 setting1=value1,setting2=value2
""" """
return ",".join("{}={}".format(k, input_dict[k]) for k in sorted(input_dict.keys())) return ",".join(f"{k}={input_dict[k]}" for k in sorted(input_dict.keys()))
def _reconfigure_clone(vm_, vmid): def _reconfigure_clone(vm_, vmid):
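For reference, the one-line helper above still produces the "setting1=value1,setting2=value2" form its docstring describes; a small standalone check:

def _dictionary_to_stringlist(input_dict):
    # Same one-liner as above, repeated here so the example is self-contained.
    return ",".join(f"{k}={input_dict[k]}" for k in sorted(input_dict.keys()))

assert (
    _dictionary_to_stringlist({"setting2": "value2", "setting1": "value1"})
    == "setting1=value1,setting2=value2"
)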
@ -715,7 +713,7 @@ def create(vm_):
# wait until the vm has been created so we can start it # wait until the vm has been created so we can start it
if not wait_for_created(data["upid"], timeout=300): if not wait_for_created(data["upid"], timeout=300):
return {"Error": "Unable to create {}, command timed out".format(name)} return {"Error": f"Unable to create {name}, command timed out"}
if vm_.get("clone") is True: if vm_.get("clone") is True:
_reconfigure_clone(vm_, vmid) _reconfigure_clone(vm_, vmid)
@ -728,7 +726,7 @@ def create(vm_):
# Wait until the VM has fully started # Wait until the VM has fully started
log.debug('Waiting for state "running" for vm %s on %s', vmid, host) log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
if not wait_for_state(vmid, "running"): if not wait_for_state(vmid, "running"):
return {"Error": "Unable to start {}, command timed out".format(name)} return {"Error": f"Unable to start {name}, command timed out"}
if agent_get_ip is True: if agent_get_ip is True:
try: try:
@ -868,7 +866,7 @@ def _import_api():
Load this json content into global variable "api" Load this json content into global variable "api"
""" """
global api global api
full_url = "https://{}:{}/pve-docs/api-viewer/apidoc.js".format(url, port) full_url = f"https://{url}:{port}/pve-docs/api-viewer/apidoc.js"
returned_data = requests.get(full_url, verify=verify_ssl, timeout=120) returned_data = requests.get(full_url, verify=verify_ssl, timeout=120)
re_filter = re.compile(" (?:pveapi|apiSchema) = (.*)^;", re.DOTALL | re.MULTILINE) re_filter = re.compile(" (?:pveapi|apiSchema) = (.*)^;", re.DOTALL | re.MULTILINE)
@ -1102,12 +1100,12 @@ def get_vmconfig(vmid, node=None, node_type="openvz"):
if node is None: if node is None:
# We need to figure out which node this VM is on. # We need to figure out which node this VM is on.
for host_name, host_details in avail_locations().items(): for host_name, host_details in avail_locations().items():
for item in query("get", "nodes/{}/{}".format(host_name, node_type)): for item in query("get", f"nodes/{host_name}/{node_type}"):
if item["vmid"] == vmid: if item["vmid"] == vmid:
node = host_name node = host_name
# If we reached this point, we have all the information we need # If we reached this point, we have all the information we need
data = query("get", "nodes/{}/{}/{}/config".format(node, node_type, vmid)) data = query("get", f"nodes/{node}/{node_type}/{vmid}/config")
return data return data
@ -1179,7 +1177,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1193,7 +1191,7 @@ def destroy(name, call=None):
# wait until stopped # wait until stopped
if not wait_for_state(vmobj["vmid"], "stopped"): if not wait_for_state(vmobj["vmid"], "stopped"):
return {"Error": "Unable to stop {}, command timed out".format(name)} return {"Error": f"Unable to stop {name}, command timed out"}
# required to wait a bit here, otherwise the VM is sometimes # required to wait a bit here, otherwise the VM is sometimes
# still locked and destroy fails. # still locked and destroy fails.
@ -1203,7 +1201,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1213,7 +1211,7 @@ def destroy(name, call=None):
name, _get_active_provider_name().split(":")[0], __opts__ name, _get_active_provider_name().split(":")[0], __opts__
) )
return {"Destroyed": "{} was destroyed.".format(name)} return {"Destroyed": f"{name} was destroyed."}
def set_vm_status(status, name=None, vmid=None): def set_vm_status(status, name=None, vmid=None):
@ -1302,7 +1300,7 @@ def start(name, vmid=None, call=None):
# xxx: TBD: Check here whether the status was actually changed to 'started' # xxx: TBD: Check here whether the status was actually changed to 'started'
return {"Started": "{} was started.".format(name)} return {"Started": f"{name} was started."}
def stop(name, vmid=None, call=None): def stop(name, vmid=None, call=None):
@ -1324,7 +1322,7 @@ def stop(name, vmid=None, call=None):
# xxx: TBD: Check here whether the status was actually changed to 'stopped' # xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {"Stopped": "{} was stopped.".format(name)} return {"Stopped": f"{name} was stopped."}
def shutdown(name=None, vmid=None, call=None): def shutdown(name=None, vmid=None, call=None):
@ -1348,4 +1346,4 @@ def shutdown(name=None, vmid=None, call=None):
# xxx: TBD: Check here whether the status was actually changed to 'stopped' # xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {"Shutdown": "{} was shutdown.".format(name)} return {"Shutdown": f"{name} was shutdown."}

View file

@ -108,7 +108,7 @@ def _compute_signature(parameters, access_key_secret, method, path):
""" """
parameters["signature_method"] = "HmacSHA256" parameters["signature_method"] = "HmacSHA256"
string_to_sign = "{}\n{}\n".format(method.upper(), path) string_to_sign = f"{method.upper()}\n{path}\n"
keys = sorted(parameters.keys()) keys = sorted(parameters.keys())
pairs = [] pairs = []
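The hunk only shows the start of the signature routine; a hedged sketch of the usual HmacSHA256 flow it appears to follow (the exact quoting and the final encoding are assumptions, not shown in this diff):

import base64
import hashlib
import hmac
from urllib.parse import quote

def _sign_sketch(parameters, access_key_secret, method, path):
    # Assumed continuation of _compute_signature: canonicalize the sorted
    # parameters, append them to string_to_sign, HMAC-SHA256, base64-encode.
    string_to_sign = f"{method.upper()}\n{path}\n"
    pairs = [f"{quote(k)}={quote(str(parameters[k]))}" for k in sorted(parameters)]
    string_to_sign += "&".join(pairs)
    digest = hmac.new(
        access_key_secret.encode(), string_to_sign.encode(), hashlib.sha256
    ).digest()
    return base64.b64encode(digest).decode()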
@ -166,9 +166,9 @@ def query(params=None):
for sk, sv in value[i - 1].items(): for sk, sv in value[i - 1].items():
if isinstance(sv, dict) or isinstance(sv, list): if isinstance(sv, dict) or isinstance(sv, list):
sv = salt.utils.json.dumps(sv, separators=(",", ":")) sv = salt.utils.json.dumps(sv, separators=(",", ":"))
real_parameters["{}.{}.{}".format(key, i, sk)] = sv real_parameters[f"{key}.{i}.{sk}"] = sv
else: else:
real_parameters["{}.{}".format(key, i)] = value[i - 1] real_parameters[f"{key}.{i}"] = value[i - 1]
else: else:
real_parameters[key] = value real_parameters[key] = value
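Concretely, the loop above flattens list-valued parameters into 1-based dotted keys; a small illustration with made-up values (the dict-in-list branch from the hunk is omitted):

params = {"zone": "pek3a", "instances": ["i-abc", "i-def"]}
real_parameters = {}
for key, value in params.items():
    if isinstance(value, list):
        for i in range(1, len(value) + 1):
            real_parameters[f"{key}.{i}"] = value[i - 1]
    else:
        real_parameters[key] = value

assert real_parameters == {
    "zone": "pek3a",
    "instances.1": "i-abc",
    "instances.2": "i-def",
}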
@ -252,7 +252,7 @@ def _get_location(vm_=None):
return vm_location return vm_location
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified location, '{}', could not be found.".format(vm_location) f"The specified location, '{vm_location}', could not be found."
) )
@ -320,9 +320,7 @@ def _get_image(vm_):
if vm_image in images: if vm_image in images:
return vm_image return vm_image
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def show_image(kwargs, call=None): def show_image(kwargs, call=None):
@ -442,9 +440,7 @@ def _get_size(vm_):
if vm_size in sizes: if vm_size in sizes:
return vm_size return vm_size
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def _show_normalized_node(full_node): def _show_normalized_node(full_node):
@ -626,7 +622,7 @@ def show_instance(instance_id, call=None, kwargs=None):
if items["total_count"] == 0: if items["total_count"] == 0:
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified instance, '{}', could not be found.".format(instance_id) f"The specified instance, '{instance_id}', could not be found."
) )
full_node = items["instance_set"][0] full_node = items["instance_set"][0]
@ -878,7 +874,7 @@ def destroy(instance_id, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -894,7 +890,7 @@ def destroy(instance_id, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -289,7 +289,7 @@ def create(vm_):
if ssh_host: if ssh_host:
log.info("trying to ping %s", ssh_host) log.info("trying to ping %s", ssh_host)
count = "n" if salt.utils.platform.is_windows() else "c" count = "n" if salt.utils.platform.is_windows() else "c"
cmd = "ping -{} 1 {}".format(count, ssh_host) cmd = f"ping -{count} 1 {ssh_host}"
good_ping = local.cmd(wol_host, "cmd.retcode", [cmd]) == 0 good_ping = local.cmd(wol_host, "cmd.retcode", [cmd]) == 0
if good_ping: if good_ping:
log.info("successful ping.") log.info("successful ping.")
@ -464,7 +464,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=opts["sock_dir"], sock_dir=opts["sock_dir"],
transport=opts["transport"], transport=opts["transport"],
@ -510,13 +510,13 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=opts["sock_dir"], sock_dir=opts["sock_dir"],
transport=opts["transport"], transport=opts["transport"],
) )
return {"Destroyed": "{} was destroyed.".format(name)} return {"Destroyed": f"{name} was destroyed."}
def reboot(name, call=None): def reboot(name, call=None):

View file

@ -160,7 +160,7 @@ def get_image(server_):
if server_image in (images[image]["name"], images[image]["id"]): if server_image in (images[image]["name"], images[image]["id"]):
return images[image]["id"] return images[image]["id"]
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified image, '{}', could not be found.".format(server_image) f"The specified image, '{server_image}', could not be found."
) )
@ -225,7 +225,7 @@ def create(server_):
if key_filename is not None and not os.path.isfile(key_filename): if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError( raise SaltCloudConfigError(
"The defined key_filename '{}' does not exist".format(key_filename) f"The defined key_filename '{key_filename}' does not exist"
) )
ssh_password = config.get_cloud_config_value("ssh_password", server_, __opts__) ssh_password = config.get_cloud_config_value("ssh_password", server_, __opts__)
@ -346,10 +346,10 @@ def query(
) )
) )
path = "{}/{}/".format(base_path, method) path = f"{base_path}/{method}/"
if server_id: if server_id:
path += "{}/".format(server_id) path += f"{server_id}/"
if command: if command:
path += command path += command
@ -439,7 +439,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -457,7 +457,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -269,7 +269,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -395,7 +395,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"requesting instance", "requesting instance",
"salt/cloud/{}/requesting".format(name), f"salt/cloud/{name}/requesting",
args={ args={
"kwargs": __utils__["cloud.filter_event"]( "kwargs": __utils__["cloud.filter_event"](
"requesting", kwargs, list(kwargs) "requesting", kwargs, list(kwargs)
@ -513,7 +513,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -620,7 +620,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -633,7 +633,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -241,7 +241,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"] "creating", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -311,7 +311,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"requesting instance", "requesting instance",
"salt/cloud/{}/requesting".format(name), f"salt/cloud/{name}/requesting",
args={ args={
"kwargs": __utils__["cloud.filter_event"]( "kwargs": __utils__["cloud.filter_event"](
"requesting", kwargs, list(kwargs) "requesting", kwargs, list(kwargs)
@ -406,7 +406,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args=__utils__["cloud.filter_event"]( args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"] "created", vm_, ["name", "profile", "provider", "driver"]
), ),
@ -514,7 +514,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -535,7 +535,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -123,7 +123,7 @@ def get_provider_client(name=None):
elif name == "vpc_client": elif name == "vpc_client":
client = vpc_client.VpcClient(crd, region, cpf) client = vpc_client.VpcClient(crd, region, cpf)
else: else:
raise SaltCloudSystemExit("Client name {} is not supported".format(name)) raise SaltCloudSystemExit(f"Client name {name} is not supported")
return client return client
@ -206,11 +206,11 @@ def avail_sizes(call=None):
ret[typeConfig.InstanceType] = { ret[typeConfig.InstanceType] = {
"Zone": typeConfig.Zone, "Zone": typeConfig.Zone,
"InstanceFamily": typeConfig.InstanceFamily, "InstanceFamily": typeConfig.InstanceFamily,
"Memory": "{}GB".format(typeConfig.Memory), "Memory": f"{typeConfig.Memory}GB",
"CPU": "{}-Core".format(typeConfig.CPU), "CPU": f"{typeConfig.CPU}-Core",
} }
if typeConfig.GPU: if typeConfig.GPU:
ret[typeConfig.InstanceType]["GPU"] = "{}-Core".format(typeConfig.GPU) ret[typeConfig.InstanceType]["GPU"] = f"{typeConfig.GPU}-Core"
return ret return ret
@ -714,7 +714,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -730,7 +730,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -782,9 +782,7 @@ def show_image(kwargs, call=None):
resp = client.DescribeImages(req) resp = client.DescribeImages(req)
if not resp.ImageSet: if not resp.ImageSet:
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image '{image}' could not be found.")
"The specified image '{}' could not be found.".format(image)
)
ret = {} ret = {}
for image in resp.ImageSet: for image in resp.ImageSet:
@ -794,7 +792,7 @@ def show_image(kwargs, call=None):
"ImageSource": image.ImageSource, "ImageSource": image.ImageSource,
"Platform": image.Platform, "Platform": image.Platform,
"Architecture": image.Architecture, "Architecture": image.Architecture,
"ImageSize": "{}GB".format(image.ImageSize), "ImageSize": f"{image.ImageSize}GB",
"ImageState": image.ImageState, "ImageState": image.ImageState,
} }
@ -893,7 +891,7 @@ def _get_node(name):
) )
time.sleep(0.5) time.sleep(0.5)
raise SaltCloudNotFound("Failed to get instance info {}".format(name)) raise SaltCloudNotFound(f"Failed to get instance info {name}")
def _get_nodes(): def _get_nodes():
@ -940,7 +938,7 @@ def _get_images(image_type):
"ImageSource": image.ImageSource, "ImageSource": image.ImageSource,
"Platform": image.Platform, "Platform": image.Platform,
"Architecture": image.Architecture, "Architecture": image.Architecture,
"ImageSize": "{}GB".format(image.ImageSize), "ImageSize": f"{image.ImageSize}GB",
} }
return ret return ret
@ -958,9 +956,7 @@ def __get_image(vm_):
if vm_image in images: if vm_image in images:
return vm_image return vm_image
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image '{vm_image}' could not be found.")
"The specified image '{}' could not be found.".format(vm_image)
)
def __get_size(vm_): def __get_size(vm_):
@ -975,9 +971,7 @@ def __get_size(vm_):
if vm_size in sizes: if vm_size in sizes:
return vm_size return vm_size
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size '{vm_size}' could not be found.")
"The specified size '{}' could not be found.".format(vm_size)
)
def __get_securitygroups(vm_): def __get_securitygroups(vm_):

View file

@ -256,7 +256,7 @@ def create(vm_):
vm_.setdefault("ssh_port", ret["ssh_port"]) vm_.setdefault("ssh_port", ret["ssh_port"])
except (KeyError, TypeError): except (KeyError, TypeError):
raise SaltInvocationError( raise SaltInvocationError(
"Insufficient SSH addressing information for {}".format(name) f"Insufficient SSH addressing information for {name}"
) )
log.info( log.info(
@ -300,7 +300,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=opts["sock_dir"], sock_dir=opts["sock_dir"],
transport=opts["transport"], transport=opts["transport"],
@ -317,7 +317,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=opts["sock_dir"], sock_dir=opts["sock_dir"],
transport=opts["transport"], transport=opts["transport"],
@ -328,11 +328,11 @@ def destroy(name, call=None):
name, _get_active_provider_name().split(":")[0], opts name, _get_active_provider_name().split(":")[0], opts
) )
return {"Destroyed": "{} was destroyed.".format(name)} return {"Destroyed": f"{name} was destroyed."}
else: else:
return {"Error": "Error destroying {}".format(name)} return {"Error": f"Error destroying {name}"}
else: else:
return {"Error": "No response from {}. Cannot destroy.".format(name)} return {"Error": f"No response from {name}. Cannot destroy."}
# noinspection PyTypeChecker # noinspection PyTypeChecker

View file

@ -368,12 +368,12 @@ def destroy(name, call=None):
""" """
log.info("Attempting to delete instance %s", name) log.info("Attempting to delete instance %s", name)
if not vb_machine_exists(name): if not vb_machine_exists(name):
return "{} doesn't exist and can't be deleted".format(name) return f"{name} doesn't exist and can't be deleted"
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -384,7 +384,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -306,7 +306,7 @@ def _add_new_hard_disk_helper(
disk_spec.device.key = random_key disk_spec.device.key = random_key
disk_spec.device.deviceInfo = vim.Description() disk_spec.device.deviceInfo = vim.Description()
disk_spec.device.deviceInfo.label = disk_label disk_spec.device.deviceInfo.label = disk_label
disk_spec.device.deviceInfo.summary = "{} GB".format(size_gb) disk_spec.device.deviceInfo.summary = f"{size_gb} GB"
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.thinProvisioned = thin_provision disk_spec.device.backing.thinProvisioned = thin_provision
@ -662,7 +662,7 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path):
if datastore_ref: if datastore_ref:
drive.backing.datastore = datastore_ref drive.backing.datastore = datastore_ref
drive.deviceInfo.summary = "ISO {}".format(iso_path) drive.deviceInfo.summary = f"ISO {iso_path}"
elif device_type == "client_device": elif device_type == "client_device":
if mode == "passthrough": if mode == "passthrough":
@ -917,7 +917,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
else None else None
) )
if bus_sharing and bus_sharing in ["virtual", "physical", "no"]: if bus_sharing and bus_sharing in ["virtual", "physical", "no"]:
bus_sharing = "{}Sharing".format(bus_sharing) bus_sharing = f"{bus_sharing}Sharing"
if bus_sharing != device.sharedBus: if bus_sharing != device.sharedBus:
# Only edit the SCSI controller if bus_sharing is different # Only edit the SCSI controller if bus_sharing is different
scsi_spec = _edit_existing_scsi_controller( scsi_spec = _edit_existing_scsi_controller(
@ -1327,7 +1327,7 @@ def _format_instance_info_select(vm, selection):
if "size" in selection: if "size" in selection:
cpu = defaultto(vm, "config.hardware.numCPU") cpu = defaultto(vm, "config.hardware.numCPU")
ram = "{} MB".format(defaultto(vm, "config.hardware.memoryMB")) ram = "{} MB".format(defaultto(vm, "config.hardware.memoryMB"))
vm_select_info["size"] = "cpu: {}\nram: {}".format(cpu, ram) vm_select_info["size"] = f"cpu: {cpu}\nram: {ram}"
vm_select_info["size_dict"] = { vm_select_info["size_dict"] = {
"cpu": cpu, "cpu": cpu,
"memory": ram, "memory": ram,
@ -1610,7 +1610,7 @@ def _format_instance_info(vm):
if "config.guestFullName" in vm if "config.guestFullName" in vm
else "N/A" else "N/A"
), ),
"size": "cpu: {}\nram: {}".format(cpu, ram), "size": f"cpu: {cpu}\nram: {ram}",
"size_dict": {"cpu": cpu, "memory": ram}, "size_dict": {"cpu": cpu, "memory": ram},
"state": ( "state": (
str(vm["summary.runtime.powerState"]) str(vm["summary.runtime.powerState"])
@ -1642,7 +1642,7 @@ def _format_instance_info(vm):
def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""): def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""):
snapshots = {} snapshots = {}
for snapshot in snapshot_list: for snapshot in snapshot_list:
snapshot_path = "{}/{}".format(parent_snapshot_path, snapshot.name) snapshot_path = f"{parent_snapshot_path}/{snapshot.name}"
snapshots[snapshot_path] = { snapshots[snapshot_path] = {
"name": snapshot.name, "name": snapshot.name,
"description": snapshot.description, "description": snapshot.description,
@ -1777,7 +1777,7 @@ def test_vcenter_connection(kwargs=None, call=None):
# Get the service instance object # Get the service instance object
_get_si() _get_si()
except Exception as exc: # pylint: disable=broad-except except Exception as exc: # pylint: disable=broad-except
return "failed to connect: {}".format(exc) return f"failed to connect: {exc}"
return "connection successful" return "connection successful"
@ -2027,7 +2027,7 @@ def list_nodes(kwargs=None, call=None):
if "config.guestFullName" in vm if "config.guestFullName" in vm
else "N/A" else "N/A"
), ),
"size": "cpu: {}\nram: {}".format(cpu, ram), "size": f"cpu: {cpu}\nram: {ram}",
"size_dict": {"cpu": cpu, "memory": ram}, "size_dict": {"cpu": cpu, "memory": ram},
"state": ( "state": (
str(vm["summary.runtime.powerState"]) str(vm["summary.runtime.powerState"])
@ -2684,7 +2684,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -2730,7 +2730,7 @@ def destroy(name, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -3135,7 +3135,7 @@ def create(vm_):
) )
if not datastore_ref: if not datastore_ref:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"Specified datastore: '{}' does not exist".format(datastore) f"Specified datastore: '{datastore}' does not exist"
) )
if host: if host:
@ -3151,7 +3151,7 @@ def create(vm_):
# If the hardware version is specified and if it is different from the current # If the hardware version is specified and if it is different from the current
# hardware version, then schedule a hardware version upgrade # hardware version, then schedule a hardware version upgrade
if hardware_version and object_ref is not None: if hardware_version and object_ref is not None:
hardware_version = "vmx-{:02}".format(hardware_version) hardware_version = f"vmx-{hardware_version:02}"
if hardware_version != object_ref.config.version: if hardware_version != object_ref.config.version:
log.debug( log.debug(
"Scheduling hardware version upgrade from %s to %s", "Scheduling hardware version upgrade from %s to %s",
@ -3181,7 +3181,7 @@ def create(vm_):
elif memory_unit.lower() == "gb": elif memory_unit.lower() == "gb":
memory_mb = int(float(memory_num) * 1024.0) memory_mb = int(float(memory_num) * 1024.0)
else: else:
err_msg = "Invalid memory type specified: '{}'".format(memory_unit) err_msg = f"Invalid memory type specified: '{memory_unit}'"
log.error(err_msg) log.error(err_msg)
return {"Error": err_msg} return {"Error": err_msg}
except (TypeError, ValueError): except (TypeError, ValueError):
@ -3629,7 +3629,7 @@ def rescan_hba(kwargs=None, call=None):
if hba: if hba:
log.info("Rescanning HBA %s on host %s", hba, host_name) log.info("Rescanning HBA %s on host %s", hba, host_name)
host_ref.configManager.storageSystem.RescanHba(hba) host_ref.configManager.storageSystem.RescanHba(hba)
ret = "rescanned HBA {}".format(hba) ret = f"rescanned HBA {hba}"
else: else:
log.info("Rescanning all HBAs on host %s", host_name) log.info("Rescanning all HBAs on host %s", host_name)
host_ref.configManager.storageSystem.RescanAllHba() host_ref.configManager.storageSystem.RescanAllHba()
@ -3907,7 +3907,7 @@ def list_hbas(kwargs=None, call=None):
if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]: if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
"Specified hba type {} currently not supported.".format(hba_type) f"Specified hba type {hba_type} currently not supported."
) )
host_list = salt.utils.vmware.get_mors_with_properties( host_list = salt.utils.vmware.get_mors_with_properties(
@ -4280,10 +4280,10 @@ def revert_to_snapshot(name, kwargs=None, call=None):
task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on) task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on)
else: else:
log.debug("Reverting VM %s to snapshot %s", name, snapshot_name) log.debug("Reverting VM %s to snapshot %s", name, snapshot_name)
msg = "reverted to snapshot {}".format(snapshot_name) msg = f"reverted to snapshot {snapshot_name}"
snapshot_ref = _get_snapshot_ref_by_name(vm_ref, snapshot_name) snapshot_ref = _get_snapshot_ref_by_name(vm_ref, snapshot_name)
if snapshot_ref is None: if snapshot_ref is None:
return "specified snapshot '{}' does not exist".format(snapshot_name) return f"specified snapshot '{snapshot_name}' does not exist"
task = snapshot_ref.snapshot.Revert(suppressPowerOn=suppress_power_on) task = snapshot_ref.snapshot.Revert(suppressPowerOn=suppress_power_on)
salt.utils.vmware.wait_for_task(task, name, "revert to snapshot", 5, "info") salt.utils.vmware.wait_for_task(task, name, "revert to snapshot", 5, "info")
@ -4421,7 +4421,7 @@ def convert_to_template(name, kwargs=None, call=None):
vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name)
if vm_ref.config.template: if vm_ref.config.template:
raise SaltCloudSystemExit("{} already a template".format(name)) raise SaltCloudSystemExit(f"{name} already a template")
try: try:
vm_ref.MarkAsTemplate() vm_ref.MarkAsTemplate()
@ -4435,7 +4435,7 @@ def convert_to_template(name, kwargs=None, call=None):
) )
return "failed to convert to teamplate" return "failed to convert to teamplate"
return "{} converted to template".format(name) return f"{name} converted to template"
def add_host(kwargs=None, call=None): def add_host(kwargs=None, call=None):
@ -4557,7 +4557,7 @@ def add_host(kwargs=None, call=None):
("echo", "-n"), stdout=subprocess.PIPE, stderr=subprocess.PIPE ("echo", "-n"), stdout=subprocess.PIPE, stderr=subprocess.PIPE
) )
p2 = subprocess.Popen( p2 = subprocess.Popen(
("openssl", "s_client", "-connect", "{}:443".format(host_name)), ("openssl", "s_client", "-connect", f"{host_name}:443"),
stdin=p1.stdout, stdin=p1.stdout,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stderr=subprocess.PIPE,
@ -4587,12 +4587,12 @@ def add_host(kwargs=None, call=None):
try: try:
if cluster_name: if cluster_name:
task = cluster_ref.AddHost(spec=spec, asConnected=True) task = cluster_ref.AddHost(spec=spec, asConnected=True)
ret = "added host system to cluster {}".format(cluster_name) ret = f"added host system to cluster {cluster_name}"
if datacenter_name: if datacenter_name:
task = datacenter_ref.hostFolder.AddStandaloneHost( task = datacenter_ref.hostFolder.AddStandaloneHost(
spec=spec, addConnected=True spec=spec, addConnected=True
) )
ret = "added host system to datacenter {}".format(datacenter_name) ret = f"added host system to datacenter {datacenter_name}"
salt.utils.vmware.wait_for_task(task, host_name, "add host system", 5, "info") salt.utils.vmware.wait_for_task(task, host_name, "add host system", 5, "info")
except Exception as exc: # pylint: disable=broad-except except Exception as exc: # pylint: disable=broad-except
if isinstance(exc, vim.fault.SSLVerifyFault): if isinstance(exc, vim.fault.SSLVerifyFault):

View file

@ -124,7 +124,7 @@ def _get_session():
Get a connection to the XenServer host Get a connection to the XenServer host
""" """
api_version = "1.0" api_version = "1.0"
originator = "salt_cloud_{}_driver".format(__virtualname__) originator = f"salt_cloud_{__virtualname__}_driver"
url = config.get_cloud_config_value( url = config.get_cloud_config_value(
"url", get_configured_provider(), __opts__, search_global=False "url", get_configured_provider(), __opts__, search_global=False
) )
@ -550,7 +550,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"starting create", "starting create",
"salt/cloud/{}/creating".format(name), f"salt/cloud/{name}/creating",
args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]}, args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -580,7 +580,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"requesting instance", "requesting instance",
"salt/cloud/{}/requesting".format(name), f"salt/cloud/{name}/requesting",
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
) )
@ -623,7 +623,7 @@ def create(vm_):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"created instance", "created instance",
"salt/cloud/{}/created".format(name), f"salt/cloud/{name}/created",
args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]}, args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -923,7 +923,7 @@ def reboot(name, call=None, session=None):
_run_async_task(task, session) _run_async_task(task, session)
return show_instance(name) return show_instance(name)
else: else:
return "{} is not running to be rebooted".format(name) return f"{name} is not running to be rebooted"
def _get_vm(name=None, session=None): def _get_vm(name=None, session=None):
@ -984,7 +984,7 @@ def destroy(name=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1009,7 +1009,7 @@ def destroy(name=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -1134,7 +1134,7 @@ def vif_list(name, call=None, kwargs=None):
x = 0 x = 0
for vif in vifs: for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif) vif_record = session.xenapi.VIF.get_record(vif)
data["vif-{}".format(x)] = vif_record data[f"vif-{x}"] = vif_record
x += 1 x += 1
ret[name] = data ret[name] = data
return ret return ret
@ -1168,7 +1168,7 @@ def vbd_list(name=None, call=None):
x = 0 x = 0
for vbd in vbds: for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd) vbd_record = session.xenapi.VBD.get_record(vbd)
data["vbd-{}".format(x)] = vbd_record data[f"vbd-{x}"] = vbd_record
x += 1 x += 1
ret = data ret = data
return ret return ret
@ -1219,7 +1219,7 @@ def destroy_vm_vdis(name=None, session=None, call=None):
vdi_record = session.xenapi.VDI.get_record(vbd_record["VDI"]) vdi_record = session.xenapi.VDI.get_record(vbd_record["VDI"])
if "iso" not in vdi_record["name_label"]: if "iso" not in vdi_record["name_label"]:
session.xenapi.VDI.destroy(vbd_record["VDI"]) session.xenapi.VDI.destroy(vbd_record["VDI"])
ret["vdi-{}".format(x)] = vdi_record["name_label"] ret[f"vdi-{x}"] = vdi_record["name_label"]
x += 1 x += 1
return ret return ret

View file

@ -61,7 +61,7 @@ def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
) )
) )
if why: if why:
errormsg += " for {}".format(why) errormsg += f" for {why}"
errormsg += ". Please upgrade." errormsg += ". Please upgrade."
raise ImportError(errormsg) raise ImportError(errormsg)
@ -186,7 +186,7 @@ def get_location(conn, vm_):
return img return img
raise SaltCloudNotFound( raise SaltCloudNotFound(
"The specified location, '{}', could not be found.".format(vm_location) f"The specified location, '{vm_location}', could not be found."
) )
@ -204,9 +204,7 @@ def get_image(conn, vm_):
if vm_image and vm_image in (img_id, img_name): if vm_image and vm_image in (img_id, img_name):
return img return img
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified image, '{vm_image}', could not be found.")
"The specified image, '{}', could not be found.".format(vm_image)
)
def get_size(conn, vm_): def get_size(conn, vm_):
@ -224,9 +222,7 @@ def get_size(conn, vm_):
str(size.name), str(size.name),
): ):
return size return size
raise SaltCloudNotFound( raise SaltCloudNotFound(f"The specified size, '{vm_size}', could not be found.")
"The specified size, '{}', could not be found.".format(vm_size)
)
def script(vm_): def script(vm_):
@ -257,7 +253,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroying instance", "destroying instance",
"salt/cloud/{}/destroying".format(name), f"salt/cloud/{name}/destroying",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -296,7 +292,7 @@ def destroy(name, conn=None, call=None):
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"destroyed instance", "destroyed instance",
"salt/cloud/{}/destroyed".format(name), f"salt/cloud/{name}/destroyed",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],
@ -338,8 +334,8 @@ def reboot(name, conn=None):
# Fire reboot action # Fire reboot action
__utils__["cloud.fire_event"]( __utils__["cloud.fire_event"](
"event", "event",
"{} has been rebooted".format(name), f"{name} has been rebooted",
"salt/cloud/{}/rebooting".format(name), f"salt/cloud/{name}/rebooting",
args={"name": name}, args={"name": name},
sock_dir=__opts__["sock_dir"], sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"], transport=__opts__["transport"],

View file

@ -2026,7 +2026,7 @@ def _read_conf_file(path):
try: try:
conf_opts = salt.utils.yaml.safe_load(conf_file) or {} conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
except salt.utils.yaml.YAMLError as err: except salt.utils.yaml.YAMLError as err:
message = "Error parsing configuration file: {} - {}".format(path, err) message = f"Error parsing configuration file: {path} - {err}"
log.error(message) log.error(message)
if path.endswith("_schedule.conf"): if path.endswith("_schedule.conf"):
# Create empty dictionary of config options # Create empty dictionary of config options
@ -2123,7 +2123,7 @@ def load_config(path, env_var, default_path=None, exit_on_config_errors=True):
# If the configuration file is missing, attempt to copy the template, # If the configuration file is missing, attempt to copy the template,
# after removing the first header line. # after removing the first header line.
if not os.path.isfile(path): if not os.path.isfile(path):
template = "{}.template".format(path) template = f"{path}.template"
if os.path.isfile(template): if os.path.isfile(template):
log.debug("Writing %s based on %s", path, template) log.debug("Writing %s based on %s", path, template)
with salt.utils.files.fopen(path, "w") as out: with salt.utils.files.fopen(path, "w") as out:
@ -2800,7 +2800,7 @@ def apply_cloud_config(overrides, defaults=None):
if alias not in config["providers"]: if alias not in config["providers"]:
config["providers"][alias] = {} config["providers"][alias] = {}
detail["provider"] = "{}:{}".format(alias, driver) detail["provider"] = f"{alias}:{driver}"
config["providers"][alias][driver] = detail config["providers"][alias][driver] = detail
elif isinstance(details, dict): elif isinstance(details, dict):
if "driver" not in details: if "driver" not in details:
@ -2817,7 +2817,7 @@ def apply_cloud_config(overrides, defaults=None):
if alias not in config["providers"]: if alias not in config["providers"]:
config["providers"][alias] = {} config["providers"][alias] = {}
details["provider"] = "{}:{}".format(alias, driver) details["provider"] = f"{alias}:{driver}"
config["providers"][alias][driver] = details config["providers"][alias][driver] = details
# Migrate old configuration # Migrate old configuration
@ -3088,7 +3088,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
for entry in val: for entry in val:
if "driver" not in entry: if "driver" not in entry:
entry["driver"] = "-only-extendable-{}".format(ext_count) entry["driver"] = f"-only-extendable-{ext_count}"
ext_count += 1 ext_count += 1
if key not in providers: if key not in providers:
@ -3131,7 +3131,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
details["driver"], provider_alias, alias, provider details["driver"], provider_alias, alias, provider
) )
) )
details["extends"] = "{}:{}".format(alias, provider) details["extends"] = f"{alias}:{provider}"
# change provider details '-only-extendable-' to extended # change provider details '-only-extendable-' to extended
# provider name # provider name
details["driver"] = provider details["driver"] = provider
@ -3152,10 +3152,10 @@ def apply_cloud_providers_config(overrides, defaults=None):
) )
else: else:
if driver in providers.get(extends): if driver in providers.get(extends):
details["extends"] = "{}:{}".format(extends, driver) details["extends"] = f"{extends}:{driver}"
elif "-only-extendable-" in providers.get(extends): elif "-only-extendable-" in providers.get(extends):
details["extends"] = "{}:{}".format( details["extends"] = "{}:{}".format(
extends, "-only-extendable-{}".format(ext_count) extends, f"-only-extendable-{ext_count}"
) )
else: else:
# We're still not aware of what we're trying to extend # We're still not aware of what we're trying to extend
@ -3869,7 +3869,7 @@ def _update_discovery_config(opts):
for key in opts["discovery"]: for key in opts["discovery"]:
if key not in discovery_config: if key not in discovery_config:
raise salt.exceptions.SaltConfigurationError( raise salt.exceptions.SaltConfigurationError(
"Unknown discovery option: {}".format(key) f"Unknown discovery option: {key}"
) )
if opts.get("__role") != "minion": if opts.get("__role") != "minion":
for key in ["attempts", "pause", "match"]: for key in ["attempts", "pause", "match"]:

View file

@ -25,7 +25,7 @@ class DefaultIncludeConfig(StringItem):
description = __doc__ description = __doc__
def __init__(self, default=None, pattern=None, **kwargs): def __init__(self, default=None, pattern=None, **kwargs):
default = "{}/*.conf".format(self.__confd_directory__) default = f"{self.__confd_directory__}/*.conf"
pattern = r"(?:.*)/\*\.conf" pattern = r"(?:.*)/\*\.conf"
super().__init__(default=default, pattern=pattern, **kwargs) super().__init__(default=default, pattern=pattern, **kwargs)

View file

@ -128,8 +128,8 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
:return: Path on the filesystem to the RSA private key :return: Path on the filesystem to the RSA private key
""" """
base = os.path.join(keydir, keyname) base = os.path.join(keydir, keyname)
priv = "{}.pem".format(base) priv = f"{base}.pem"
pub = "{}.pub".format(base) pub = f"{base}.pub"
if HAS_M2: if HAS_M2:
gen = RSA.gen_key(keysize, 65537, lambda: None) gen = RSA.gen_key(keysize, 65537, lambda: None)
@ -449,7 +449,7 @@ class MasterKeys(dict):
try: try:
key = get_rsa_key(path, passphrase) key = get_rsa_key(path, passphrase)
except key_error as e: except key_error as e:
message = "Unable to read key: {}; passphrase may be incorrect".format(path) message = f"Unable to read key: {path}; passphrase may be incorrect"
log.error(message) log.error(message)
raise MasterExit(message) raise MasterExit(message)
log.debug("Loaded %s key: %s", name, path) log.debug("Loaded %s key: %s", name, path)


@ -140,7 +140,7 @@ def extract_masters(opts, masters="master", port=None, raise_if_empty=True):
entries = opts.get(masters, []) entries = opts.get(masters, [])
if not entries: if not entries:
emsg = "Invalid or missing opts['{}'].".format(masters) emsg = f"Invalid or missing opts['{masters}']."
log.error(emsg) log.error(emsg)
if raise_if_empty: if raise_if_empty:
raise ValueError(emsg) raise ValueError(emsg)


@ -88,7 +88,7 @@ def clean_fsbackend(opts):
# Clear remote fileserver backend caches so they get recreated # Clear remote fileserver backend caches so they get recreated
for backend in ("git", "hg", "svn"): for backend in ("git", "hg", "svn"):
if backend in opts["fileserver_backend"]: if backend in opts["fileserver_backend"]:
env_cache = os.path.join(opts["cachedir"], "{}fs".format(backend), "envs.p") env_cache = os.path.join(opts["cachedir"], f"{backend}fs", "envs.p")
if os.path.isfile(env_cache): if os.path.isfile(env_cache):
log.debug("Clearing %sfs env cache", backend) log.debug("Clearing %sfs env cache", backend)
try: try:
@ -99,7 +99,7 @@ def clean_fsbackend(opts):
) )
file_lists_dir = os.path.join( file_lists_dir = os.path.join(
opts["cachedir"], "file_lists", "{}fs".format(backend) opts["cachedir"], "file_lists", f"{backend}fs"
) )
try: try:
file_lists_caches = os.listdir(file_lists_dir) file_lists_caches = os.listdir(file_lists_dir)
@ -177,7 +177,7 @@ def mk_key(opts, user):
opts["cachedir"], ".{}_key".format(user.replace("\\", "_")) opts["cachedir"], ".{}_key".format(user.replace("\\", "_"))
) )
else: else:
keyfile = os.path.join(opts["cachedir"], ".{}_key".format(user)) keyfile = os.path.join(opts["cachedir"], f".{user}_key")
if os.path.exists(keyfile): if os.path.exists(keyfile):
log.debug("Removing stale keyfile: %s", keyfile) log.debug("Removing stale keyfile: %s", keyfile)
@ -589,7 +589,7 @@ class RemoteFuncs:
minions = _res["minions"] minions = _res["minions"]
minion_side_acl = {} # Cache minion-side ACL minion_side_acl = {} # Cache minion-side ACL
for minion in minions: for minion in minions:
mine_data = self.cache.fetch("minions/{}".format(minion), "mine") mine_data = self.cache.fetch(f"minions/{minion}", "mine")
if not isinstance(mine_data, dict): if not isinstance(mine_data, dict):
continue continue
for function in functions_allowed: for function in functions_allowed:
@ -616,7 +616,7 @@ class RemoteFuncs:
continue continue
salt.utils.dictupdate.set_dict_key_value( salt.utils.dictupdate.set_dict_key_value(
minion_side_acl, minion_side_acl,
"{}:{}".format(minion, function), f"{minion}:{function}",
get_minion, get_minion,
) )
if salt.utils.mine.minion_side_acl_denied( if salt.utils.mine.minion_side_acl_denied(
@ -1176,7 +1176,7 @@ class LocalFuncs:
fun = load.pop("fun") fun = load.pop("fun")
tag = salt.utils.event.tagify(jid, prefix="wheel") tag = salt.utils.event.tagify(jid, prefix="wheel")
data = { data = {
"fun": "wheel.{}".format(fun), "fun": f"wheel.{fun}",
"jid": jid, "jid": jid,
"tag": tag, "tag": tag,
"user": username, "user": username,
@ -1260,7 +1260,7 @@ class LocalFuncs:
# Setup authorization list variable and error information # Setup authorization list variable and error information
auth_list = auth_check.get("auth_list", []) auth_list = auth_check.get("auth_list", [])
error = auth_check.get("error") error = auth_check.get("error")
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type) err_msg = f'Authentication failure of type "{auth_type}" occurred.'
if error: if error:
# Authentication error occurred: do not continue. # Authentication error occurred: do not continue.


@ -45,8 +45,8 @@ class _Constant:
def __repr__(self): def __repr__(self):
if self.value: if self.value:
return "<Constant.{} value={}>".format(self.name, self.value) return f"<Constant.{self.name} value={self.value}>"
return "<Constant.{}>".format(self.name) return f"<Constant.{self.name}>"
# Default delimiter for multi-level traversal in targeting # Default delimiter for multi-level traversal in targeting


@ -48,13 +48,13 @@ def start_engines(opts, proc_mgr, proxy=None):
engine_name = engine engine_name = engine
del engine_opts["engine_module"] del engine_opts["engine_module"]
else: else:
fun = "{}.start".format(engine) fun = f"{engine}.start"
if fun in engines: if fun in engines:
start_func = engines[fun] start_func = engines[fun]
if engine_name: if engine_name:
name = "Engine({}, name={})".format(start_func.__module__, engine_name) name = f"Engine({start_func.__module__}, name={engine_name})"
else: else:
name = "Engine({})".format(start_func.__module__) name = f"Engine({start_func.__module__})"
log.info("Starting %s", name) log.info("Starting %s", name)
proc_mgr.add_process( proc_mgr.add_process(
Engine, Engine,


@ -173,16 +173,16 @@ class IRCClient:
event.source, nick, user, host, event.code, channel, command, line event.source, nick, user, host, event.code, channel, command, line
) )
if (self._allow_nick(nick) or self._allow_host(host)) and hasattr( if (self._allow_nick(nick) or self._allow_host(host)) and hasattr(
self, "_command_{}".format(command) self, f"_command_{command}"
): ):
getattr(self, "_command_{}".format(command))(privevent) getattr(self, f"_command_{command}")(privevent)
def _command_echo(self, event): def _command_echo(self, event):
message = "PRIVMSG {} :{}".format(event.channel, event.line) message = f"PRIVMSG {event.channel} :{event.line}"
self.send_message(message) self.send_message(message)
def _command_ping(self, event): def _command_ping(self, event):
message = "PRIVMSG {} :{}: pong".format(event.channel, event.nick) message = f"PRIVMSG {event.channel} :{event.nick}: pong"
self.send_message(message) self.send_message(message)
def _command_event(self, event): def _command_event(self, event):
@ -210,7 +210,7 @@ class IRCClient:
payload = {"data": []} payload = {"data": []}
fire("salt/engines/ircbot/" + tag, payload) fire("salt/engines/ircbot/" + tag, payload)
message = "PRIVMSG {} :{}: TaDa!".format(event.channel, event.nick) message = f"PRIVMSG {event.channel} :{event.nick}: TaDa!"
self.send_message(message) self.send_message(message)
def _message(self, raw): def _message(self, raw):
@ -219,7 +219,7 @@ class IRCClient:
if event.code == "PING": if event.code == "PING":
salt.ext.tornado.ioloop.IOLoop.current().spawn_callback( salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
self.send_message, "PONG {}".format(event.line) self.send_message, f"PONG {event.line}"
) )
elif event.code == "PRIVMSG": elif event.code == "PRIVMSG":
salt.ext.tornado.ioloop.IOLoop.current().spawn_callback( salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
@ -230,13 +230,13 @@ class IRCClient:
def join_channel(self, channel): def join_channel(self, channel):
if not channel.startswith("#"): if not channel.startswith("#"):
channel = "#" + channel channel = "#" + channel
self.send_message("JOIN {}".format(channel)) self.send_message(f"JOIN {channel}")
def on_connect(self): def on_connect(self):
logging.info("on_connect") logging.info("on_connect")
if self.sasl is True: if self.sasl is True:
self.send_message("CAP REQ :sasl") self.send_message("CAP REQ :sasl")
self.send_message("NICK {}".format(self.nick)) self.send_message(f"NICK {self.nick}")
self.send_message("USER saltstack 0 * :saltstack") self.send_message("USER saltstack 0 * :saltstack")
if self.password: if self.password:
if self.sasl is True: if self.sasl is True:
@ -244,7 +244,7 @@ class IRCClient:
"{0}\x00{0}\x00{1}".format(self.username, self.password).encode() "{0}\x00{0}\x00{1}".format(self.username, self.password).encode()
) )
self.send_message("AUTHENTICATE PLAIN") self.send_message("AUTHENTICATE PLAIN")
self.send_message("AUTHENTICATE {}".format(authstring)) self.send_message(f"AUTHENTICATE {authstring}")
self.send_message("CAP END") self.send_message("CAP END")
else: else:
self.send_message( self.send_message(


@ -189,7 +189,7 @@ def _get_domain_event_detail(event, detail):
if event_name == "unknown": if event_name == "unknown":
return event_name, "unknown" return event_name, "unknown"
prefix = "VIR_DOMAIN_EVENT_{}_".format(event_name.upper()) prefix = f"VIR_DOMAIN_EVENT_{event_name.upper()}_"
detail_name = _get_libvirt_enum_string(prefix, detail) detail_name = _get_libvirt_enum_string(prefix, detail)
return event_name, detail_name return event_name, detail_name
@ -333,9 +333,7 @@ def _domain_event_graphics_cb(
transform address structure into event data piece transform address structure into event data piece
""" """
return { return {
"family": _get_libvirt_enum_string( "family": _get_libvirt_enum_string(f"{prefix}_ADDRESS_", addr["family"]),
"{}_ADDRESS_".format(prefix), addr["family"]
),
"node": addr["node"], "node": addr["node"],
"service": addr["service"], "service": addr["service"],
} }
@ -680,14 +678,14 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
""" """
libvirt_name = real_id libvirt_name = real_id
if real_id is None: if real_id is None:
libvirt_name = "VIR_{}_EVENT_ID_{}".format(obj, event).upper() libvirt_name = f"VIR_{obj}_EVENT_ID_{event}".upper()
if not hasattr(libvirt, libvirt_name): if not hasattr(libvirt, libvirt_name):
log.warning('Skipping "%s/%s" events: libvirt too old', obj, event) log.warning('Skipping "%s/%s" events: libvirt too old', obj, event)
return None return None
libvirt_id = getattr(libvirt, libvirt_name) libvirt_id = getattr(libvirt, libvirt_name)
callback_name = "_{}_event_{}_cb".format(obj, event) callback_name = f"_{obj}_event_{event}_cb"
callback = globals().get(callback_name, None) callback = globals().get(callback_name, None)
if callback is None: if callback is None:
log.error("Missing function %s in engine", callback_name) log.error("Missing function %s in engine", callback_name)


@ -209,7 +209,7 @@ def _zmq(address, port, **kwargs):
socket = context.socket(zmq.SUB) socket = context.socket(zmq.SUB)
if salt.utils.network.is_ipv6(address): if salt.utils.network.is_ipv6(address):
socket.ipv6 = True socket.ipv6 = True
socket.connect("tcp://{addr}:{port}".format(addr=address, port=port)) socket.connect(f"tcp://{address}:{port}")
socket.setsockopt(zmq.SUBSCRIBE, b"") socket.setsockopt(zmq.SUBSCRIBE, b"")
return socket.recv return socket.recv


@ -59,7 +59,7 @@ def _get_serializer(output):
return getattr(serializers, output) return getattr(serializers, output)
except AttributeError: except AttributeError:
raise CommandExecutionError( raise CommandExecutionError(
"Unknown serializer `{}` found for output option".format(output) f"Unknown serializer `{output}` found for output option"
) )


@ -747,7 +747,7 @@ class SlackClient:
results = {} results = {}
for jid in outstanding_jids: for jid in outstanding_jids:
# results[jid] = runner.cmd('jobs.lookup_jid', [jid]) # results[jid] = runner.cmd('jobs.lookup_jid', [jid])
if self.master_minion.returners["{}.get_jid".format(source)](jid): if self.master_minion.returners[f"{source}.get_jid"](jid):
job_result = runner.cmd("jobs.list_job", [jid]) job_result = runner.cmd("jobs.list_job", [jid])
jid_result = job_result.get("Result", {}) jid_result = job_result.get("Result", {})
jid_function = job_result.get("Function", {}) jid_function = job_result.get("Function", {})
@ -838,7 +838,7 @@ class SlackClient:
channel.send_message(return_prefix) channel.send_message(return_prefix)
ts = time.time() ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S%f") st = datetime.datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S%f")
filename = "salt-results-{}.yaml".format(st) filename = f"salt-results-{st}.yaml"
r = self.sc.api_call( r = self.sc.api_call(
"files.upload", "files.upload",
channels=channel.id, channels=channel.id,
@ -944,4 +944,4 @@ def start(
) )
client.run_commands_from_slack_async(message_generator, fire_all, tag, control) client.run_commands_from_slack_async(message_generator, fire_all, tag, control)
except Exception: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
raise Exception("{}".format(traceback.format_exc())) raise Exception(f"{traceback.format_exc()}")


@ -284,7 +284,7 @@ class SaltRenderError(SaltException):
self.buffer = buf self.buffer = buf
self.context = "" self.context = ""
if trace: if trace:
exc_str += "\n{}\n".format(trace) exc_str += f"\n{trace}\n"
if self.line_num and self.buffer: if self.line_num and self.buffer:
# Avoid circular import # Avoid circular import
import salt.utils.templates import salt.utils.templates


@ -22,7 +22,7 @@ def __virtual__():
"Docker executor is only meant to be used with Docker Proxy Minions", "Docker executor is only meant to be used with Docker Proxy Minions",
) )
if __opts__.get("proxy", {}).get("proxytype") != __virtualname__: if __opts__.get("proxy", {}).get("proxytype") != __virtualname__:
return False, "Proxytype does not match: {}".format(__virtualname__) return False, f"Proxytype does not match: {__virtualname__}"
return True return True


@ -64,7 +64,7 @@ def execute(opts, data, func, args, kwargs):
for arg in args: for arg in args:
cmd.append(shlex.quote(str(arg))) cmd.append(shlex.quote(str(arg)))
for key in kwargs: for key in kwargs:
cmd.append(shlex.quote("{}={}".format(key, kwargs[key]))) cmd.append(shlex.quote(f"{key}={kwargs[key]}"))
cmd_ret = __salt__["cmd.run_all"](cmd, use_vt=True, python_shell=False) cmd_ret = __salt__["cmd.run_all"](cmd, use_vt=True, python_shell=False)


@ -107,7 +107,7 @@ class Client:
Make sure that this path is intended for the salt master and trim it Make sure that this path is intended for the salt master and trim it
""" """
if not path.startswith("salt://"): if not path.startswith("salt://"):
raise MinionError("Unsupported path: {}".format(path)) raise MinionError(f"Unsupported path: {path}")
file_path, saltenv = salt.utils.url.parse(path) file_path, saltenv = salt.utils.url.parse(path)
return file_path return file_path
@ -273,7 +273,7 @@ class Client:
for fn_ in self.file_list_emptydirs(saltenv): for fn_ in self.file_list_emptydirs(saltenv):
fn_ = salt.utils.data.decode(fn_) fn_ = salt.utils.data.decode(fn_)
if fn_.startswith(path): if fn_.startswith(path):
minion_dir = "{}/{}".format(dest, fn_) minion_dir = f"{dest}/{fn_}"
if not os.path.isdir(minion_dir): if not os.path.isdir(minion_dir):
os.makedirs(minion_dir) os.makedirs(minion_dir)
ret.append(minion_dir) ret.append(minion_dir)
@ -438,7 +438,7 @@ class Client:
ret.append( ret.append(
self.get_file( self.get_file(
salt.utils.url.create(fn_), salt.utils.url.create(fn_),
"{}/{}".format(dest, minion_relpath), f"{dest}/{minion_relpath}",
True, True,
saltenv, saltenv,
gzip, gzip,
@ -457,7 +457,7 @@ class Client:
# Remove the leading directories from path to derive # Remove the leading directories from path to derive
# the relative path on the minion. # the relative path on the minion.
minion_relpath = fn_[len(prefix) :].lstrip("/") minion_relpath = fn_[len(prefix) :].lstrip("/")
minion_mkdir = "{}/{}".format(dest, minion_relpath) minion_mkdir = f"{dest}/{minion_relpath}"
if not os.path.isdir(minion_mkdir): if not os.path.isdir(minion_mkdir):
os.makedirs(minion_mkdir) os.makedirs(minion_mkdir)
ret.append(minion_mkdir) ret.append(minion_mkdir)
@ -508,9 +508,7 @@ class Client:
if url_scheme in ("file", ""): if url_scheme in ("file", ""):
# Local filesystem # Local filesystem
if not os.path.isabs(url_path): if not os.path.isabs(url_path):
raise CommandExecutionError( raise CommandExecutionError(f"Path '{url_path}' is not absolute")
"Path '{}' is not absolute".format(url_path)
)
if dest is None: if dest is None:
with salt.utils.files.fopen(url_path, "rb") as fp_: with salt.utils.files.fopen(url_path, "rb") as fp_:
data = fp_.read() data = fp_.read()
@ -584,9 +582,7 @@ class Client:
) )
return dest return dest
except Exception as exc: # pylint: disable=broad-except except Exception as exc: # pylint: disable=broad-except
raise MinionError( raise MinionError(f"Could not fetch from {url}. Exception: {exc}")
"Could not fetch from {}. Exception: {}".format(url, exc)
)
if url_data.scheme == "ftp": if url_data.scheme == "ftp":
try: try:
ftp = ftplib.FTP() # nosec ftp = ftplib.FTP() # nosec
@ -597,7 +593,7 @@ class Client:
ftp.login(url_data.username, url_data.password) ftp.login(url_data.username, url_data.password)
remote_file_path = url_data.path.lstrip("/") remote_file_path = url_data.path.lstrip("/")
with salt.utils.files.fopen(dest, "wb") as fp_: with salt.utils.files.fopen(dest, "wb") as fp_:
ftp.retrbinary("RETR {}".format(remote_file_path), fp_.write) ftp.retrbinary(f"RETR {remote_file_path}", fp_.write)
ftp.quit() ftp.quit()
return dest return dest
except Exception as exc: # pylint: disable=broad-except except Exception as exc: # pylint: disable=broad-except
@ -631,7 +627,7 @@ class Client:
swift_conn.get_object(url_data.netloc, url_data.path[1:], dest) swift_conn.get_object(url_data.netloc, url_data.path[1:], dest)
return dest return dest
except Exception: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
raise MinionError("Could not fetch from {}".format(url)) raise MinionError(f"Could not fetch from {url}")
get_kwargs = {} get_kwargs = {}
if url_data.username is not None and url_data.scheme in ("http", "https"): if url_data.username is not None and url_data.scheme in ("http", "https"):
@ -654,7 +650,7 @@ class Client:
fixed_url = url fixed_url = url
destfp = None destfp = None
dest_etag = "{}.etag".format(dest) dest_etag = f"{dest}.etag"
try: try:
# Tornado calls streaming_callback on redirect response bodies. # Tornado calls streaming_callback on redirect response bodies.
# But we need streaming to support fetching large files (> RAM # But we need streaming to support fetching large files (> RAM
@ -768,7 +764,7 @@ class Client:
result.append(chunk) result.append(chunk)
else: else:
dest_tmp = "{}.part".format(dest) dest_tmp = f"{dest}.part"
# We need an open filehandle to use in the on_chunk callback, # We need an open filehandle to use in the on_chunk callback,
# that's why we're not using a with clause here. # that's why we're not using a with clause here.
# pylint: disable=resource-leakage # pylint: disable=resource-leakage
@ -830,7 +826,7 @@ class Client:
) )
) )
except urllib.error.URLError as exc: except urllib.error.URLError as exc:
raise MinionError("Error reading {}: {}".format(url, exc.reason)) raise MinionError(f"Error reading {url}: {exc.reason}")
finally: finally:
if destfp is not None: if destfp is not None:
destfp.close() destfp.close()


@ -320,9 +320,9 @@ def clear_lock(clear_func, role, remote=None, lock_type="update"):
Returns the return data from ``clear_func``. Returns the return data from ``clear_func``.
""" """
msg = "Clearing {} lock for {} remotes".format(lock_type, role) msg = f"Clearing {lock_type} lock for {role} remotes"
if remote: if remote:
msg += " matching {}".format(remote) msg += f" matching {remote}"
log.debug(msg) log.debug(msg)
return clear_func(remote=remote, lock_type=lock_type) return clear_func(remote=remote, lock_type=lock_type)
@ -375,12 +375,12 @@ class Fileserver:
# Only subtracting backends from enabled ones # Only subtracting backends from enabled ones
ret = self.opts["fileserver_backend"] ret = self.opts["fileserver_backend"]
for sub in back: for sub in back:
if "{}.envs".format(sub[1:]) in server_funcs: if f"{sub[1:]}.envs" in server_funcs:
ret.remove(sub[1:]) ret.remove(sub[1:])
return ret return ret
for sub in back: for sub in back:
if "{}.envs".format(sub) in server_funcs: if f"{sub}.envs" in server_funcs:
ret.append(sub) ret.append(sub)
return ret return ret
@ -408,7 +408,7 @@ class Fileserver:
cleared = [] cleared = []
errors = [] errors = []
for fsb in back: for fsb in back:
fstr = "{}.clear_cache".format(fsb) fstr = f"{fsb}.clear_cache"
if fstr in self.servers: if fstr in self.servers:
log.debug("Clearing %s fileserver cache", fsb) log.debug("Clearing %s fileserver cache", fsb)
failed = self.servers[fstr]() failed = self.servers[fstr]()
@ -416,7 +416,7 @@ class Fileserver:
errors.extend(failed) errors.extend(failed)
else: else:
cleared.append( cleared.append(
"The {} fileserver cache was successfully cleared".format(fsb) f"The {fsb} fileserver cache was successfully cleared"
) )
return cleared, errors return cleared, errors
@ -430,17 +430,15 @@ class Fileserver:
locked = [] locked = []
errors = [] errors = []
for fsb in back: for fsb in back:
fstr = "{}.lock".format(fsb) fstr = f"{fsb}.lock"
if fstr in self.servers: if fstr in self.servers:
msg = "Setting update lock for {} remotes".format(fsb) msg = f"Setting update lock for {fsb} remotes"
if remote: if remote:
if not isinstance(remote, str): if not isinstance(remote, str):
errors.append( errors.append(f"Badly formatted remote pattern '{remote}'")
"Badly formatted remote pattern '{}'".format(remote)
)
continue continue
else: else:
msg += " matching {}".format(remote) msg += f" matching {remote}"
log.debug(msg) log.debug(msg)
good, bad = self.servers[fstr](remote=remote) good, bad = self.servers[fstr](remote=remote)
locked.extend(good) locked.extend(good)
@ -463,7 +461,7 @@ class Fileserver:
cleared = [] cleared = []
errors = [] errors = []
for fsb in back: for fsb in back:
fstr = "{}.clear_lock".format(fsb) fstr = f"{fsb}.clear_lock"
if fstr in self.servers: if fstr in self.servers:
good, bad = clear_lock(self.servers[fstr], fsb, remote=remote) good, bad = clear_lock(self.servers[fstr], fsb, remote=remote)
cleared.extend(good) cleared.extend(good)
@ -477,7 +475,7 @@ class Fileserver:
""" """
back = self.backends(back) back = self.backends(back)
for fsb in back: for fsb in back:
fstr = "{}.update".format(fsb) fstr = f"{fsb}.update"
if fstr in self.servers: if fstr in self.servers:
log.debug("Updating %s fileserver cache", fsb) log.debug("Updating %s fileserver cache", fsb)
self.servers[fstr](**kwargs) self.servers[fstr](**kwargs)
@ -490,7 +488,7 @@ class Fileserver:
back = self.backends(back) back = self.backends(back)
ret = {} ret = {}
for fsb in back: for fsb in back:
fstr = "{}.update_intervals".format(fsb) fstr = f"{fsb}.update_intervals"
if fstr in self.servers: if fstr in self.servers:
ret[fsb] = self.servers[fstr]() ret[fsb] = self.servers[fstr]()
return ret return ret
@ -504,7 +502,7 @@ class Fileserver:
if sources: if sources:
ret = {} ret = {}
for fsb in back: for fsb in back:
fstr = "{}.envs".format(fsb) fstr = f"{fsb}.envs"
kwargs = ( kwargs = (
{"ignore_cache": True} {"ignore_cache": True}
if "ignore_cache" in _argspec(self.servers[fstr]).args if "ignore_cache" in _argspec(self.servers[fstr]).args
@ -534,7 +532,7 @@ class Fileserver:
""" """
back = self.backends(back) back = self.backends(back)
for fsb in back: for fsb in back:
fstr = "{}.init".format(fsb) fstr = f"{fsb}.init"
if fstr in self.servers: if fstr in self.servers:
self.servers[fstr]() self.servers[fstr]()
@ -596,7 +594,7 @@ class Fileserver:
saltenv = str(saltenv) saltenv = str(saltenv)
for fsb in back: for fsb in back:
fstr = "{}.find_file".format(fsb) fstr = f"{fsb}.find_file"
if fstr in self.servers: if fstr in self.servers:
fnd = self.servers[fstr](path, saltenv, **kwargs) fnd = self.servers[fstr](path, saltenv, **kwargs)
if fnd.get("path"): if fnd.get("path"):
@ -766,7 +764,7 @@ class Fileserver:
load["saltenv"] = str(load["saltenv"]) load["saltenv"] = str(load["saltenv"])
for fsb in self.backends(load.pop("fsbackend", None)): for fsb in self.backends(load.pop("fsbackend", None)):
fstr = "{}.file_list".format(fsb) fstr = f"{fsb}.file_list"
if fstr in self.servers: if fstr in self.servers:
ret.update(self.servers[fstr](load)) ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered # some *fs do not handle prefix. Ensure it is filtered
@ -791,7 +789,7 @@ class Fileserver:
load["saltenv"] = str(load["saltenv"]) load["saltenv"] = str(load["saltenv"])
for fsb in self.backends(None): for fsb in self.backends(None):
fstr = "{}.file_list_emptydirs".format(fsb) fstr = f"{fsb}.file_list_emptydirs"
if fstr in self.servers: if fstr in self.servers:
ret.update(self.servers[fstr](load)) ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered # some *fs do not handle prefix. Ensure it is filtered
@ -816,7 +814,7 @@ class Fileserver:
load["saltenv"] = str(load["saltenv"]) load["saltenv"] = str(load["saltenv"])
for fsb in self.backends(load.pop("fsbackend", None)): for fsb in self.backends(load.pop("fsbackend", None)):
fstr = "{}.dir_list".format(fsb) fstr = f"{fsb}.dir_list"
if fstr in self.servers: if fstr in self.servers:
ret.update(self.servers[fstr](load)) ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered # some *fs do not handle prefix. Ensure it is filtered
@ -841,7 +839,7 @@ class Fileserver:
load["saltenv"] = str(load["saltenv"]) load["saltenv"] = str(load["saltenv"])
for fsb in self.backends(load.pop("fsbackend", None)): for fsb in self.backends(load.pop("fsbackend", None)):
symlstr = "{}.symlink_list".format(fsb) symlstr = f"{fsb}.symlink_list"
if symlstr in self.servers: if symlstr in self.servers:
ret = self.servers[symlstr](load) ret = self.servers[symlstr](load)
# some *fs do not handle prefix. Ensure it is filtered # some *fs do not handle prefix. Ensure it is filtered


@ -239,7 +239,7 @@ def init():
per_remote_defaults = {} per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES: for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = str(__opts__["hgfs_{}".format(param)]) per_remote_defaults[param] = str(__opts__[f"hgfs_{param}"])
for remote in __opts__["hgfs_remotes"]: for remote in __opts__["hgfs_remotes"]:
repo_conf = copy.deepcopy(per_remote_defaults) repo_conf = copy.deepcopy(per_remote_defaults)
@ -355,7 +355,7 @@ def init():
with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig: with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig:
hgconfig.write("[paths]\n") hgconfig.write("[paths]\n")
hgconfig.write( hgconfig.write(
salt.utils.stringutils.to_str("default = {}\n".format(repo_url)) salt.utils.stringutils.to_str(f"default = {repo_url}\n")
) )
repo_conf.update( repo_conf.update(
@ -365,7 +365,7 @@ def init():
"hash": repo_hash, "hash": repo_hash,
"cachedir": rp_, "cachedir": rp_,
"lockfile": os.path.join( "lockfile": os.path.join(
__opts__["cachedir"], "hgfs", "{}.update.lk".format(repo_hash) __opts__["cachedir"], "hgfs", f"{repo_hash}.update.lk"
), ),
} }
) )
@ -379,7 +379,7 @@ def init():
try: try:
with salt.utils.files.fopen(remote_map, "w+") as fp_: with salt.utils.files.fopen(remote_map, "w+") as fp_:
timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f") timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
fp_.write("# hgfs_remote map as of {}\n".format(timestamp)) fp_.write(f"# hgfs_remote map as of {timestamp}\n")
for repo in repos: for repo in repos:
fp_.write( fp_.write(
salt.utils.stringutils.to_str( salt.utils.stringutils.to_str(
@ -444,7 +444,7 @@ def clear_cache():
try: try:
shutil.rmtree(rdir) shutil.rmtree(rdir)
except OSError as exc: except OSError as exc:
errors.append("Unable to delete {}: {}".format(rdir, exc)) errors.append(f"Unable to delete {rdir}: {exc}")
return errors return errors
@ -694,14 +694,12 @@ def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path) dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path)
hashes_glob = os.path.join( hashes_glob = os.path.join(
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.*".format(path) __opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.hash.*"
) )
blobshadest = os.path.join( blobshadest = os.path.join(
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.blob_sha1".format(path) __opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.hash.blob_sha1"
)
lk_fn = os.path.join(
__opts__["cachedir"], "hgfs/hash", tgt_env, "{}.lk".format(path)
) )
lk_fn = os.path.join(__opts__["cachedir"], "hgfs/hash", tgt_env, f"{path}.lk")
destdir = os.path.dirname(dest) destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest) hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir): if not os.path.isdir(destdir):
@ -746,7 +744,7 @@ def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
return fnd return fnd
try: try:
repo["repo"].cat( repo["repo"].cat(
[salt.utils.stringutils.to_bytes("path:{}".format(repo_path))], [salt.utils.stringutils.to_bytes(f"path:{repo_path}")],
rev=ref[2], rev=ref[2],
output=dest, output=dest,
) )


@ -219,9 +219,7 @@ def update():
os.makedirs(mtime_map_path_dir) os.makedirs(mtime_map_path_dir)
with salt.utils.files.fopen(mtime_map_path, "wb") as fp_: with salt.utils.files.fopen(mtime_map_path, "wb") as fp_:
for file_path, mtime in new_mtime_map.items(): for file_path, mtime in new_mtime_map.items():
fp_.write( fp_.write(salt.utils.stringutils.to_bytes(f"{file_path}:{mtime}\n"))
salt.utils.stringutils.to_bytes("{}:{}\n".format(file_path, mtime))
)
if __opts__.get("fileserver_events", False): if __opts__.get("fileserver_events", False):
# if there is a change, fire an event # if there is a change, fire an event
@ -349,11 +347,11 @@ def _file_lists(load, form):
return [] return []
list_cache = os.path.join( list_cache = os.path.join(
list_cachedir, list_cachedir,
"{}.p".format(salt.utils.files.safe_filename_leaf(actual_saltenv)), f"{salt.utils.files.safe_filename_leaf(actual_saltenv)}.p",
) )
w_lock = os.path.join( w_lock = os.path.join(
list_cachedir, list_cachedir,
".{}.w".format(salt.utils.files.safe_filename_leaf(actual_saltenv)), f".{salt.utils.files.safe_filename_leaf(actual_saltenv)}.w",
) )
cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache( cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock __opts__, form, list_cache, w_lock


@ -136,7 +136,7 @@ def init():
per_remote_defaults = {} per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES: for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = str(__opts__["svnfs_{}".format(param)]) per_remote_defaults[param] = str(__opts__[f"svnfs_{param}"])
for remote in __opts__["svnfs_remotes"]: for remote in __opts__["svnfs_remotes"]:
repo_conf = copy.deepcopy(per_remote_defaults) repo_conf = copy.deepcopy(per_remote_defaults)
@ -239,7 +239,7 @@ def init():
try: try:
with salt.utils.files.fopen(remote_map, "w+") as fp_: with salt.utils.files.fopen(remote_map, "w+") as fp_:
timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f") timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
fp_.write("# svnfs_remote map as of {}\n".format(timestamp)) fp_.write(f"# svnfs_remote map as of {timestamp}\n")
for repo_conf in repos: for repo_conf in repos:
fp_.write( fp_.write(
salt.utils.stringutils.to_str( salt.utils.stringutils.to_str(
@ -306,7 +306,7 @@ def clear_cache():
try: try:
shutil.rmtree(rdir) shutil.rmtree(rdir)
except OSError as exc: except OSError as exc:
errors.append("Unable to delete {}: {}".format(rdir, exc)) errors.append(f"Unable to delete {rdir}: {exc}")
return errors return errors


@ -289,7 +289,7 @@ def _linux_gpu_data():
devs = [] devs = []
try: try:
lspci_out = __salt__["cmd.run"]("{} -vmm".format(lspci)) lspci_out = __salt__["cmd.run"](f"{lspci} -vmm")
cur_dev = {} cur_dev = {}
error = False error = False
@ -363,7 +363,7 @@ def _netbsd_gpu_data():
for line in pcictl_out.splitlines(): for line in pcictl_out.splitlines():
for vendor in known_vendors: for vendor in known_vendors:
vendor_match = re.match( vendor_match = re.match(
r"[0-9:]+ ({}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE rf"[0-9:]+ ({vendor}) (.+) \(VGA .+\)", line, re.IGNORECASE
) )
if vendor_match: if vendor_match:
gpus.append( gpus.append(
@ -425,18 +425,18 @@ def _bsd_cpudata(osdata):
if sysctl: if sysctl:
cmds.update( cmds.update(
{ {
"num_cpus": "{} -n hw.ncpu".format(sysctl), "num_cpus": f"{sysctl} -n hw.ncpu",
"cpuarch": "{} -n hw.machine".format(sysctl), "cpuarch": f"{sysctl} -n hw.machine",
"cpu_model": "{} -n hw.model".format(sysctl), "cpu_model": f"{sysctl} -n hw.model",
} }
) )
if arch and osdata["kernel"] == "OpenBSD": if arch and osdata["kernel"] == "OpenBSD":
cmds["cpuarch"] = "{} -s".format(arch) cmds["cpuarch"] = f"{arch} -s"
if osdata["kernel"] == "Darwin": if osdata["kernel"] == "Darwin":
cmds["cpu_model"] = "{} -n machdep.cpu.brand_string".format(sysctl) cmds["cpu_model"] = f"{sysctl} -n machdep.cpu.brand_string"
cmds["cpu_flags"] = "{} -n machdep.cpu.features".format(sysctl) cmds["cpu_flags"] = f"{sysctl} -n machdep.cpu.features"
grains = {k: __salt__["cmd.run"](v) for k, v in cmds.items()} grains = {k: __salt__["cmd.run"](v) for k, v in cmds.items()}
@ -521,7 +521,7 @@ def _aix_cpudata(): # pragma: no cover
grains = {} grains = {}
cmd = salt.utils.path.which("prtconf") cmd = salt.utils.path.which("prtconf")
if cmd: if cmd:
data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep data = __salt__["cmd.run"](f"{cmd}") + os.linesep
for dest, regstring in ( for dest, regstring in (
("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"), ("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"), ("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
@ -567,9 +567,9 @@ def _osx_memdata():
sysctl = salt.utils.path.which("sysctl") sysctl = salt.utils.path.which("sysctl")
if sysctl: if sysctl:
mem = __salt__["cmd.run"]("{} -n hw.memsize".format(sysctl)) mem = __salt__["cmd.run"](f"{sysctl} -n hw.memsize")
swap_total = ( swap_total = (
__salt__["cmd.run"]("{} -n vm.swapusage".format(sysctl)) __salt__["cmd.run"](f"{sysctl} -n vm.swapusage")
.split()[2] .split()[2]
.replace(",", ".") .replace(",", ".")
) )
@ -594,20 +594,20 @@ def _bsd_memdata(osdata):
sysctl = salt.utils.path.which("sysctl") sysctl = salt.utils.path.which("sysctl")
if sysctl: if sysctl:
mem = __salt__["cmd.run"]("{} -n hw.physmem".format(sysctl)) mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem")
if osdata["kernel"] == "NetBSD" and mem.startswith("-"): if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
mem = __salt__["cmd.run"]("{} -n hw.physmem64".format(sysctl)) mem = __salt__["cmd.run"](f"{sysctl} -n hw.physmem64")
grains["mem_total"] = int(mem) // 1024 // 1024 grains["mem_total"] = int(mem) // 1024 // 1024
if osdata["kernel"] in ["OpenBSD", "NetBSD"]: if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
swapctl = salt.utils.path.which("swapctl") swapctl = salt.utils.path.which("swapctl")
swap_data = __salt__["cmd.run"]("{} -sk".format(swapctl)) swap_data = __salt__["cmd.run"](f"{swapctl} -sk")
if swap_data == "no swap devices configured": if swap_data == "no swap devices configured":
swap_total = 0 swap_total = 0
else: else:
swap_total = swap_data.split(" ")[1] swap_total = swap_data.split(" ")[1]
else: else:
swap_total = __salt__["cmd.run"]("{} -n vm.swap_total".format(sysctl)) swap_total = __salt__["cmd.run"](f"{sysctl} -n vm.swap_total")
grains["swap_total"] = int(swap_total) // 1024 // 1024 grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains return grains
@ -625,7 +625,7 @@ def _sunos_memdata(): # pragma: no cover
grains["mem_total"] = int(comps[2].strip()) grains["mem_total"] = int(comps[2].strip())
swap_cmd = salt.utils.path.which("swap") swap_cmd = salt.utils.path.which("swap")
swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split() swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
try: try:
swap_avail = int(swap_data[-2][:-1]) swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1]) swap_used = int(swap_data[-4][:-1])
@ -653,7 +653,7 @@ def _aix_memdata(): # pragma: no cover
swap_cmd = salt.utils.path.which("swap") swap_cmd = salt.utils.path.which("swap")
if swap_cmd: if swap_cmd:
swap_data = __salt__["cmd.run"]("{} -s".format(swap_cmd)).split() swap_data = __salt__["cmd.run"](f"{swap_cmd} -s").split()
try: try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4 swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError: except ValueError:
@ -706,7 +706,7 @@ def _aix_get_machine_id(): # pragma: no cover
grains = {} grains = {}
cmd = salt.utils.path.which("lsattr") cmd = salt.utils.path.which("lsattr")
if cmd: if cmd:
data = __salt__["cmd.run"]("{} -El sys0".format(cmd)) + os.linesep data = __salt__["cmd.run"](f"{cmd} -El sys0") + os.linesep
uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")] uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
for regex in uuid_regexes: for regex in uuid_regexes:
res = regex.search(data) res = regex.search(data)
@ -1033,7 +1033,7 @@ def _virtual(osdata):
subtype_cmd = "{} -c current get -H -o value {}-role".format( subtype_cmd = "{} -c current get -H -o value {}-role".format(
command, role command, role
) )
ret = __salt__["cmd.run"]("{}".format(subtype_cmd)) ret = __salt__["cmd.run"](f"{subtype_cmd}")
if ret == "true": if ret == "true":
roles.append(role) roles.append(role)
if roles: if roles:
@ -1179,14 +1179,14 @@ def _virtual(osdata):
elif osdata["kernel"] == "FreeBSD": elif osdata["kernel"] == "FreeBSD":
kenv = salt.utils.path.which("kenv") kenv = salt.utils.path.which("kenv")
if kenv: if kenv:
product = __salt__["cmd.run"]("{} smbios.system.product".format(kenv)) product = __salt__["cmd.run"](f"{kenv} smbios.system.product")
maker = __salt__["cmd.run"]("{} smbios.system.maker".format(kenv)) maker = __salt__["cmd.run"](f"{kenv} smbios.system.maker")
if product.startswith("VMware"): if product.startswith("VMware"):
grains["virtual"] = "VMware" grains["virtual"] = "VMware"
if product.startswith("VirtualBox"): if product.startswith("VirtualBox"):
grains["virtual"] = "VirtualBox" grains["virtual"] = "VirtualBox"
if maker.startswith("Xen"): if maker.startswith("Xen"):
grains["virtual_subtype"] = "{} {}".format(maker, product) grains["virtual_subtype"] = f"{maker} {product}"
grains["virtual"] = "xen" grains["virtual"] = "xen"
if maker.startswith("Microsoft") and product.startswith("Virtual"): if maker.startswith("Microsoft") and product.startswith("Virtual"):
grains["virtual"] = "VirtualPC" grains["virtual"] = "VirtualPC"
@ -1197,9 +1197,9 @@ def _virtual(osdata):
if maker.startswith("Amazon EC2"): if maker.startswith("Amazon EC2"):
grains["virtual"] = "Nitro" grains["virtual"] = "Nitro"
if sysctl: if sysctl:
hv_vendor = __salt__["cmd.run"]("{} -n hw.hv_vendor".format(sysctl)) hv_vendor = __salt__["cmd.run"](f"{sysctl} -n hw.hv_vendor")
model = __salt__["cmd.run"]("{} -n hw.model".format(sysctl)) model = __salt__["cmd.run"](f"{sysctl} -n hw.model")
jail = __salt__["cmd.run"]("{} -n security.jail.jailed".format(sysctl)) jail = __salt__["cmd.run"](f"{sysctl} -n security.jail.jailed")
if "bhyve" in hv_vendor: if "bhyve" in hv_vendor:
grains["virtual"] = "bhyve" grains["virtual"] = "bhyve"
elif "QEMU Virtual CPU" in model: elif "QEMU Virtual CPU" in model:
@ -1215,22 +1215,19 @@ def _virtual(osdata):
elif osdata["kernel"] == "NetBSD": elif osdata["kernel"] == "NetBSD":
if sysctl: if sysctl:
if "QEMU Virtual CPU" in __salt__["cmd.run"]( if "QEMU Virtual CPU" in __salt__["cmd.run"](
"{} -n machdep.cpu_brand".format(sysctl) f"{sysctl} -n machdep.cpu_brand"
): ):
grains["virtual"] = "kvm" grains["virtual"] = "kvm"
elif "invalid" not in __salt__["cmd.run"]( elif "invalid" not in __salt__["cmd.run"](
"{} -n machdep.xen.suspend".format(sysctl) f"{sysctl} -n machdep.xen.suspend"
): ):
grains["virtual"] = "Xen PV DomU" grains["virtual"] = "Xen PV DomU"
elif "VMware" in __salt__["cmd.run"]( elif "VMware" in __salt__["cmd.run"](
"{} -n machdep.dmi.system-vendor".format(sysctl) f"{sysctl} -n machdep.dmi.system-vendor"
): ):
grains["virtual"] = "VMware" grains["virtual"] = "VMware"
# NetBSD has Xen dom0 support # NetBSD has Xen dom0 support
elif ( elif __salt__["cmd.run"](f"{sysctl} -n machdep.idle-mechanism") == "xen":
__salt__["cmd.run"]("{} -n machdep.idle-mechanism".format(sysctl))
== "xen"
):
if os.path.isfile("/var/run/xenconsoled.pid"): if os.path.isfile("/var/run/xenconsoled.pid"):
grains["virtual_subtype"] = "Xen Dom0" grains["virtual_subtype"] = "Xen Dom0"
elif osdata["kernel"] == "SunOS": elif osdata["kernel"] == "SunOS":
@ -1238,7 +1235,7 @@ def _virtual(osdata):
# check the zonename here as fallback # check the zonename here as fallback
zonename = salt.utils.path.which("zonename") zonename = salt.utils.path.which("zonename")
if zonename: if zonename:
zone = __salt__["cmd.run"]("{}".format(zonename)) zone = __salt__["cmd.run"](f"{zonename}")
if zone != "global": if zone != "global":
grains["virtual"] = "zone" grains["virtual"] = "zone"
@ -1267,7 +1264,7 @@ def _virtual(osdata):
r".*Product Name: ([^\r\n]*).*", output, flags=re.DOTALL r".*Product Name: ([^\r\n]*).*", output, flags=re.DOTALL
) )
if product: if product:
grains["virtual_subtype"] = "Amazon EC2 ({})".format(product[1]) grains["virtual_subtype"] = f"Amazon EC2 ({product[1]})"
elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL): elif re.match(r".*Version: [^\r\n]+\.amazon.*", output, flags=re.DOTALL):
grains["virtual_subtype"] = "Amazon EC2" grains["virtual_subtype"] = "Amazon EC2"
@ -1299,9 +1296,7 @@ def _virtual_hv(osdata):
try: try:
version = {} version = {}
for fn in ("major", "minor", "extra"): for fn in ("major", "minor", "extra"):
with salt.utils.files.fopen( with salt.utils.files.fopen(f"/sys/hypervisor/version/{fn}", "r") as fhr:
"/sys/hypervisor/version/{}".format(fn), "r"
) as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip()) version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains["virtual_hv_version"] = "{}.{}{}".format( grains["virtual_hv_version"] = "{}.{}{}".format(
version["major"], version["minor"], version["extra"] version["major"], version["minor"], version["extra"]
@ -1457,7 +1452,7 @@ def _windows_os_release_grain(caption, product_type):
# ie: R2 # ie: R2
if re.match(r"^R\d+$", item): if re.match(r"^R\d+$", item):
release = item release = item
os_release = "{}Server{}".format(version, release) os_release = f"{version}Server{release}"
else: else:
for item in caption.split(" "): for item in caption.split(" "):
# If it's a number, decimal number, Thin or Vista, then it's the # If it's a number, decimal number, Thin or Vista, then it's the
@ -1703,7 +1698,7 @@ def _linux_devicetree_platform_data():
try: try:
# /proc/device-tree should be used instead of /sys/firmware/devicetree/base # /proc/device-tree should be used instead of /sys/firmware/devicetree/base
# see https://github.com/torvalds/linux/blob/v5.13/Documentation/ABI/testing/sysfs-firmware-ofw#L14 # see https://github.com/torvalds/linux/blob/v5.13/Documentation/ABI/testing/sysfs-firmware-ofw#L14
loc = "/proc/device-tree/{}".format(path) loc = f"/proc/device-tree/{path}"
if os.path.isfile(loc): if os.path.isfile(loc):
with salt.utils.files.fopen(loc, mode="r") as f: with salt.utils.files.fopen(loc, mode="r") as f:
return f.read().rstrip("\x00") # all strings are null-terminated return f.read().rstrip("\x00") # all strings are null-terminated
@ -1942,18 +1937,13 @@ def _linux_bin_exists(binary):
""" """
for search_cmd in ("which", "type -ap"): for search_cmd in ("which", "type -ap"):
try: try:
return __salt__["cmd.retcode"]("{} {}".format(search_cmd, binary)) == 0 return __salt__["cmd.retcode"](f"{search_cmd} {binary}") == 0
except salt.exceptions.CommandExecutionError: except salt.exceptions.CommandExecutionError:
pass pass
try: try:
return ( return (
len( len(__salt__["cmd.run_all"](f"whereis -b {binary}")["stdout"].split()) > 1
__salt__["cmd.run_all"]("whereis -b {}".format(binary))[
"stdout"
].split()
)
> 1
) )
except salt.exceptions.CommandExecutionError: except salt.exceptions.CommandExecutionError:
return False return False
@ -1971,7 +1961,7 @@ def _parse_lsb_release():
pass pass
else: else:
# Adds lsb_distrib_{id,release,codename,description} # Adds lsb_distrib_{id,release,codename,description}
ret["lsb_{}".format(key.lower())] = value.rstrip() ret[f"lsb_{key.lower()}"] = value.rstrip()
except OSError as exc: except OSError as exc:
log.trace("Failed to parse /etc/lsb-release: %s", exc) log.trace("Failed to parse /etc/lsb-release: %s", exc)
return ret return ret
@ -2716,7 +2706,7 @@ def os_data():
osbuild = __salt__["cmd.run"]("sw_vers -buildVersion") osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
grains["os"] = "MacOS" grains["os"] = "MacOS"
grains["os_family"] = "MacOS" grains["os_family"] = "MacOS"
grains["osfullname"] = "{} {}".format(osname, osrelease) grains["osfullname"] = f"{osname} {osrelease}"
grains["osrelease"] = osrelease grains["osrelease"] = osrelease
grains["osbuild"] = osbuild grains["osbuild"] = osbuild
grains["init"] = "launchd" grains["init"] = "launchd"
@ -3257,7 +3247,7 @@ def _hw_data(osdata):
"productname": "DeviceDesc", "productname": "DeviceDesc",
} }
for grain_name, cmd_key in hwdata.items(): for grain_name, cmd_key in hwdata.items():
result = __salt__["cmd.run_all"]("fw_printenv {}".format(cmd_key)) result = __salt__["cmd.run_all"](f"fw_printenv {cmd_key}")
if result["retcode"] == 0: if result["retcode"] == 0:
uboot_keyval = result["stdout"].split("=") uboot_keyval = result["stdout"].split("=")
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1]) grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
@ -3277,7 +3267,7 @@ def _hw_data(osdata):
"uuid": "smbios.system.uuid", "uuid": "smbios.system.uuid",
} }
for key, val in fbsd_hwdata.items(): for key, val in fbsd_hwdata.items():
value = __salt__["cmd.run"]("{} {}".format(kenv, val)) value = __salt__["cmd.run"](f"{kenv} {val}")
grains[key] = _clean_value(key, value) grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "OpenBSD": elif osdata["kernel"] == "OpenBSD":
sysctl = salt.utils.path.which("sysctl") sysctl = salt.utils.path.which("sysctl")
@ -3289,7 +3279,7 @@ def _hw_data(osdata):
"uuid": "hw.uuid", "uuid": "hw.uuid",
} }
for key, oid in hwdata.items(): for key, oid in hwdata.items():
value = __salt__["cmd.run"]("{} -n {}".format(sysctl, oid)) value = __salt__["cmd.run"](f"{sysctl} -n {oid}")
if not value.endswith(" value is not available"): if not value.endswith(" value is not available"):
grains[key] = _clean_value(key, value) grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "NetBSD": elif osdata["kernel"] == "NetBSD":
@ -3304,7 +3294,7 @@ def _hw_data(osdata):
"uuid": "machdep.dmi.system-uuid", "uuid": "machdep.dmi.system-uuid",
} }
for key, oid in nbsd_hwdata.items(): for key, oid in nbsd_hwdata.items():
result = __salt__["cmd.run_all"]("{} -n {}".format(sysctl, oid)) result = __salt__["cmd.run_all"](f"{sysctl} -n {oid}")
if result["retcode"] == 0: if result["retcode"] == 0:
grains[key] = _clean_value(key, result["stdout"]) grains[key] = _clean_value(key, result["stdout"])
elif osdata["kernel"] == "Darwin": elif osdata["kernel"] == "Darwin":
@ -3312,7 +3302,7 @@ def _hw_data(osdata):
sysctl = salt.utils.path.which("sysctl") sysctl = salt.utils.path.which("sysctl")
hwdata = {"productname": "hw.model"} hwdata = {"productname": "hw.model"}
for key, oid in hwdata.items(): for key, oid in hwdata.items():
value = __salt__["cmd.run"]("{} -b {}".format(sysctl, oid)) value = __salt__["cmd.run"](f"{sysctl} -b {oid}")
if not value.endswith(" is invalid"): if not value.endswith(" is invalid"):
grains[key] = _clean_value(key, value) grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"): elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):
@ -3326,7 +3316,7 @@ def _hw_data(osdata):
("/usr/sbin/virtinfo", "-a"), ("/usr/sbin/virtinfo", "-a"),
): ):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable if salt.utils.path.which(cmd): # Also verifies that cmd is executable
data += __salt__["cmd.run"]("{} {}".format(cmd, args)) data += __salt__["cmd.run"](f"{cmd} {args}")
data += "\n" data += "\n"
sn_regexes = [ sn_regexes = [
@ -3441,7 +3431,7 @@ def _hw_data(osdata):
elif osdata["kernel"] == "AIX": elif osdata["kernel"] == "AIX":
cmd = salt.utils.path.which("prtconf") cmd = salt.utils.path.which("prtconf")
if cmd: if cmd:
data = __salt__["cmd.run"]("{}".format(cmd)) + os.linesep data = __salt__["cmd.run"](f"{cmd}") + os.linesep
for dest, regstring in ( for dest, regstring in (
("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"), ("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"), ("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),
@ -3523,14 +3513,14 @@ def default_gateway():
for line in out.splitlines(): for line in out.splitlines():
if line.startswith("default"): if line.startswith("default"):
grains["ip_gw"] = True grains["ip_gw"] = True
grains["ip{}_gw".format(ip_version)] = True grains[f"ip{ip_version}_gw"] = True
try: try:
via, gw_ip = line.split()[1:3] via, gw_ip = line.split()[1:3]
except ValueError: except ValueError:
pass pass
else: else:
if via == "via": if via == "via":
grains["ip{}_gw".format(ip_version)] = gw_ip grains[f"ip{ip_version}_gw"] = gw_ip
break break
except Exception: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
continue continue


@ -91,14 +91,14 @@ def _freebsd_geom():
geom = salt.utils.path.which("geom") geom = salt.utils.path.which("geom")
ret = {"disks": {}, "ssds": []} ret = {"disks": {}, "ssds": []}
devices = __salt__["cmd.run"]("{} disk list".format(geom)) devices = __salt__["cmd.run"](f"{geom} disk list")
devices = devices.split("\n\n") devices = devices.split("\n\n")
def parse_geom_attribs(device): def parse_geom_attribs(device):
tmp = {} tmp = {}
for line in device.split("\n"): for line in device.split("\n"):
for attrib in _geom_attribs: for attrib in _geom_attribs:
search = re.search(r"{}:\s(.*)".format(attrib), line) search = re.search(rf"{attrib}:\s(.*)", line)
if search: if search:
value = _datavalue( value = _datavalue(
_geomconsts._datatypes.get(attrib), search.group(1) _geomconsts._datatypes.get(attrib), search.group(1)
@ -174,7 +174,7 @@ def _windows_disks():
info = line.split() info = line.split()
if len(info) != 2 or not info[0].isdigit() or not info[1].isdigit(): if len(info) != 2 or not info[0].isdigit() or not info[1].isdigit():
continue continue
device = r"\\.\PhysicalDrive{}".format(info[0]) device = rf"\\.\PhysicalDrive{info[0]}"
mediatype = info[1] mediatype = info[1]
if mediatype == "3": if mediatype == "3":
log.trace("Device %s reports itself as an HDD", device) log.trace("Device %s reports itself as an HDD", device)

View file

@ -33,14 +33,12 @@ def _linux_lvm():
ret = {} ret = {}
cmd = salt.utils.path.which("lvm") cmd = salt.utils.path.which("lvm")
if cmd: if cmd:
vgs = __salt__["cmd.run_all"]("{} vgs -o vg_name --noheadings".format(cmd)) vgs = __salt__["cmd.run_all"](f"{cmd} vgs -o vg_name --noheadings")
for vg in vgs["stdout"].splitlines(): for vg in vgs["stdout"].splitlines():
vg = vg.strip() vg = vg.strip()
ret[vg] = [] ret[vg] = []
lvs = __salt__["cmd.run_all"]( lvs = __salt__["cmd.run_all"](f"{cmd} lvs -o lv_name --noheadings {vg}")
"{} lvs -o lv_name --noheadings {}".format(cmd, vg)
)
for lv in lvs["stdout"].splitlines(): for lv in lvs["stdout"].splitlines():
ret[vg].append(lv.strip()) ret[vg].append(lv.strip())
@ -52,11 +50,11 @@ def _linux_lvm():
def _aix_lvm(): def _aix_lvm():
ret = {} ret = {}
cmd = salt.utils.path.which("lsvg") cmd = salt.utils.path.which("lsvg")
vgs = __salt__["cmd.run"]("{}".format(cmd)) vgs = __salt__["cmd.run"](f"{cmd}")
for vg in vgs.splitlines(): for vg in vgs.splitlines():
ret[vg] = [] ret[vg] = []
lvs = __salt__["cmd.run"]("{} -l {}".format(cmd, vg)) lvs = __salt__["cmd.run"](f"{cmd} -l {vg}")
for lvline in lvs.splitlines()[2:]: for lvline in lvs.splitlines()[2:]:
lv = lvline.split(" ", 1)[0] lv = lvline.split(" ", 1)[0]
ret[vg].append(lv) ret[vg].append(lv)


@ -62,7 +62,7 @@ def _user_mdata(mdata_list=None, mdata_get=None):
log.warning("mdata-list returned an error, skipping mdata grains.") log.warning("mdata-list returned an error, skipping mdata grains.")
continue continue
mdata_value = __salt__["cmd.run"]( mdata_value = __salt__["cmd.run"](
"{} {}".format(mdata_get, mdata_grain), ignore_retcode=True f"{mdata_get} {mdata_grain}", ignore_retcode=True
) )
if not mdata_grain.startswith("sdc:"): if not mdata_grain.startswith("sdc:"):
@ -108,7 +108,7 @@ def _sdc_mdata(mdata_list=None, mdata_get=None):
for mdata_grain in sdc_text_keys + sdc_json_keys: for mdata_grain in sdc_text_keys + sdc_json_keys:
mdata_value = __salt__["cmd.run"]( mdata_value = __salt__["cmd.run"](
"{} sdc:{}".format(mdata_get, mdata_grain), ignore_retcode=True f"{mdata_get} sdc:{mdata_grain}", ignore_retcode=True
) )
if mdata_value.startswith("ERROR:"): if mdata_value.startswith("ERROR:"):
log.warning( log.warning(


@ -24,7 +24,7 @@ import salt.utils.stringutils
# metadata server information # metadata server information
IP = "169.254.169.254" IP = "169.254.169.254"
HOST = "http://{}/".format(IP) HOST = f"http://{IP}/"
def __virtual__(): def __virtual__():


@ -177,7 +177,7 @@ class KeyCLI:
if cmd in ("accept", "reject", "delete") and args is None: if cmd in ("accept", "reject", "delete") and args is None:
args = self.opts.get("match_dict", {}).get("minions") args = self.opts.get("match_dict", {}).get("minions")
fstr = "key.{}".format(cmd) fstr = f"key.{cmd}"
fun = self.client.functions[fstr] fun = self.client.functions[fstr]
args, kwargs = self._get_args_kwargs(fun, args) args, kwargs = self._get_args_kwargs(fun, args)
@ -230,7 +230,7 @@ class KeyCLI:
stat_str = statuses[0] stat_str = statuses[0]
else: else:
stat_str = "{} or {}".format(", ".join(statuses[:-1]), statuses[-1]) stat_str = "{} or {}".format(", ".join(statuses[:-1]), statuses[-1])
msg = "The key glob '{}' does not match any {} keys.".format(match, stat_str) msg = f"The key glob '{match}' does not match any {stat_str} keys."
print(msg) print(msg)
def run(self): def run(self):
@ -291,7 +291,7 @@ class KeyCLI:
else: else:
salt.output.display_output({"return": ret}, "key", opts=self.opts) salt.output.display_output({"return": ret}, "key", opts=self.opts)
except salt.exceptions.SaltException as exc: except salt.exceptions.SaltException as exc:
ret = "{}".format(exc) ret = f"{exc}"
if not self.opts.get("quiet", False): if not self.opts.get("quiet", False):
salt.output.display_output(ret, "nested", self.opts) salt.output.display_output(ret, "nested", self.opts)
return ret return ret
@ -311,7 +311,7 @@ class Key:
self.opts = opts self.opts = opts
kind = self.opts.get("__role", "") # application kind kind = self.opts.get("__role", "") # application kind
if kind not in salt.utils.kinds.APPL_KINDS: if kind not in salt.utils.kinds.APPL_KINDS:
emsg = "Invalid application kind = '{}'.".format(kind) emsg = f"Invalid application kind = '{kind}'."
log.error(emsg) log.error(emsg)
raise ValueError(emsg) raise ValueError(emsg)
self.event = salt.utils.event.get_event( self.event = salt.utils.event.get_event(
@ -377,7 +377,7 @@ class Key:
# check given pub-key # check given pub-key
if pub: if pub:
if not os.path.isfile(pub): if not os.path.isfile(pub):
return "Public-key {} does not exist".format(pub) return f"Public-key {pub} does not exist"
# default to master.pub # default to master.pub
else: else:
mpub = self.opts["pki_dir"] + "/" + "master.pub" mpub = self.opts["pki_dir"] + "/" + "master.pub"
@ -387,7 +387,7 @@ class Key:
# check given priv-key # check given priv-key
if priv: if priv:
if not os.path.isfile(priv): if not os.path.isfile(priv):
return "Private-key {} does not exist".format(priv) return f"Private-key {priv} does not exist"
# default to master_sign.pem # default to master_sign.pem
else: else:
mpriv = self.opts["pki_dir"] + "/" + "master_sign.pem" mpriv = self.opts["pki_dir"] + "/" + "master_sign.pem"
@ -467,7 +467,7 @@ class Key:
if clist: if clist:
for minion in clist: for minion in clist:
if minion not in minions and minion not in preserve_minions: if minion not in minions and minion not in preserve_minions:
cache.flush("{}/{}".format(self.ACC, minion)) cache.flush(f"{self.ACC}/{minion}")
def check_master(self): def check_master(self):
""" """
@ -663,7 +663,7 @@ class Key:
pass pass
for keydir, key in invalid_keys: for keydir, key in invalid_keys:
matches[keydir].remove(key) matches[keydir].remove(key)
sys.stderr.write("Unable to accept invalid key for {}.\n".format(key)) sys.stderr.write(f"Unable to accept invalid key for {key}.\n")
return self.name_match(match) if match is not None else self.dict_match(matches) return self.name_match(match) if match is not None else self.dict_match(matches)
def accept_all(self): def accept_all(self):

Some files were not shown because too many files have changed in this diff Show more