Mirror of https://github.com/saltstack/salt.git, synced 2025-04-17 10:10:20 +00:00

No string formatting in log calls

This commit is contained in:
parent c3b222d8d4
commit 1afa3565ce

72 changed files with 671 additions and 740 deletions
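The pattern applied throughout this commit: instead of building the log message eagerly with `str.format()`, the format string and its arguments are passed to the logging call, so interpolation only happens if the record is actually emitted. A minimal sketch of the before/after (illustrative only; the function and variable names here are hypothetical, not taken from the Salt codebase):

```python
import logging

log = logging.getLogger(__name__)


def connect(host, port):
    # Before: the message is formatted even when DEBUG is disabled.
    log.debug("connecting to {}:{}".format(host, port))

    # After: logging interpolates lazily, only when a handler accepts
    # the record, and the str-format-in-logging pylint check passes.
    log.debug("connecting to %s:%s", host, port)
```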
@@ -123,7 +123,6 @@ disable=R,
     do-not-assign-a-lambda-expression-use-a-def,
     3rd-party-local-module-not-gated,
     pep8-reserved-keywords,
-    str-format-in-logging,
     import-outside-toplevel,
     deprecated-method,
     repr-flag-used-in-string,
@@ -397,12 +397,12 @@ class SSH:
                     )
                 )
                 log.info(
-                    "The host {} has been added to the roster {}".format(
-                        self.opts.get("tgt", ""), roster_file
-                    )
+                    "The host %s has been added to the roster %s",
+                    self.opts.get("tgt", ""),
+                    roster_file,
                 )
             else:
-                log.error("Unable to update roster {}: access denied".format(roster_file))
+                log.error("Unable to update roster %s: access denied", roster_file)

     def _update_targets(self):
         """
@@ -771,11 +771,11 @@ class SSH:
                     jid, job_load
                 )
             except Exception as exc:  # pylint: disable=broad-except
-                log.exception(exc)
                 log.error(
                     "Could not save load with returner %s: %s",
                     self.opts["master_job_cache"],
                     exc,
+                    exc_info=True,
                 )

         if self.opts.get("verbose"):
@@ -1048,29 +1048,22 @@ class Single:
         if self.ssh_pre_flight:
             if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
                 log.info(
-                    "{} thin dir already exists. Not running ssh_pre_flight script".format(
-                        self.thin_dir
-                    )
+                    "%s thin dir already exists. Not running ssh_pre_flight script",
+                    self.thin_dir,
                 )
             elif not os.path.exists(self.ssh_pre_flight):
                 log.error(
-                    "The ssh_pre_flight script {} does not exist".format(
-                        self.ssh_pre_flight
-                    )
+                    "The ssh_pre_flight script %s does not exist", self.ssh_pre_flight
                 )
             else:
                 stdout, stderr, retcode = self.run_ssh_pre_flight()
                 if retcode != 0:
                     log.error(
-                        "Error running ssh_pre_flight script {}".format(
-                            self.ssh_pre_file
-                        )
+                        "Error running ssh_pre_flight script %s", self.ssh_pre_file
                     )
                     return stdout, stderr, retcode
                 log.info(
-                    "Successfully ran the ssh_pre_flight script: {}".format(
-                        self.ssh_pre_file
-                    )
+                    "Successfully ran the ssh_pre_flight script: %s", self.ssh_pre_file
                 )

         if self.opts.get("raw_shell", False):
@@ -988,7 +988,7 @@ class Cloud:
             pool_size = self.opts["pool_size"]
         else:
             pool_size = len(parallel_data)
-        log.info("Destroying in parallel mode; " "Cloud pool size: %s", pool_size)
+        log.info("Destroying in parallel mode; Cloud pool size: %s", pool_size)

         # kick off the parallel destroy
         output_multip = enter_mainloop(
@@ -1256,13 +1256,12 @@ class Cloud:
                     )
                     if ret:
                         log.info(
-                            "Synchronized the following dynamic modules: "
-                            " {}".format(ret)
+                            "Synchronized the following dynamic modules: %s", ret
                         )
                     break
         except KeyError as exc:
             log.exception(
-                "Failed to create VM %s. Configuration value %s needs " "to be set",
+                "Failed to create VM %s. Configuration value %s needs to be set",
                 vm_["name"],
                 exc,
             )
@@ -1716,7 +1715,7 @@ class Map(Cloud):
             pass
         elif self.opts.get("map_pillar") not in self.opts.get("maps"):
             log.error(
-                "The specified map not found in pillar at " "'cloud:maps:%s'",
+                "The specified map not found in pillar at 'cloud:maps:%s'",
                 self.opts["map_pillar"],
             )
             raise SaltCloudNotFound()
@@ -1976,8 +1975,7 @@ class Map(Cloud):
                     break

             log.warning(
-                "'%s' already exists, removing from " "the create map.",
-                name,
+                "%r already exists, removing from the create map.", name,
             )

         if "existing" not in ret:
@@ -2112,7 +2110,7 @@ class Map(Cloud):
         out = self.create(master_profile, local_master=local_master)

         if not isinstance(out, dict):
-            log.debug("Master creation details is not a dictionary: {}".format(out))
+            log.debug("Master creation details is not a dictionary: %s", out)

         elif "Errors" in out:
             raise SaltCloudSystemExit(
@@ -346,7 +346,7 @@ def ignore_cidr(vm_, ip):
         cidrs = [cidrs]
     for cidr in cidrs or []:
         if ip_address(ip) in ip_network(cidr):
-            log.warning("IP '{}' found within '{}'; ignoring it.".format(ip, cidr))
+            log.warning("IP %r found within %r; ignoring it.", ip, cidr)
             return True

     return False
@@ -488,7 +488,7 @@ def _refresh_buckets_cache_file(cache_file):
                     continue
                 else:
                     log.warning(
-                        "S3 Error! Do you have any files " "in your S3 bucket?"
+                        "S3 Error! Do you have any files in your S3 bucket?"
                     )
                     return {}

@@ -532,7 +532,7 @@ def _refresh_buckets_cache_file(cache_file):
                     continue
                 else:
                     log.warning(
-                        "S3 Error! Do you have any files " "in your S3 bucket?"
+                        "S3 Error! Do you have any files in your S3 bucket?"
                    )
                     return {}

@@ -594,7 +594,7 @@ def _read_buckets_cache_file(cache_file):
             KeyError,
             ValueError,
         ) as exc:
-            log.debug("Exception reading buckets cache file: '{}'".format(exc))
+            log.debug("Exception reading buckets cache file: '%s'", exc)
             data = None

     return data
@@ -112,9 +112,8 @@ def _sdc_mdata(mdata_list=None, mdata_get=None):
             )
             if mdata_value.startswith("ERROR:"):
                 log.warning(
-                    "unable to read sdc:{} via mdata-get, mdata grain may be incomplete.".format(
-                        mdata_grain,
-                    )
+                    "unable to read sdc:%s via mdata-get, mdata grain may be incomplete.",
+                    mdata_grain,
                 )
                 continue

@@ -105,9 +105,8 @@ def post_master_init(self, master):
             self.opts["mine_functions"] = general_proxy_mines + specific_proxy_mines
         except TypeError as terr:
             log.error(
-                "Unable to merge mine functions from the pillar in the opts, for proxy {}".format(
-                    self.opts["id"]
-                )
+                "Unable to merge mine functions from the pillar in the opts, for proxy %s",
+                self.opts["id"],
             )

     fq_proxyname = self.opts["proxy"]["proxytype"]
@@ -792,9 +791,8 @@ def handle_decoded_payload(self, data):
         process_count = len(salt.utils.minion.running(self.opts))
         while process_count >= process_count_max:
             log.warning(
-                "Maximum number of processes reached while executing jid {}, waiting...".format(
-                    data["jid"]
-                )
+                "Maximum number of processes reached while executing jid %s, waiting...",
+                data["jid"],
             )
             yield salt.ext.tornado.gen.sleep(10)
             process_count = len(salt.utils.minion.running(self.opts))
@@ -312,9 +312,9 @@ def bootstrap(force=False, source=None):

     # Download Chocolatey installer
     try:
-        log.debug("Downloading Chocolatey: {}".format(os.path.basename(url)))
+        log.debug("Downloading Chocolatey: %s", os.path.basename(url))
         script = __salt__["cp.get_url"](path=url, dest=dest)
-        log.debug("Script: {}".format(script))
+        log.debug("Script: %s", script)
     except MinionError:
         err = "Failed to download Chocolatey Installer"
         if source:
@@ -323,7 +323,7 @@ def bootstrap(force=False, source=None):

     # If this is a nupkg download we need to unzip it first
     if os.path.splitext(os.path.basename(dest))[1] == ".nupkg":
-        log.debug("Unzipping Chocolatey: {}".format(dest))
+        log.debug("Unzipping Chocolatey: %s", dest)
         __salt__["archive.unzip"](
             zip_file=dest,
             dest=os.path.join(os.path.dirname(dest), "chocolatey"),
@@ -339,7 +339,7 @@ def bootstrap(force=False, source=None):
     )

     # Run the Chocolatey bootstrap
-    log.debug("Installing Chocolatey: {}".format(script))
+    log.debug("Installing Chocolatey: %s", script)
     result = __salt__["cmd.script"](
         script, cwd=os.path.dirname(script), shell="powershell", python_shell=True
     )
@@ -375,7 +375,7 @@ def unbootstrap():
     choco_dir = os.environ.get("ChocolateyInstall", False)
     if choco_dir:
         if os.path.exists(choco_dir):
-            log.debug("Removing Chocolatey directory: {}".format(choco_dir))
+            log.debug("Removing Chocolatey directory: %s", choco_dir)
             __salt__["file.remove"](path=choco_dir, force=True)
             removed.append("Removed Directory: {}".format(choco_dir))
         else:
@@ -385,14 +385,14 @@ def unbootstrap():
         ]
         for path in known_paths:
             if os.path.exists(path):
-                log.debug("Removing Chocolatey directory: {}".format(path))
+                log.debug("Removing Chocolatey directory: %s", path)
                 __salt__["file.remove"](path=path, force=True)
                 removed.append("Removed Directory: {}".format(path))

     # Delete all Chocolatey environment variables
     for env_var in __salt__["environ.items"]():
         if env_var.lower().startswith("chocolatey"):
-            log.debug("Removing Chocolatey environment variable: {}" "".format(env_var))
+            log.debug("Removing Chocolatey environment variable: %s", env_var)
             __salt__["environ.setval"](
                 key=env_var, val=False, false_unsets=True, permanent="HKLM"
             )
@@ -404,7 +404,7 @@ def unbootstrap():
     # Remove Chocolatey from the path:
     for path in __salt__["win_path.get_path"]():
         if "chocolatey" in path.lower():
-            log.debug("Removing Chocolatey path item: {}" "".format(path))
+            log.debug("Removing Chocolatey path item: %s", path)
             __salt__["win_path.remove"](path=path, rehash=True)
             removed.append("Removed Path Item: {}".format(path))

@@ -124,7 +124,7 @@ def _hwinfo_parse_short(report):
                 current_result = value
                 key_counter = 0
             else:
-                log.error("Error parsing hwinfo short output: {}".format(line))
+                log.error("Error parsing hwinfo short output: %s", line)

     return result

@@ -903,9 +903,7 @@ def _get_create_kwargs(
         client_args = get_client_args(["create_container", "host_config"])
     except CommandExecutionError as exc:
         log.error(
-            "docker.create: Error getting client args: '%s'",
-            exc.__str__(),
-            exc_info=True,
+            "docker.create: Error getting client args: '%s'", exc, exc_info=True,
         )
         raise CommandExecutionError("Failed to get client args: {}".format(exc))

@@ -2099,7 +2097,7 @@ def resolve_tag(name, **kwargs):
         return False
     except KeyError:
         log.error(
-            "Inspecting docker image '%s' returned an unexpected data " "structure: %s",
+            "Inspecting docker image '%s' returned an unexpected data structure: %s",
             name,
             inspect_result,
         )
@@ -2513,7 +2511,7 @@ def version():


 def _create_networking_config(networks):
-    log.debug("creating networking config from {}".format(networks))
+    log.debug("creating networking config from %s", networks)
     return _client_wrapper(
         "create_networking_config",
         {
@@ -3270,7 +3268,7 @@ def create(
     )

     log.debug(
-        "docker.create: creating container %susing the following " "arguments: %s",
+        "docker.create: creating container %susing the following arguments: %s",
         "with name '{}' ".format(name) if name is not None else "",
         kwargs,
     )
@@ -3435,7 +3433,7 @@ def run_container(
         raise SaltInvocationError("Invalid format for networks argument")

     log.debug(
-        "docker.create: creating container %susing the following " "arguments: %s",
+        "docker.create: creating container %susing the following arguments: %s",
         "with name '{}' ".format(name) if name is not None else "",
         kwargs,
     )
@@ -28,7 +28,7 @@ import time
 import urllib.parse
 from collections import namedtuple
 from collections.abc import Iterable, Mapping
-from functools import reduce  # pylint: disable=redefined-builtin
+from functools import reduce

 import salt.utils.args
 import salt.utils.atomicfile
@@ -4895,7 +4895,7 @@ def extract_hash(

     if partial:
         log.debug(
-            "file.extract_hash: Returning the partially identified %s hash " "'%s'",
+            "file.extract_hash: Returning the partially identified %s hash '%s'",
             partial["hash_type"],
             partial["hsum"],
         )
@@ -5101,9 +5101,11 @@ def check_perms(
                 current_serange,
             ) = get_selinux_context(name).split(":")
             log.debug(
-                "Current selinux context user:{} role:{} type:{} range:{}".format(
-                    current_seuser, current_serole, current_setype, current_serange
-                )
+                "Current selinux context user:%s role:%s type:%s range:%s",
+                current_seuser,
+                current_serole,
+                current_setype,
+                current_serange,
             )
         except ValueError:
             log.error("Unable to get current selinux attributes")
@@ -5172,7 +5174,7 @@ def check_perms(
                         range=requested_serange,
                         persist=True,
                     )
-                    log.debug("selinux set result: {}".format(result))
+                    log.debug("selinux set result: %s", result)
                     (
                         current_seuser,
                         current_serole,
@@ -5614,9 +5616,11 @@ def check_file_meta(
                 current_serange,
             ) = get_selinux_context(name).split(":")
             log.debug(
-                "Current selinux context user:{} role:{} type:{} range:{}".format(
-                    current_seuser, current_serole, current_setype, current_serange
-                )
+                "Current selinux context user:%s role:%s type:%s range:%s",
+                current_seuser,
+                current_serole,
+                current_setype,
+                current_serange,
             )
         except ValueError as exc:
             log.error("Unable to get current selinux attributes")
@@ -412,9 +412,7 @@ class _Ini(_Section):
     def refresh(self, inicontents=None):
         if inicontents is None:
             if not os.path.exists(self.name):
-                log.trace(
-                    "File {} does not exist and will be created".format(self.name)
-                )
+                log.trace("File %s does not exist and will be created", self.name)
                 return
             try:
                 with salt.utils.files.fopen(self.name) as rfh:
@@ -143,10 +143,10 @@ class HandleFileCopy:
     def __exit__(self, exc_type, exc_value, exc_traceback):
         if self._cached_file is not None:
             salt.utils.files.safe_rm(self._cached_file)
-            log.debug("Deleted cached file: {}".format(self._cached_file))
+            log.debug("Deleted cached file: %s", self._cached_file)
         if self._cached_folder is not None:
             __salt__["file.rmdir"](self._cached_folder)
-            log.debug("Deleted cached folder: {}".format(self._cached_folder))
+            log.debug("Deleted cached folder: %s", self._cached_folder)


 def _timeout_decorator(function):
@@ -225,16 +225,14 @@ def _timeout_decorator_cleankwargs(function):
 def _restart_connection():
     minion_id = __opts__.get("proxyid", "") or __opts__.get("id", "")
     log.info(
-        "Junos exception occurred {} (junos proxy) is down. Restarting.".format(
-            minion_id
-        )
+        "Junos exception occurred %s (junos proxy) is down. Restarting.", minion_id
     )
     __salt__["event.fire_master"](
         {}, "junos/proxy/{}/stop".format(__opts__["proxy"]["host"])
     )
     __proxy__["junos.shutdown"](__opts__)  # safely close connection
     __proxy__["junos.init"](__opts__)  # reopen connection
-    log.debug("Junos exception occurred, restarted {} (junos proxy)!".format(minion_id))
+    log.debug("Junos exception occurred, restarted %s (junos proxy)!", minion_id)


 @_timeout_decorator_cleankwargs
@@ -382,7 +380,7 @@ def rpc(cmd=None, dest=None, **kwargs):
             log.warning('Filter ignored as it is only used with "get-config" rpc')

     if "dest" in op:
-        log.warning("dest in op, rpc may reject this for cmd {}".format(cmd))
+        log.warning("dest in op, rpc may reject this for cmd '%s'", cmd)

     try:
         reply = getattr(conn.rpc, cmd.replace("-", "_"))({"format": format_}, **op)
@@ -654,8 +652,10 @@ def rollback(**kwargs):
         ids_passed = ids_passed + 1

     if ids_passed > 1:
-        log.warning("junos.rollback called with more than one possible ID.")
-        log.warning("Use only one of the positional argument, `id`, or `d_id` kwargs")
+        log.warning(
+            "junos.rollback called with more than one possible ID. "
+            "Use only one of the positional argument, `id`, or `d_id` kwargs"
+        )

     ret = {}
     conn = __proxy__["junos.conn"]()
@@ -689,8 +689,8 @@ def rollback(**kwargs):
             fp.write(salt.utils.stringutils.to_str(diff))
     else:
         log.info(
-            "No diff between current configuration and \
-rollbacked configuration, so no diff file created"
+            "No diff between current configuration and "
+            "rollbacked configuration, so no diff file created"
         )

     try:
@@ -755,8 +755,10 @@ def diff(**kwargs):
         id_ = kwargs.pop("id", 0)
         ids_passed = ids_passed + 1
     if ids_passed > 1:
-        log.warning("junos.rollback called with more than one possible ID.")
-        log.warning("Use only one of the positional argument, `id`, or `d_id` kwargs")
+        log.warning(
+            "junos.rollback called with more than one possible ID. "
+            "Use only one of the positional argument, `id`, or `d_id` kwargs"
+        )

     if kwargs:
         salt.utils.args.invalid_kwargs(kwargs)
@@ -340,7 +340,7 @@ def token_list(kubeconfig=None, rootfs=None):
         # break the parser.
         values = re.findall(r"(\S+(?:\s\S+)*)", line)
         if len(header) != len(values):
-            log.error("Error parsing line: {}".format(line))
+            log.error("Error parsing line: '%s'", line)
             continue
         tokens.append({key: value for key, value in zip(header, values)})
     return tokens
@@ -96,7 +96,7 @@ def _add(app, endpoint, payload):
     try:
         return getattr(getattr(nb, app), endpoint).create(**payload)
     except pynetbox.RequestError as e:
-        log.error("{}, {}, {}".format(e.req.request.headers, e.request_body, e.error))
+        log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error)
         return False

@@ -376,7 +376,7 @@ def create_device(name, role, model, manufacturer, site):

         status = {"label": "Active", "value": 1}
     except pynetbox.RequestError as e:
-        log.error("{}, {}, {}".format(e.req.request.headers, e.request_body, e.error))
+        log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error)
         return False

     payload = {
@@ -419,7 +419,7 @@ def update_device(name, **kwargs):
         nb_device.save()
         return {"dcim": {"devices": kwargs}}
     except pynetbox.RequestError as e:
-        log.error("{}, {}, {}".format(e.req.request.headers, e.request_body, e.error))
+        log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error)
         return False

@@ -802,9 +802,7 @@ def update_interface(device_name, interface_name, **kwargs):
         nb_interface.save()
         return {"dcim": {"interfaces": {nb_interface.id: dict(nb_interface)}}}
     except pynetbox.RequestError as e:
-        log.error(
-            "{}, {}, {}".format(e.req.request.headers, e.request_body, e.error)
-        )
+        log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error)
         return False

@@ -1406,7 +1406,7 @@ def mod_hostname(hostname):
                 if "Static hostname" in line[0]:
                     o_hostname = line[1].strip()
         else:
-            log.debug("{} was unable to get hostname".format(hostname_cmd))
+            log.debug("%s was unable to get hostname", hostname_cmd)
             o_hostname = __salt__["network.get_hostname"]()
     elif not __utils__["platform.is_sunos"]():
         # don't run hostname -f because -f is not supported on all platforms
@@ -1421,9 +1421,9 @@ def mod_hostname(hostname):
         )
         if result["retcode"] != 0:
             log.debug(
-                "{} was unable to set hostname. Error: {}".format(
-                    hostname_cmd, result["stderr"],
-                )
+                "%s was unable to set hostname. Error: %s",
+                hostname_cmd,
+                result["stderr"],
             )
             return False
     elif not __utils__["platform.is_sunos"]():
@@ -2121,6 +2121,6 @@ def fqdns():
             fqdns.update(item)

     elapsed = time.time() - start
-    log.debug("Elapsed time getting FQDNs: {} seconds".format(elapsed))
+    log.debug("Elapsed time getting FQDNs: %s seconds", elapsed)

     return {"fqdns": sorted(list(fqdns))}
@@ -225,7 +225,7 @@ def cmd(command, *args, **kwargs):
         if k.startswith("__pub_"):
             kwargs.pop(k)
     local_command = ".".join(["nxos", command])
-    log.info("local command: {}".format(local_command))
+    log.info("local command: %s", local_command)
     if local_command not in __salt__:
         return False
     return __salt__[local_command](*args, **kwargs)
@@ -119,7 +119,7 @@ def check_upgrade_impact(system_image, kickstart_image=None, issu=True, **kwargs
     if issu and ki is None:
         cmd = cmd + " non-disruptive"

-    log.info("Check upgrade impact using command: '{}'".format(cmd))
+    log.info("Check upgrade impact using command: '%s'", cmd)
     kwargs.update({"timeout": kwargs.get("timeout", 900)})
     error_pattern_list = [
         "Another install procedure may be in progress",
@@ -195,7 +195,7 @@ def upgrade(system_image, kickstart_image=None, issu=True, **kwargs):
         if impact["invalid_command"]:
             impact = False
             continue
-        log.info("Impact data gathered:\n{}".format(impact))
+        log.info("Impact data gathered:\n%s", impact)

         # Check to see if conditions are sufficent to return the impact
         # data and not proceed with the actual upgrade.
@@ -273,7 +273,7 @@ def _upgrade(system_image, kickstart_image, issu, **kwargs):
         logmsg += "\nDisruptive Upgrade/Downgrade requested."

     log.info(logmsg)
-    log.info("Begin upgrade using command: '{}'".format(cmd))
+    log.info("Begin upgrade using command: '%s'", cmd)

     kwargs.update({"timeout": kwargs.get("timeout", 900)})
     error_pattern_list = ["Another install procedure may be in progress"]
@@ -347,7 +347,7 @@ def _parse_upgrade_data(data):
     upgrade_result["upgrade_data"] = data
     for line in data.split("\n"):

-        log.info("Processing line: ({})".format(line))
+        log.info("Processing line: (%s)", line)

         # Check to see if upgrade is disruptive or non-disruptive
         if re.search(r"non-disruptive", line):
@@ -364,9 +364,8 @@ def modify(
             for f in val.upper():
                 if f not in ["N", "D", "H", "L", "X"]:
                     log.warning(
-                        "pdbedit.modify - unknown {f} flag for account_control, ignored".format(
-                            f=f
-                        )
+                        "pdbedit.modify - unknown %s flag for account_control, ignored",
+                        f,
                     )
                 else:
                     new.append(f)
@@ -1399,7 +1399,7 @@ def list_upgrades(bin_env=None, user=None, cwd=None):
         if match:
             name, version_ = match.groups()
         else:
-            logger.error("Can't parse line '{}'".format(line))
+            logger.error("Can't parse line %r", line)
             continue
         packages[name] = version_

@@ -607,7 +607,7 @@ def enable(name, start=False, **kwargs):
             salt.utils.files.fopen(down_file, "w").close()
             # pylint: enable=resource-leakage
         except OSError:
-            log.error("Unable to create file {}".format(down_file))
+            log.error("Unable to create file %s", down_file)
             return False

     # enable the service
@@ -616,7 +616,7 @@ def enable(name, start=False, **kwargs):

     except OSError:
         # (attempt to) remove temp down_file anyway
-        log.error("Unable to create symlink {}".format(down_file))
+        log.error("Unable to create symlink %s", down_file)
         if not start:
             os.unlink(down_file)
         return False
@@ -70,14 +70,13 @@ def get_release_number(name):
     version_map = salt.version.SaltStackVersion.LNAMES
     version = version_map.get(name)
     if version is None:
-        log.info("Version {} not found.".format(name))
+        log.info("Version %s not found.", name)
         return None

     try:
         if version[1] == 0:
             log.info(
-                "Version {} found, but no release number has been assigned "
-                "yet.".format(name)
+                "Version %s found, but no release number has been assigned yet.", name
             )
             return "No version assigned."
     except IndexError:
@@ -102,7 +101,7 @@ def equal(name):
         salt '*' salt_version.equal 'Oxygen'
     """
     if _check_release_cmp(name) == 0:
-        log.info("The minion's version code name matches '{}'.".format(name))
+        log.info("The minion's version code name matches '%s'.", name)
         return True

     return False
@@ -123,7 +122,7 @@ def greater_than(name):
         salt '*' salt_version.greater_than 'Oxygen'
     """
     if _check_release_cmp(name) == 1:
-        log.info("The minion's version code name is greater than '{}'.".format(name))
+        log.info("The minion's version code name is greater than '%s'.", name)
         return True

     return False
@@ -144,7 +143,7 @@ def less_than(name):
         salt '*' salt_version.less_than 'Oxygen'
     """
     if _check_release_cmp(name) == -1:
-        log.info("The minion's version code name is less than '{}'.".format(name))
+        log.info("The minion's version code name is less than '%s'.", name)
         return True

     return False
@@ -161,7 +160,7 @@ def _check_release_cmp(name):
     """
     map_version = get_release_number(name)
     if map_version is None:
-        log.info("Release code name {} was not found.".format(name))
+        log.info("Release code name %s was not found.", name)
         return None

     current_version = str(salt.version.SaltStackVersion(*salt.version.__version_info__))
@@ -89,7 +89,7 @@ def _parse_image_meta(image=None, detail=False):
             name=name, version=version, published=published,
         )
     else:
-        log.debug("smartos_image - encountered invalid image payload: {}".format(image))
+        log.debug("smartos_image - encountered invalid image payload: %s", image)
         ret = {"Error": "This looks like an orphaned image, image payload was invalid."}

     return ret
@@ -272,7 +272,7 @@ def update_user(email, profile="splunk", **kwargs):
     user = list_users(profile).get(email)

     if not user:
-        log.error("Failed to retrieve user {}".format(email))
+        log.error("Failed to retrieve user %s", email)
         return False

     property_map = {}
@@ -1793,9 +1793,9 @@ def proxy_reconnect(proxy_name, opts=None):
         # especially
         minion_id = opts.get("proxyid", "") or opts.get("id", "")
         log.info(
-            "{} ({} proxy) is rebooting or shutting down. Don't probe connection.".format(
-                minion_id, proxy_name
-            )
+            "%s (%s proxy) is rebooting or shutting down. Don't probe connection.",
+            minion_id,
+            proxy_name,
         )
         return True

@@ -116,7 +116,7 @@ def _post_message(message, chat_id, token):
         response = requests.post(url, data=parameters)
         result = response.json()

-        log.debug("Raw response of the telegram request is {}".format(response))
+        log.debug("Raw response of the telegram request is %s", response)

     except Exception:  # pylint: disable=broad-except
         log.exception("Sending telegram api request failed")
@@ -125,9 +125,9 @@ def _post_message(message, chat_id, token):
     # Check if the Telegram Bot API returned successfully.
     if not result.get("ok", False):
         log.debug(
-            "Sending telegram api request failed due to error {} ({})".format(
-                result.get("error_code"), result.get("description")
-            )
+            "Sending telegram api request failed due to error %s (%s)",
+            result.get("error_code"),
+            result.get("description"),
         )
         return False

@@ -21,7 +21,6 @@ import os

 from salt.utils.files import fopen

-# Import third party modules
 try:
     import textfsm

@@ -59,17 +58,13 @@ def _clitable_to_dict(objects, fsm_handler):
     Converts TextFSM cli_table object to list of dictionaries.
     """
     objs = []
-    log.debug("Cli Table:")
-    log.debug(objects)
-    log.debug("FSM handler:")
-    log.debug(fsm_handler)
+    log.debug("Cli Table: %s; FSM handler: %s", objects, fsm_handler)
     for row in objects:
         temp_dict = {}
         for index, element in enumerate(row):
             temp_dict[fsm_handler.header[index].lower()] = element
         objs.append(temp_dict)
-    log.debug("Extraction result:")
-    log.debug(objs)
+    log.debug("Extraction result: %s", objs)
     return objs

@@ -178,8 +173,9 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv="base"):
     }
     """
     ret = {"result": False, "comment": "", "out": None}
-    log.debug("Using the saltenv: {}".format(saltenv))
-    log.debug("Caching {} using the Salt fileserver".format(template_path))
+    log.debug(
+        "Caching %s(saltenv: %s) using the Salt fileserver", template_path, saltenv
+    )
     tpl_cached_path = __salt__["cp.cache_file"](template_path, saltenv=saltenv)
     if tpl_cached_path is False:
         ret["comment"] = "Unable to read the TextFSM template from {}".format(
@@ -188,9 +184,7 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv="base"):
         log.error(ret["comment"])
         return ret
     try:
-        log.debug(
-            "Reading TextFSM template from cache path: {}".format(tpl_cached_path)
-        )
+        log.debug("Reading TextFSM template from cache path: %s", tpl_cached_path)
         # Disabling pylint W8470 to nto complain about fopen.
         # Unfortunately textFSM needs the file handle rather than the content...
         # pylint: disable=W8470
@@ -208,7 +202,7 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv="base"):
         )
         return ret
     if not raw_text and raw_text_file:
-        log.debug("Trying to read the raw input from {}".format(raw_text_file))
+        log.debug("Trying to read the raw input from %s", raw_text_file)
         raw_text = __salt__["cp.get_file_str"](raw_text_file, saltenv=saltenv)
         if raw_text is False:
             ret[
@@ -222,8 +216,7 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv="base"):
         ret["comment"] = "Please specify a valid input file or text."
         log.error(ret["comment"])
         return ret
-    log.debug("Processing the raw text:")
-    log.debug(raw_text)
+    log.debug("Processing the raw text:\n%s", raw_text)
     objects = fsm_handler.ParseText(raw_text)
     ret["out"] = _clitable_to_dict(objects, fsm_handler)
     ret["result"] = True
@@ -396,9 +389,7 @@ def index(
         )
         if platform_grain_name:
             log.debug(
-                "Using the {} grain to identify the platform name".format(
-                    platform_grain_name
-                )
+                "Using the %s grain to identify the platform name", platform_grain_name
             )
             platform = __grains__.get(platform_grain_name)
             if not platform:
@@ -408,7 +399,7 @@ def index(
                     platform_grain_name
                 )
                 return ret
-            log.info("Using platform: {}".format(platform))
+            log.info("Using platform: %s", platform)
         else:
             ret[
                 "comment"
@@ -426,8 +417,9 @@ def index(
         ] = "No TextFSM templates path specified. Please configure in opts/pillar/function args."
         log.error(ret["comment"])
         return ret
-    log.debug("Using the saltenv: {}".format(saltenv))
-    log.debug("Caching {} using the Salt fileserver".format(textfsm_path))
+    log.debug(
+        "Caching %s(saltenv: %s) using the Salt fileserver", textfsm_path, saltenv
+    )
     textfsm_cachedir_ret = __salt__["cp.cache_dir"](
         textfsm_path,
         saltenv=saltenv,
@@ -435,8 +427,7 @@ def index(
         include_pat=include_pat,
         exclude_pat=exclude_pat,
     )
-    log.debug("Cache fun return:")
-    log.debug(textfsm_cachedir_ret)
+    log.debug("Cache fun return:\n%s", textfsm_cachedir_ret)
     if not textfsm_cachedir_ret:
         ret[
             "comment"
@@ -450,22 +441,18 @@ def index(
         "textfsm_index_file", "index"
     )
     index_file_path = os.path.join(textfsm_cachedir, index_file)
-    log.debug("Using the cached index file: {}".format(index_file_path))
-    log.debug("TextFSM templates cached under: {}".format(textfsm_cachedir))
+    log.debug("Using the cached index file: %s", index_file_path)
+    log.debug("TextFSM templates cached under: %s", textfsm_cachedir)
     textfsm_obj = clitable.CliTable(index_file_path, textfsm_cachedir)
     attrs = {"Command": command}
     platform_column_name = __opts__.get(
         "textfsm_platform_column_name"
     ) or __pillar__.get("textfsm_platform_column_name", "Platform")
-    log.info(
-        "Using the TextFSM platform idenfiticator: {}".format(platform_column_name)
-    )
+    log.info("Using the TextFSM platform idenfiticator: %s", platform_column_name)
     attrs[platform_column_name] = platform
-    log.debug(
-        "Processing the TextFSM index file using the attributes: {}".format(attrs)
-    )
+    log.debug("Processing the TextFSM index file using the attributes: %s", attrs)
     if not output and output_file:
-        log.debug("Processing the output from {}".format(output_file))
+        log.debug("Processing the output from %s", output_file)
         output = __salt__["cp.get_file_str"](output_file, saltenv=saltenv)
         if output is False:
             ret[
@@ -479,8 +466,7 @@ def index(
         ret["comment"] = "Please specify a valid output text or file"
         log.error(ret["comment"])
         return ret
-    log.debug("Processing the raw text:")
-    log.debug(output)
+    log.debug("Processing the raw text:\n%s", output)
     try:
         # Parse output through template
         textfsm_obj.ParseCmd(output, attrs)
@@ -597,7 +597,7 @@ def get_ssh_config(name, network_mask="", get_private_key=False):
         "{User}@{HostName} ifconfig".format(**ssh_config)
     )

-    log.info("Trying ssh -p {Port} {User}@{HostName} ifconfig".format(**ssh_config))
+    log.info("Trying ssh -p %(Port)s %(User)s@%(HostName)s ifconfig", ssh_config)
     reply = __salt__["cmd.shell"](command)
     log.info("--->\n%s", reply)
     target_network_range = ipaddress.ip_network(network_mask, strict=False)
@@ -1395,7 +1395,7 @@ def _get_images_dir():
     find legacy virt.images, then tries virt:images.
     """
     img_dir = __salt__["config.get"]("virt:images")
-    log.debug("Image directory from config option `virt:images`" " is %s", img_dir)
+    log.debug("Image directory from config option `virt:images` is %s", img_dir)
     return img_dir

@@ -1443,9 +1443,7 @@ def _zfs_image_create(
             )
         )
     elif destination_fs in existing_disk:
-        log.info(
-            "ZFS filesystem {} already exists. Skipping creation".format(destination_fs)
-        )
+        log.info("ZFS filesystem %s already exists. Skipping creation", destination_fs)
         blockdevice_path = os.path.join("/dev/zvol", pool, vm_name)
         return blockdevice_path

@@ -5867,7 +5865,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
                 # TODO create solution for 'dataset is busy'
                 time.sleep(3)
                 fs_name = disks[disk]["file"][len("/dev/zvol/") :]
-                log.info("Destroying VM ZFS volume {}".format(fs_name))
+                log.info("Destroying VM ZFS volume %s", fs_name)
                 __salt__["zfs.destroy"](name=fs_name, force=True)
             elif os.path.exists(disks[disk]["file"]):
                 os.remove(disks[disk]["file"])
@@ -7702,7 +7700,7 @@ def network_info(name=None, **kwargs):
             for net in nets
         }
     except libvirt.libvirtError as err:
-        log.debug("Silenced libvirt error: %s", str(err))
+        log.debug("Silenced libvirt error: %s", err)
     finally:
         conn.close()
     return result
@@ -8539,7 +8537,7 @@ def pool_info(name=None, **kwargs):
         ]
         result = {pool.name(): _pool_extract_infos(pool) for pool in pools}
     except libvirt.libvirtError as err:
-        log.debug("Silenced libvirt error: %s", str(err))
+        log.debug("Silenced libvirt error: %s", err)
     finally:
         conn.close()
     return result
@@ -8956,7 +8954,7 @@ def volume_infos(pool=None, volume=None, **kwargs):
         }
         return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
     except libvirt.libvirtError as err:
-        log.debug("Silenced libvirt error: %s", str(err))
+        log.debug("Silenced libvirt error: %s", err)
     finally:
         conn.close()
     return result
@@ -250,7 +250,7 @@ try:
     ):

         log.debug(
-            "pyVmomi not loaded: Incompatible versions " "of Python. See Issue #29537."
+            "pyVmomi not loaded: Incompatible versions of Python. See Issue #29537."
         )
         raise ImportError()
     HAS_PYVMOMI = True
@@ -3166,9 +3166,7 @@ def set_ntp_config(
     for host_name in host_names:
         host_ref = _get_host_ref(service_instance, host, host_name=host_name)
         date_time_manager = _get_date_time_mgr(host_ref)
-        log.debug(
-            "Configuring NTP Servers '{}' for host '{}'.".format(ntp_servers, host_name)
-        )
+        log.debug("Configuring NTP Servers '%s' for host '%s'.", ntp_servers, host_name)

         try:
             date_time_manager.UpdateDateTimeConfig(config=date_config)
@@ -3301,7 +3299,7 @@ def service_start(

     host_ref = _get_host_ref(service_instance, host, host_name=host_name)
     service_manager = _get_service_manager(host_ref)
-    log.debug("Starting the '{}' service on {}.".format(service_name, host_name))
+    log.debug("Starting the '%s' service on %s.", service_name, host_name)

     # Start the service
     try:
@@ -3443,7 +3441,7 @@ def service_stop(

     host_ref = _get_host_ref(service_instance, host, host_name=host_name)
     service_manager = _get_service_manager(host_ref)
-    log.debug("Stopping the '{}' service on {}.".format(service_name, host_name))
+    log.debug("Stopping the '%s' service on %s.", service_name, host_name)

     # Stop the service.
     try:
@@ -3583,7 +3581,7 @@ def service_restart(

     host_ref = _get_host_ref(service_instance, host, host_name=host_name)
     service_manager = _get_service_manager(host_ref)
-    log.debug("Restarting the '{}' service on {}.".format(service_name, host_name))
+    log.debug("Restarting the '%s' service on %s.", service_name, host_name)

     # Restart the service.
     try:
@@ -4181,9 +4179,8 @@ def vsan_add_disks(
             continue

         log.debug(
-            "Successfully added disks to the VSAN system for host '{}'.".format(
-                host_name
-            )
+            "Successfully added disks to the VSAN system for host '%s'.",
+            host_name,
         )
         # We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
         disk_names = []
@@ -4419,7 +4416,7 @@ def _get_dvs_config_dict(dvs_name, dvs_config):
     dvs_config
         The DVS config
     """
-    log.trace("Building the dict of the DVS '{}' config".format(dvs_name))
+    log.trace("Building the dict of the DVS '%s' config", dvs_name)
     conf_dict = {
         "name": dvs_name,
         "contact_email": dvs_config.contact.contact,
@@ -4445,9 +4442,7 @@ def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
     dvs_link_disc_protocl
         The DVS link discovery protocol
     """
-    log.trace(
-        "Building the dict of the DVS '{}' link discovery " "protocol".format(dvs_name)
-    )
+    log.trace("Building the dict of the DVS '%s' link discovery protocol", dvs_name)
     return {
         "operation": dvs_link_disc_protocol.operation,
         "protocol": dvs_link_disc_protocol.protocol,
@@ -4464,7 +4459,7 @@ def _get_dvs_product_info(dvs_name, dvs_product_info):
     dvs_product_info
         The DVS product info
     """
-    log.trace("Building the dict of the DVS '{}' product " "info".format(dvs_name))
+    log.trace("Building the dict of the DVS '%s' product info", dvs_name)
     return {
         "name": dvs_product_info.name,
         "vendor": dvs_product_info.vendor,
@@ -4482,7 +4477,7 @@ def _get_dvs_capability(dvs_name, dvs_capability):
     dvs_capability
         The DVS capability
     """
-    log.trace("Building the dict of the DVS '{}' capability" "".format(dvs_name))
+    log.trace("Building the dict of the DVS '%s' capability", dvs_name)
     return {
         "operation_supported": dvs_capability.dvsOperationSupported,
         "portgroup_operation_supported": dvs_capability.dvPortGroupOperationSupported,
@@ -4502,8 +4497,7 @@ def _get_dvs_infrastructure_traffic_resources(dvs_name, dvs_infra_traffic_ress):
         The DVS infrastructure traffic resources
     """
     log.trace(
-        "Building the dicts of the DVS '{}' infrastructure traffic "
-        "resources".format(dvs_name)
+        "Building the dicts of the DVS '%s' infrastructure traffic resources", dvs_name
     )
     res_dicts = []
     for res in dvs_infra_traffic_ress:
@@ -4753,7 +4747,7 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None):

         salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name
     """
-    log.trace("Creating dvs '{}' with dict = {}".format(dvs_name, dvs_dict))
+    log.trace("Creating dvs '%s' with dict = %s", dvs_name, dvs_dict)
     proxy_type = get_proxy_type()
     if proxy_type == "esxdatacenter":
         datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
@@ -4787,7 +4781,7 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None):
             dvs_create_spec.configSpec.infrastructureTrafficResourceConfig,
             dvs_dict["infrastructure_traffic_resource_pools"],
         )
-    log.trace("dvs_create_spec = {}".format(dvs_create_spec))
+    log.trace("dvs_create_spec = %s", dvs_create_spec)
     salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec)
     if "network_resource_management_enabled" in dvs_dict:
         dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs_name])
@@ -4830,7 +4824,7 @@ def update_dvs(dvs_dict, dvs, service_instance=None):
         salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1
     """
     # Remove ignored properties
-    log.trace("Updating dvs '{}' with dict = {}".format(dvs, dvs_dict))
+    log.trace("Updating dvs '%s' with dict = %s", dvs, dvs_dict)
     for prop in ["product_info", "capability", "uplink_names", "name"]:
         if prop in dvs_dict:
             del dvs_dict[prop]
@@ -4874,7 +4868,7 @@ def update_dvs(dvs_dict, dvs, service_instance=None):
             dvs_config.infrastructureTrafficResourceConfig,
             dvs_dict["infrastructure_traffic_resource_pools"],
         )
-    log.trace("dvs_config= {}".format(dvs_config))
+    log.trace("dvs_config= %s", dvs_config)
     salt.utils.vmware.update_dvs(dvs_ref, dvs_config_spec=dvs_config)
     if "network_resource_management_enabled" in dvs_dict:
         salt.utils.vmware.set_dvs_network_resource_management_enabled(
@@ -4893,7 +4887,7 @@ def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config):
     pg_default_port_config
         The dafault port config of the portgroup
     """
-    log.trace("Retrieving portgroup's '{}' out shaping " "config".format(pg_name))
+    log.trace("Retrieving portgroup's '%s' out shaping config", pg_name)
     out_shaping_policy = pg_default_port_config.outShapingPolicy
     if not out_shaping_policy:
         return {}
@@ -4915,7 +4909,7 @@ def _get_dvportgroup_security_policy(pg_name, pg_default_port_config):
     pg_default_port_config
         The dafault port config of the portgroup
     """
-    log.trace("Retrieving portgroup's '{}' security policy " "config".format(pg_name))
+    log.trace("Retrieving portgroup's '%s' security policy config", pg_name)
     sec_policy = pg_default_port_config.securityPolicy
     if not sec_policy:
         return {}
@@ -4936,7 +4930,7 @@ def _get_dvportgroup_teaming(pg_name, pg_default_port_config):
     pg_default_port_config
         The dafault port config of the portgroup
     """
-    log.trace("Retrieving portgroup's '{}' teaming" "config".format(pg_name))
+    log.trace("Retrieving portgroup's '%s' teaming config", pg_name)
     teaming_policy = pg_default_port_config.uplinkTeamingPolicy
     if not teaming_policy:
         return {}
@@ -5131,7 +5125,7 @@ def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf):
     out_shaping_conf
         The out shaping config
     """
-    log.trace("Building portgroup's '{}' out shaping " "policy".format(pg_name))
+    log.trace("Building portgroup's '%s' out shaping policy", pg_name)
     if out_shaping_conf.get("average_bandwidth"):
         out_shaping.averageBandwidth = vim.LongPolicy()
         out_shaping.averageBandwidth.value = out_shaping_conf["average_bandwidth"]
@@ -5159,7 +5153,7 @@ def _apply_dvportgroup_security_policy(pg_name, sec_policy, sec_policy_conf):
     sec_policy_conf
         The out shaping config
     """
-    log.trace("Building portgroup's '{}' security policy ".format(pg_name))
+    log.trace("Building portgroup's '%s' security policy", pg_name)
     if "allow_promiscuous" in sec_policy_conf:
         sec_policy.allowPromiscuous = vim.BoolPolicy()
         sec_policy.allowPromiscuous.value = sec_policy_conf["allow_promiscuous"]
@@ -5184,7 +5178,7 @@ def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf):
     teaming_conf
         The teaming config
     """
-    log.trace("Building portgroup's '{}' teaming".format(pg_name))
+    log.trace("Building portgroup's '%s' teaming", pg_name)
     if "notify_switches" in teaming_conf:
         teaming.notifySwitches = vim.BoolPolicy()
         teaming.notifySwitches.value = teaming_conf["notify_switches"]
@@ -5260,7 +5254,7 @@ def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf):
     pg_conf
         The portgroup config
     """
-    log.trace("Building portgroup's '{}' spec".format(pg_name))
+    log.trace("Building portgroup's '%s' spec", pg_name)
     if "name" in pg_conf:
         pg_spec.name = pg_conf["name"]
     if "description" in pg_conf:
@@ -5331,8 +5325,10 @@ def create_dvportgroup(portgroup_dict, portgroup_name, dvs, service_instance=Non
         portgroup_name=pg1 dvs=dvs1
     """
     log.trace(
-        "Creating portgroup'{}' in dvs '{}' "
-        "with dict = {}".format(portgroup_name, dvs, portgroup_dict)
+        "Creating portgroup '%s' in dvs '%s' with dict = %s",
+        portgroup_name,
+        dvs,
+        portgroup_dict,
     )
     proxy_type = get_proxy_type()
     if proxy_type == "esxdatacenter":
@@ -5382,8 +5378,10 @@ def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=True):
         portgroup=pg1 dvs=dvs1
     """
     log.trace(
-        "Updating portgroup'{}' in dvs '{}' "
-        "with dict = {}".format(portgroup, dvs, portgroup_dict)
+        "Updating portgroup '%s' in dvs '%s' with dict = %s",
+        portgroup,
+        dvs,
+        portgroup_dict,
     )
     proxy_type = get_proxy_type()
     if proxy_type == "esxdatacenter":
@@ -5447,7 +5445,7 @@ def remove_dvportgroup(portgroup, dvs, service_instance=None):

         salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1
     """
-    log.trace("Removing portgroup'{}' in dvs '{}' " "".format(portgroup, dvs))
+    log.trace("Removing portgroup '%s' in dvs '%s'", portgroup, dvs)
     proxy_type = get_proxy_type()
     if proxy_type == "esxdatacenter":
         datacenter = __salt__["esxdatacenter.get_details"]()["datacenter"]
@@ -5603,7 +5601,7 @@ def list_capability_definitions(service_instance=None):

 def _apply_policy_config(policy_spec, policy_dict):
     """Applies a policy dictionary to a policy spec"""
-    log.trace("policy_dict = {}".format(policy_dict))
+    log.trace("policy_dict = %s", policy_dict)
     if policy_dict.get("name"):
         policy_spec.name = policy_dict["name"]
     if policy_dict.get("description"):
@@ -5647,7 +5645,7 @@ def _apply_policy_config(policy_spec, policy_dict):
             subprofile_spec.capability = cap_specs
             subprofiles.append(subprofile_spec)
         policy_spec.constraints.subProfiles = subprofiles
-    log.trace("updated policy_spec = {}".format(policy_spec))
+    log.trace("updated policy_spec = %s", policy_spec)
     return policy_spec

@@ -5678,9 +5676,7 @@ def create_storage_policy(policy_name, policy_dict, service_instance=None):
         salt '*' vsphere.create_storage_policy policy_name='policy name'
             policy_dict="$policy_dict"
     """
-    log.trace(
-        "create storage policy '{}', dict = {}" "".format(policy_name, policy_dict)
-    )
+    log.trace("create storage policy '%s', dict = %s", policy_name, policy_dict)
     profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
     policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
     # Hardcode the storage profile resource type
@@ -5720,7 +5716,7 @@ def update_storage_policy(policy, policy_dict, service_instance=None):
         salt '*' vsphere.update_storage_policy policy='policy name'
             policy_dict="$policy_dict"
     """
-    log.trace("updating storage policy, dict = {}".format(policy_dict))
+    log.trace("updating storage policy, dict = %s", policy_dict)
     profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
     policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
     if not policies:
@@ -5757,9 +5753,7 @@ def list_default_storage_policy_of_datastore(datastore, service_instance=None):

         salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
     """
-    log.trace(
-        "Listing the default storage policy of datastore '{}'" "".format(datastore)
-    )
+    log.trace("Listing the default storage policy of datastore '%s'", datastore)
     # Find datastore
     target_ref = _get_proxy_target(service_instance)
     ds_refs = salt.utils.vmware.get_datastores(
@@ -5802,7 +5796,7 @@ def assign_default_storage_policy_to_datastore(
         salt '*' vsphere.assign_storage_policy_to_datastore
             policy='policy name' datastore=ds1
     """
-    log.trace("Assigning policy {} to datastore {}" "".format(policy, datastore))
+    log.trace("Assigning policy %s to datastore %s", policy, datastore)
     profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
     # Find policy
     policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
@@ -5902,9 +5896,7 @@ def _get_cluster_dict(cluster_name, cluster_ref):
         Reference to the cluster
     """

-    log.trace(
-        "Building a dictionary representation of cluster " "'{}'".format(cluster_name)
-    )
+    log.trace("Building a dictionary representation of cluster '%s'", cluster_name)
     props = salt.utils.vmware.get_properties_of_managed_object(
         cluster_ref, properties=["configurationEx"]
     )
@@ -5914,7 +5906,7 @@ def _get_cluster_dict(cluster_name, cluster_ref):
     }
     # Convert HA properties of interest
     ha_conf = props["configurationEx"].dasConfig
-    log.trace("ha_conf = {}".format(ha_conf))
+    log.trace("ha_conf = %s", ha_conf)
     res["ha"]["admission_control_enabled"] = ha_conf.admissionControlEnabled
     if ha_conf.admissionControlPolicy and isinstance(
         ha_conf.admissionControlPolicy,
@@ -5941,7 +5933,7 @@ def _get_cluster_dict(cluster_name, cluster_ref):
         res["ha"]["vm_monitoring"] = ha_conf.vmMonitoring
     # Convert DRS properties
     drs_conf = props["configurationEx"].drsConfig
-    log.trace("drs_conf = {}".format(drs_conf))
+    log.trace("drs_conf = %s", drs_conf)
     res["drs"]["vmotion_rate"] = 6 - drs_conf.vmotionRate
     res["drs"]["default_vm_behavior"] = drs_conf.defaultVmBehavior
     # vm_swap_placement
@@ -5956,7 +5948,7 @@ def _get_cluster_dict(cluster_name, cluster_ref):
     if int(vcenter_info.build) >= 3634794:  # 60u2
         # VSAN API is fully supported by the VC starting with 60u2
         vsan_conf = salt.utils.vsan.get_cluster_vsan_info(cluster_ref)
-        log.trace("vsan_conf = {}".format(vsan_conf))
+        log.trace("vsan_conf = %s", vsan_conf)
         res["vsan"] = {
             "enabled": vsan_conf.enabled,
             "auto_claim_storage": vsan_conf.defaultConfig.autoClaimStorage,
@@ -6022,8 +6014,7 @@ def list_cluster(datacenter=None, cluster=None, service_instance=None):
         cluster_ref = _get_proxy_target(service_instance)
         cluster = __salt__["esxcluster.get_details"]()["cluster"]
     log.trace(
-        "Retrieving representation of cluster '{}' in a "
-        "{} proxy".format(cluster, proxy_type)
+        "Retrieving representation of cluster '%s' in a %s proxy", cluster, proxy_type
     )
     return _get_cluster_dict(cluster, cluster_ref)

@@ -6040,7 +6031,7 @@ def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, vsan_61=True
     VSAN 6.1 config needs to be applied differently than the post VSAN 6.1 way.
     The type of configuration desired is dictated by the flag vsan_61.
     """
-    log.trace("Applying cluster dict {}".format(cluster_dict))
+    log.trace("Applying cluster dict %s", cluster_dict)
     if cluster_dict.get("ha"):
         ha_dict = cluster_dict["ha"]
         if not cluster_spec.dasConfig:
@@ -6150,7 +6141,7 @@ def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, vsan_61=True
             # If this remains set it caused an error
             vsan_config.defaultConfig.uuid = None
         vsan_config.defaultConfig.autoClaimStorage = vsan_dict["auto_claim_storage"]
-    log.trace("cluster_spec = {}".format(cluster_spec))
+    log.trace("cluster_spec = %s", cluster_spec)


 @depends(HAS_PYVMOMI)
@@ -6344,7 +6335,7 @@ def update_cluster(cluster_dict, datacenter=None, cluster=None, service_instance
     # also if HA was previously disabled it can be enabled automatically if
     # desired
     if vsan_spec:
-        log.trace("vsan_spec = {}".format(vsan_spec))
+        log.trace("vsan_spec = %s", vsan_spec)
         salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)

     # We need to retrieve again the properties and reapply them
@@ -6404,7 +6395,7 @@ def list_datastores_via_proxy(
     """
     target = _get_proxy_target(service_instance)
     target_name = salt.utils.vmware.get_managed_object_name(target)
-    log.trace("target name = {}".format(target_name))
+    log.trace("target name = %s", target_name)

     # Default to getting all disks if no filtering is done
     get_all_datastores = (
@@ -6415,8 +6406,7 @@ def list_datastores_via_proxy(
     # Get the ids of the disks with the scsi addresses
     if backing_disk_scsi_addresses:
         log.debug(
-            "Retrieving disk ids for scsi addresses "
-            "'{}'".format(backing_disk_scsi_addresses)
+            "Retrieving disk ids for scsi addresses '%s'", backing_disk_scsi_addresses
         )
         disk_ids = [
             d.canonicalName
@ -6424,7 +6414,7 @@ def list_datastores_via_proxy(
|
|||
target, scsi_addresses=backing_disk_scsi_addresses
|
||||
)
|
||||
]
|
||||
log.debug("Found disk ids '{}'".format(disk_ids))
|
||||
log.debug("Found disk ids '%s'", disk_ids)
|
||||
backing_disk_ids = (
|
||||
backing_disk_ids.extend(disk_ids) if backing_disk_ids else disk_ids
|
||||
)
|
||||
|
@ -6552,9 +6542,7 @@ def rename_datastore(datastore_name, new_datastore_name, service_instance=None):
salt '*' vsphere.rename_datastore old_name new_name
"""
# Argument validation
log.trace(
"Renaming datastore {} to {}" "".format(datastore_name, new_datastore_name)
)
log.trace("Renaming datastore %s to %s", datastore_name, new_datastore_name)
target = _get_proxy_target(service_instance)
datastores = salt.utils.vmware.get_datastores(
service_instance, target, datastore_names=[datastore_name]

@ -6586,7 +6574,7 @@ def remove_datastore(datastore, service_instance=None):

salt '*' vsphere.remove_datastore ds_name
"""
log.trace("Removing datastore '{}'".format(datastore))
log.trace("Removing datastore '%s'", datastore)
target = _get_proxy_target(service_instance)
datastores = salt.utils.vmware.get_datastores(
service_instance, reference=target, datastore_names=[datastore]

@ -6659,7 +6647,7 @@ def add_license(key, description, safety_checks=True, service_instance=None):

salt '*' vsphere.add_license key=<license_key> desc='License desc'
"""
log.trace("Adding license '{}'".format(key))
log.trace("Adding license '%s'", key)
salt.utils.vmware.add_license(service_instance, key, description)
return True

@ -6688,7 +6676,7 @@ def _get_entity(service_instance, entity):
Entity dict in the format above
"""

log.trace("Retrieving entity: {}".format(entity))
log.trace("Retrieving entity: %s", entity)
if entity["type"] == "cluster":
dc_ref = salt.utils.vmware.get_datacenter(
service_instance, entity["datacenter"]

@ -6753,7 +6741,7 @@ def list_assigned_licenses(
entity={type:cluster,datacenter:dc,cluster:cl}
entiy_display_name=cl
"""
log.trace("Listing assigned licenses of entity {}" "".format(entity))
log.trace("Listing assigned licenses of entity %s", entity)
_validate_entity(entity)

assigned_licenses = salt.utils.vmware.get_assigned_licenses(

@ -6816,7 +6804,7 @@ def assign_license(
salt '*' vsphere.assign_license license_key=00000:00000
license name=test entity={type:cluster,datacenter:dc,cluster:cl}
"""
log.trace("Assigning license {} to entity {}" "".format(license_key, entity))
log.trace("Assigning license %s to entity %s", license_key, entity)
_validate_entity(entity)
if safety_checks:
licenses = salt.utils.vmware.get_licenses(service_instance)
@ -6918,9 +6906,12 @@ def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None):
"""
host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"]
log.trace("Retrieving disks if host '{}'".format(hostname))
log.trace("disk ids = {}".format(disk_ids))
log.trace("scsi_addresses = {}".format(scsi_addresses))
log.trace(
"Retrieving disks of host '%s'; disc ids = %s; scsi_address = %s",
hostname,
disk_ids,
scsi_addresses,
)
# Default to getting all disks if no filtering is done
get_all_disks = True if not (disk_ids or scsi_addresses) else False
ret_list = []

@ -6986,18 +6977,16 @@ def erase_disk_partitions(disk_id=None, scsi_address=None, service_instance=None
)
disk_id = scsi_address_to_lun[scsi_address].canonicalName
log.trace(
"[{}] Got disk id '{}' for scsi address '{}'"
"".format(hostname, disk_id, scsi_address)
"[%s] Got disk id '%s' for scsi address '%s'",
hostname,
disk_id,
scsi_address,
)
log.trace(
"Erasing disk partitions on disk '{}' in host '{}'" "".format(disk_id, hostname)
)
log.trace("Erasing disk partitions on disk '%s' in host '%s'", disk_id, hostname)
salt.utils.vmware.erase_disk_partitions(
service_instance, host_ref, disk_id, hostname=hostname
)
log.info(
"Erased disk partitions on disk '{}' on host '{}'" "".format(disk_id, hostname)
)
log.info("Erased disk partitions on disk '%s' on host '%s'", disk_id, hostname)
return True
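Several vsphere hunks, list_disks above among them, also fold multiple consecutive trace calls into a single call with several placeholders, so related fields land in one log record instead of three. A rough sketch of the before and after shapes, with made-up values; Salt defines a custom TRACE level, but plain DEBUG is used here to stay within the standard library:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

hostname = "esxi-01"                 # illustrative values
disk_ids = ["naa.600508b1001c"]
scsi_addresses = ["vmhba0:C0:T0:L0"]

# Before: three separate records that other threads' output can interleave.
log.debug("host = %s", hostname)
log.debug("disk ids = %s", disk_ids)
log.debug("scsi_addresses = %s", scsi_addresses)

# After: one record carrying all of the related fields.
log.debug(
    "Retrieving disks of host '%s'; disk ids = %s; scsi_addresses = %s",
    hostname, disk_ids, scsi_addresses,
)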
@ -7045,12 +7034,12 @@ def list_disk_partitions(disk_id=None, scsi_address=None, service_instance=None)
)
disk_id = scsi_address_to_lun[scsi_address].canonicalName
log.trace(
"[{}] Got disk id '{}' for scsi address '{}'"
"".format(hostname, disk_id, scsi_address)
"[%s] Got disk id '%s' for scsi address '%s'",
hostname,
disk_id,
scsi_address,
)
log.trace(
"Listing disk partitions on disk '{}' in host '{}'" "".format(disk_id, hostname)
)
log.trace("Listing disk partitions on disk '%s' in host '%s'", disk_id, hostname)
partition_info = salt.utils.vmware.get_disk_partition_info(host_ref, disk_id)
ret_list = []
# NOTE: 1. The layout view has an extra 'None' partition for free space

@ -7105,7 +7094,7 @@ def list_diskgroups(cache_disk_ids=None, service_instance=None):
"""
host_ref = _get_proxy_target(service_instance)
hostname = __proxy__["esxi.get_details"]()["esxi_host"]
log.trace("Listing diskgroups in '{}'".format(hostname))
log.trace("Listing diskgroups in '%s'", hostname)
get_all_diskgroups = True if not cache_disk_ids else False
ret_list = []
for dg in salt.utils.vmware.get_diskgroups(

@ -7333,7 +7322,7 @@ def remove_capacity_from_diskgroup(
"No diskgroup with cache disk id '{}' was found in ESXi "
"host '{}'".format(cache_disk_id, hostname)
)
log.trace("data_evacuation = {}".format(data_evacuation))
log.trace("data_evacuation = %s", data_evacuation)
salt.utils.vsan.remove_capacity_from_diskgroup(
service_instance,
host_ref,

@ -7377,7 +7366,7 @@ def remove_diskgroup(cache_disk_id, data_accessibility=True, service_instance=No
"No diskgroup with cache disk id '{}' was found in ESXi "
"host '{}'".format(cache_disk_id, hostname)
)
log.trace("data accessibility = {}".format(data_accessibility))
log.trace("data accessibility = %s", data_accessibility)
salt.utils.vsan.remove_diskgroup(
service_instance, host_ref, diskgroups[0], data_accessibility=data_accessibility
)

@ -7405,7 +7394,7 @@ def get_host_cache(service_instance=None):
hostname = __proxy__["esxi.get_details"]()["esxi_host"]
hci = salt.utils.vmware.get_host_cache(host_ref)
if not hci:
log.debug("Host cache not configured on host '{}'".format(hostname))
log.debug("Host cache not configured on host '%s'", hostname)
ret_dict["enabled"] = False
return ret_dict

@ -8266,7 +8255,7 @@ def _get_proxy_target(service_instance):
"ESXi host '{}' was not found".format(details["esxi_host"])
)
reference = references[0]
log.trace("reference = {}".format(reference))
log.trace("reference = %s", reference)
return reference
@ -8443,13 +8432,10 @@ def _apply_hardware_version(hardware_version, config_spec, operation="add"):
the possibles values: 'add' and 'edit', the default value is 'add'
"""
log.trace(
"Configuring virtual machine hardware "
"version version={}".format(hardware_version)
"Configuring virtual machine hardware version version=%s", hardware_version
)
if operation == "edit":
log.trace(
"Scheduling hardware version " "upgrade to {}".format(hardware_version)
)
log.trace("Scheduling hardware version upgrade to %s", hardware_version)
scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo()
scheduled_hardware_upgrade.upgradePolicy = "always"
scheduled_hardware_upgrade.versionKey = hardware_version

@ -8468,9 +8454,7 @@ def _apply_cpu_config(config_spec, cpu_props):
cpu_props
CPU properties dict
"""
log.trace(
"Configuring virtual machine CPU " "settings cpu_props={}".format(cpu_props)
)
log.trace("Configuring virtual machine CPU settings cpu_props=%s", cpu_props)
if "count" in cpu_props:
config_spec.numCPUs = int(cpu_props["count"])
if "cores_per_socket" in cpu_props:

@ -8493,7 +8477,7 @@ def _apply_memory_config(config_spec, memory):
memory
Memory size and unit
"""
log.trace("Configuring virtual machine memory " "settings memory={}".format(memory))
log.trace("Configuring virtual machine memory settings memory=%s", memory)
if "size" in memory and "unit" in memory:
try:
if memory["unit"].lower() == "kb":

@ -8546,9 +8530,7 @@ def _apply_advanced_config(config_spec, advanced_config, vm_extra_config=None):
vm_extra_config
Virtual machine vm_ref.config.extraConfig object
"""
log.trace(
"Configuring advanced configuration " "parameters {}".format(advanced_config)
)
log.trace("Configuring advanced configuration parameters %s", advanced_config)
if isinstance(advanced_config, str):
raise salt.exceptions.ArgumentValueError(
"The specified 'advanced_configs' configuration "

@ -8620,9 +8602,7 @@ def _delete_advanced_config(config_spec, advanced_config, vm_extra_config):
vm_extra_config
Virtual machine vm_ref.config.extraConfig object
"""
log.trace(
"Removing advanced configuration " "parameters {}".format(advanced_config)
)
log.trace("Removing advanced configuration parameters %s", advanced_config)
if isinstance(advanced_config, str):
raise salt.exceptions.ArgumentValueError(
"The specified 'advanced_configs' configuration "

@ -8750,19 +8730,16 @@ def _apply_hard_disk(
Full file name of the vm disk
"""
log.trace(
"Configuring hard disk {} size={}, unit={}, "
"controller_key={}, thin_provision={}, "
"eagerly_scrub={}, datastore={}, "
"filename={}".format(
disk_label,
size,
unit,
controller_key,
thin_provision,
eagerly_scrub,
datastore,
filename,
)
"Configuring hard disk %s size=%s, unit=%s, controller_key=%s, "
"thin_provision=%s, eagerly_scrub=%s, datastore=%s, filename=%s",
disk_label,
size,
unit,
controller_key,
thin_provision,
eagerly_scrub,
datastore,
filename,
)
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.device = vim.vm.device.VirtualDisk()
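Many of the rewritten format strings above had been wrapped with implicit adjacent-literal concatenation, including stray empty literals such as "..." "".format(...). Python joins adjacent string literals at compile time, which is what makes both the old wrapping and the cleaned-up single strings equivalent. A small illustration, with the placeholder count matching the hard-disk trace above (values assumed):

# Adjacent string literals are concatenated at compile time, so the
# wrapped template below is a single string with eight placeholders.
msg = ("Configuring hard disk %s size=%s, unit=%s, controller_key=%s, "
       "thin_provision=%s, eagerly_scrub=%s, datastore=%s, filename=%s")
assert msg.count("%s") == 8

# The hazard the cleanup removes: an empty literal in the middle of a
# pair, as in "...host '{}'" "".format(...), concatenates "" and adds
# nothing but visual noise.
assert "a" "" "b" == "ab"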
@ -8809,8 +8786,7 @@ def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=""
string, network adapter name
"""
log.trace(
"Configuring virtual machine network "
"adapter adapter_type={}".format(adapter_type)
"Configuring virtual machine network adapter adapter_type=%s", adapter_type
)
if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]:
edited_network_adapter = salt.utils.vmware.get_network_adapter_type(

@ -8821,12 +8797,10 @@ def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=""
else:
if network_adapter:
log.trace(
"Changing type of '{}' from"
" '{}' to '{}'".format(
network_adapter.deviceInfo.label,
type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
adapter_type,
)
"Changing type of '%s' from '%s' to '%s'",
network_adapter.deviceInfo.label,
type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
adapter_type,
)
else:
# If device is edited and type not specified or does not match,

@ -8834,17 +8808,17 @@ def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=""
if network_adapter:
if adapter_type:
log.error(
"Cannot change type of '{}' to '{}'. "
"Not changing type".format(
network_adapter.deviceInfo.label, adapter_type
)
"Cannot change type of '%s' to '%s'. Not changing type",
network_adapter.deviceInfo.label,
adapter_type,
)
edited_network_adapter = network_adapter
else:
if not adapter_type:
log.trace(
"The type of '{}' has not been specified. "
"Creating of default type 'vmxnet3'".format(network_adapter_label)
"The type of '%s' has not been specified. "
"Creating of default type 'vmxnet3'",
network_adapter_label,
)
edited_network_adapter = vim.vm.device.VirtualVmxnet3()
return edited_network_adapter

@ -8865,12 +8839,11 @@ def _create_network_backing(network_name, switch_type, parent_ref):
Parent reference to search for network
"""
log.trace(
"Configuring virtual machine network backing network_name={} "
"switch_type={} parent={}".format(
network_name,
switch_type,
salt.utils.vmware.get_managed_object_name(parent_ref),
)
"Configuring virtual machine network backing network_name=%s "
"switch_type=%s parent=%s",
network_name,
switch_type,
salt.utils.vmware.get_managed_object_name(parent_ref),
)
backing = {}
if network_name:

@ -8952,11 +8925,13 @@ def _apply_network_adapter_config(
adapter_type.strip().lower()
switch_type.strip().lower()
log.trace(
"Configuring virtual machine network adapter "
"network_adapter_label={} network_name={} "
"adapter_type={} switch_type={} mac={}".format(
network_adapter_label, network_name, adapter_type, switch_type, mac
)
"Configuring virtual machine network adapter network_adapter_label=%s "
"network_name=%s adapter_type=%s switch_type=%s mac=%s",
network_adapter_label,
network_name,
adapter_type,
switch_type,
mac,
)
network_spec = vim.vm.device.VirtualDeviceSpec()
network_spec.device = _create_adapter_type(

@ -9025,10 +9000,13 @@ def _apply_scsi_controller(
bus_sharing: 'no_sharing' or 'virtual_sharing' or 'physical_sharing'
"""
log.trace(
"Configuring scsi controller adapter={} adapter_type={} "
"bus_sharing={} key={} bus_number={}".format(
adapter, adapter_type, bus_sharing, key, bus_number
)
"Configuring scsi controller adapter=%s adapter_type=%s "
"bus_sharing=%s key=%s bus_number=%s",
adapter,
adapter_type,
bus_sharing,
key,
bus_number,
)
scsi_spec = vim.vm.device.VirtualDeviceSpec()
if adapter_type == "lsilogic":
@ -9083,7 +9061,7 @@ def _create_ide_controllers(ide_controllers):
keys = range(-200, -250, -1)
if ide_controllers:
devs = [ide["adapter"] for ide in ide_controllers]
log.trace("Creating IDE controllers {}".format(devs))
log.trace("Creating IDE controllers %s", devs)
for ide, key in zip(ide_controllers, keys):
ide_ctrls.append(
_apply_ide_controller_config(ide["adapter"], "add", key, abs(key + 200))

@ -9109,8 +9087,7 @@ def _apply_ide_controller_config(ide_controller_label, operation, key, bus_numbe
Device bus number property
"""
log.trace(
"Configuring IDE controller "
"ide_controller_label={}".format(ide_controller_label)
"Configuring IDE controller ide_controller_label=%s", ide_controller_label
)
ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController()

@ -9139,7 +9116,7 @@ def _create_sata_controllers(sata_controllers):
keys = range(-15000, -15050, -1)
if sata_controllers:
devs = [sata["adapter"] for sata in sata_controllers]
log.trace("Creating SATA controllers {}".format(devs))
log.trace("Creating SATA controllers %s", devs)
for sata, key in zip(sata_controllers, keys):
sata_ctrls.append(
_apply_sata_controller_config(

@ -9167,8 +9144,7 @@ def _apply_sata_controller_config(sata_controller_label, operation, key, bus_num
Device bus number property
"""
log.trace(
"Configuring SATA controller "
"sata_controller_label={}".format(sata_controller_label)
"Configuring SATA controller sata_controller_label=%s", sata_controller_label
)
sata_spec = vim.vm.device.VirtualDeviceSpec()
sata_spec.device = vim.vm.device.VirtualAHCIController()

@ -9242,11 +9218,12 @@ def _apply_cd_drive(
allow_guest_control:
"""
log.trace(
"Configuring CD/DVD drive drive_label={} "
"device_type={} client_device={} "
"datastore_iso_file={}".format(
drive_label, device_type, client_device, datastore_iso_file
)
"Configuring CD/DVD drive drive_label=%s device_type=%s "
"client_device=%s datastore_iso_file=%s",
drive_label,
device_type,
client_device,
datastore_iso_file,
)
drive_spec = vim.vm.device.VirtualDeviceSpec()
drive_spec.device = vim.vm.device.VirtualCdrom()

@ -9352,13 +9329,11 @@ def _apply_serial_port(serial_device_spec, key, operation="add"):
yield: False
"""
log.trace(
"Creating serial port adapter={} type={} connectable={} "
"yield={}".format(
serial_device_spec["adapter"],
serial_device_spec["type"],
serial_device_spec["connectable"],
serial_device_spec["yield"],
)
"Creating serial port adapter=%s type=%s connectable=%s yield=%s",
serial_device_spec["adapter"],
serial_device_spec["type"],
serial_device_spec["connectable"],
serial_device_spec["yield"],
)
device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.device = vim.vm.device.VirtualSerialPort()

@ -9434,7 +9409,7 @@ def _create_disks(service_instance, disks, scsi_controllers=None, parent=None):
keys = range(-2000, -2050, -1)
if disks:
devs = [disk["adapter"] for disk in disks]
log.trace("Creating disks {}".format(devs))
log.trace("Creating disks %s", devs)
for disk, key in zip(disks, keys):
# create the disk
filename, datastore, datastore_ref = None, None, None

@ -9504,7 +9479,7 @@ def _create_scsi_devices(scsi_devices):
scsi_specs = []
if scsi_devices:
devs = [scsi["adapter"] for scsi in scsi_devices]
log.trace("Creating SCSI devices {}".format(devs))
log.trace("Creating SCSI devices %s", devs)
# unitNumber for disk attachment, 0:0 1st 0 is the controller busNumber,
# 2nd is the unitNumber
for (key, scsi_controller) in zip(keys, scsi_devices):

@ -9546,7 +9521,7 @@ def _create_network_adapters(network_interfaces, parent=None):
keys = range(-4000, -4050, -1)
if network_interfaces:
devs = [inter["adapter"] for inter in network_interfaces]
log.trace("Creating network interfaces {}".format(devs))
log.trace("Creating network interfaces %s", devs)
for interface, key in zip(network_interfaces, keys):
network_spec = _apply_network_adapter_config(
key,

@ -9586,7 +9561,7 @@ def _create_serial_ports(serial_ports):
keys = range(-9000, -9050, -1)
if serial_ports:
devs = [serial["adapter"] for serial in serial_ports]
log.trace("Creating serial ports {}".format(devs))
log.trace("Creating serial ports %s", devs)
for port, key in zip(serial_ports, keys):
serial_port_device = _apply_serial_port(port, key, "add")
ports.append(serial_port_device)

@ -9611,7 +9586,7 @@ def _create_cd_drives(cd_drives, controllers=None, parent_ref=None):
keys = range(-3000, -3050, -1)
if cd_drives:
devs = [dvd["adapter"] for dvd in cd_drives]
log.trace("Creating cd/dvd drives {}".format(devs))
log.trace("Creating cd/dvd drives %s", devs)
for drive, key in zip(cd_drives, keys):
# if a controller is not available/cannot be created we should use the
# one which is available by default, this is 'IDE 0'
@ -10043,7 +10018,7 @@ def _update_disks(disks_old_new):
disk_changes = []
if disks_old_new:
devs = [disk["old"]["address"] for disk in disks_old_new]
log.trace("Updating disks {}".format(devs))
log.trace("Updating disks %s", devs)
for item in disks_old_new:
current_disk = item["old"]
next_disk = item["new"]

@ -10062,14 +10037,12 @@ def _update_disks(disks_old_new):
)
)
log.trace(
"Virtual machine disk will be updated "
"size={} unit={} controller_key={} "
"unit_number={}".format(
next_disk["size"],
next_disk["unit"],
current_disk["controller_key"],
current_disk["unit_number"],
)
"Virtual machine disk will be updated size=%s unit=%s "
"controller_key=%s unit_number=%s",
next_disk["size"],
next_disk["unit"],
current_disk["controller_key"],
current_disk["unit_number"],
)
device_config_spec = _apply_hard_disk(
current_disk["unit_number"],

@ -10098,7 +10071,7 @@ def _update_scsi_devices(scsis_old_new, current_disks):
device_config_specs = []
if scsis_old_new:
devs = [scsi["old"]["adapter"] for scsi in scsis_old_new]
log.trace("Updating SCSI controllers {}".format(devs))
log.trace("Updating SCSI controllers %s", devs)
for item in scsis_old_new:
next_scsi = item["new"]
current_scsi = item["old"]

@ -10106,14 +10079,12 @@ def _update_scsi_devices(scsis_old_new, current_disks):
difference.ignore_unset_values = False
if difference.changed():
log.trace(
"Virtual machine scsi device will be updated "
"key={} bus_number={} type={} "
"bus_sharing={}".format(
current_scsi["key"],
current_scsi["bus_number"],
next_scsi["type"],
next_scsi["bus_sharing"],
)
"Virtual machine scsi device will be updated key=%s "
"bus_number=%s type=%s bus_sharing=%s",
current_scsi["key"],
current_scsi["bus_number"],
next_scsi["type"],
next_scsi["bus_sharing"],
)
# The sharedBus property is not optional
# The type can only be updated if we delete the original

@ -10174,7 +10145,7 @@ def _update_network_adapters(interface_old_new, parent):
network_changes = []
if interface_old_new:
devs = [inter["old"]["mac"] for inter in interface_old_new]
log.trace("Updating network interfaces {}".format(devs))
log.trace("Updating network interfaces %s", devs)
for item in interface_old_new:
current_interface = item["old"]
next_interface = item["new"]

@ -10183,13 +10154,11 @@ def _update_network_adapters(interface_old_new, parent):
if difference.changed():
log.trace(
"Virtual machine network adapter will be updated "
"switch_type={} name={} adapter_type={} "
"mac={}".format(
next_interface["switch_type"],
next_interface["name"],
current_interface["adapter_type"],
current_interface["mac"],
)
"switch_type=%s name=%s adapter_type=%s mac=%s",
next_interface["switch_type"],
next_interface["name"],
current_interface["adapter_type"],
current_interface["mac"],
)
device_config_spec = _apply_network_adapter_config(
current_interface["key"],

@ -10216,7 +10185,7 @@ def _update_serial_ports(serial_old_new):
serial_changes = []
if serial_old_new:
devs = [serial["old"]["adapter"] for serial in serial_old_new]
log.trace("Updating serial ports {}".format(devs))
log.trace("Updating serial ports %s", devs)
for item in serial_old_new:
current_serial = item["old"]
next_serial = item["new"]

@ -10247,7 +10216,7 @@ def _update_cd_drives(drives_old_new, controllers=None, parent=None):
cd_changes = []
if drives_old_new:
devs = [drive["old"]["adapter"] for drive in drives_old_new]
log.trace("Updating cd/dvd drives {}".format(devs))
log.trace("Updating cd/dvd drives %s", devs)
for item in drives_old_new:
current_drive = item["old"]
new_drive = item["new"]

@ -10289,7 +10258,7 @@ def _delete_device(device):
device
Device data type object
"""
log.trace("Deleting device with type {}".format(type(device)))
log.trace("Deleting device with type %s", type(device))
device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
device_spec.device = device
@ -11333,9 +11302,11 @@ def register_vm(name, datacenter, placement, vmx_path, service_instance=None):
Default is None.
"""
log.trace(
"Registering virtual machine with properties "
"datacenter={}, placement={}, "
"vmx_path={}".format(datacenter, placement, vmx_path)
"Registering virtual machine with properties datacenter=%s, "
"placement=%s, vmx_path=%s",
datacenter,
placement,
vmx_path,
)
datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
if "cluster" in placement:

@ -11411,7 +11382,7 @@ def power_on_vm(name, datacenter=None, service_instance=None):
salt '*' vsphere.power_on_vm name=my_vm

"""
log.trace("Powering on virtual machine {}".format(name))
log.trace("Powering on virtual machine %s", name)
vm_properties = ["name", "summary.runtime.powerState"]
virtual_machine = salt.utils.vmware.get_vm_by_property(
service_instance, name, datacenter=datacenter, vm_properties=vm_properties

@ -11452,7 +11423,7 @@ def power_off_vm(name, datacenter=None, service_instance=None):
salt '*' vsphere.power_off_vm name=my_vm

"""
log.trace("Powering off virtual machine {}".format(name))
log.trace("Powering off virtual machine %s", name)
vm_properties = ["name", "summary.runtime.powerState"]
virtual_machine = salt.utils.vmware.get_vm_by_property(
service_instance, name, datacenter=datacenter, vm_properties=vm_properties
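One practical effect of the vsphere changes above, sketched under the assumption of a logger capped at INFO: with lazy arguments the level check runs before any rendering, so a suppressed trace or debug record never pays for stringifying a large spec object. An explicit isEnabledFor() guard shows what the lazy form gives implicitly; the logger name is illustrative:

import logging

log = logging.getLogger("example.vsphere")  # illustrative name
log.setLevel(logging.INFO)                  # DEBUG records will be dropped

big_spec = {"disks": list(range(10_000))}   # stand-in for a cluster_spec

# Explicit guard: skip the logging call entirely when disabled.
if log.isEnabledFor(logging.DEBUG):
    log.debug("cluster_spec = %s", big_spec)

# Lazy arguments achieve the same without the guard: the level check
# happens first, and str(big_spec) is never computed for a dropped record.
log.debug("cluster_spec = %s", big_spec)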
@ -152,8 +152,7 @@ def _srvmgr(cmd, return_json=False):
ret = __salt__["cmd.run_all"](cmd, shell="powershell", python_shell=True)

if ret["retcode"] != 0:
msg = "Unable to execute command: {}\nError: {}" "".format(cmd, ret["stderr"])
log.error(msg)
log.error("Unable to execute command: %s\nError: %s", cmd, ret["stderr"])

return ret

@ -179,10 +178,10 @@ def _prepare_settings(pspath, settings):
prepared_settings = []
for setting in settings:
if setting.get("name", None) is None:
log.warning("win_iis: Setting has no name: {}".format(setting))
log.warning("win_iis: Setting has no name: %s", setting)
continue
if setting.get("filter", None) is None:
log.warning("win_iis: Setting has no filter: {}".format(setting))
log.warning("win_iis: Setting has no filter: %s", setting)
continue
match = re.search(r"Collection\[(\{.*\})\]", setting["name"])
if match:

@ -192,7 +191,7 @@ def _prepare_settings(pspath, settings):
pspath, setting["filter"], name, match_dict
)
if index == -1:
log.warning("win_iis: No match found for setting: {}".format(setting))
log.warning("win_iis: No match found for setting: %s", setting)
else:
setting["name"] = setting["name"].replace(match.group(1), str(index))
prepared_settings.append(setting)

@ -389,7 +388,7 @@ def modify_site(name, sourcepath=None, apppool=None):
current_sites = list_sites()

if name not in current_sites:
log.debug("Site '{}' not defined.".format(name))
log.debug("Site '%s' not defined.", name)
return False

ps_cmd = list()

@ -410,9 +409,9 @@ def modify_site(name, sourcepath=None, apppool=None):
if apppool:

if apppool in list_apppools():
log.debug("Utilizing pre-existing application pool: {}" "".format(apppool))
log.debug("Utilizing pre-existing application pool: %s", apppool)
else:
log.debug("Application pool will be created: {}".format(apppool))
log.debug("Application pool will be created: %s", apppool)
create_apppool(apppool)

# If ps_cmd isn't empty, we need to add a semi-colon to run two commands

@ -2206,7 +2205,7 @@ def set_webapp_settings(name, site, settings):
log.error("Failed to change settings: %s", failed_settings)
return False

log.debug("Settings configured successfully: {}".format(settings.keys()))
log.debug("Settings configured successfully: %s", list(settings))
return True
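In the _srvmgr hunk above the intermediate msg variable disappears entirely: the template and its arguments go straight to log.error(). A self-contained sketch of the same shape; run() and its return value are stand-ins, not the Salt API:

import logging

log = logging.getLogger(__name__)

def run(cmd):
    # Stand-in for __salt__["cmd.run_all"]; the return value is assumed.
    return {"retcode": 1, "stderr": "Access denied"}

cmd = "Get-Website"  # illustrative command
ret = run(cmd)
if ret["retcode"] != 0:
    # Before: msg = "...{}...".format(cmd, ...); log.error(msg)
    # After: hand the template and arguments straight to the logger,
    # deferring interpolation to the handler.
    log.error("Unable to execute command: %s\nError: %s", cmd, ret["stderr"])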
@ -4446,7 +4446,8 @@ class _policy_info:
except Exception: # pylint: disable=broad-except
userSid = win32security.ConvertSidToStringSid(_sid)
log.warning(
'Unable to convert SID "%s" to a friendly name. The SID will be disaplayed instead of a user/group name.',
"Unable to convert SID '%s' to a friendly name. "
"The SID will be displayed instead of a user/group name.",
userSid,
)
usernames.append(userSid)

@ -4800,7 +4801,7 @@ def _load_policy_definitions(path="c:\\Windows\\PolicyDefinitions", language="en
# Only process ADMX files, any other file will cause a
# stacktrace later on
if not admx_file_ext == ".admx":
log.debug("{} is not an ADMX file".format(t_admx_file))
log.debug("%s is not an ADMX file", t_admx_file)
continue
admx_file = os.path.join(root, t_admx_file)
# Parse xml for the ADMX file

@ -5167,12 +5168,12 @@ def _set_audit_file_data(option, value):
# The value is not None, make the change
row["Inclusion Setting"] = auditpol_values[value]
row["Setting Value"] = value
log.trace("LGPO: Setting {} to {}".format(option, value))
log.trace("LGPO: Setting %s to %s", option, value)
writer.writerow(row)
else:
# value is None, remove it by not writing it to the
# temp file
log.trace("LGPO: Removing {}".format(option))
log.trace("LGPO: Removing %s", option)
value_written = True
# If it's not the value we're setting, just write it
else:

@ -5184,7 +5185,7 @@ def _set_audit_file_data(option, value):
if not value_written:
if not value == "None":
# value is not None, write the new value
log.trace("LGPO: Setting {} to {}".format(option, value))
log.trace("LGPO: Setting %s to %s", option, value)
defaults = _get_advaudit_defaults(option)
writer.writerow(
{

@ -5265,16 +5266,17 @@ def _set_advaudit_value(option, value):
# Only log this error, it will be in effect the next time the machine
# updates its policy
log.error(
"Failed to apply audit setting: {}\n"
"Policy will take effect on next GPO update".format(option)
"Failed to apply audit setting: %s\n"
"Policy will take effect on next GPO update",
option,
)

# Update __context__
if value is None:
log.debug("LGPO: Removing Advanced Audit data: {}".format(option))
log.debug("LGPO: Removing Advanced Audit data: %s", option)
__context__["lgpo.adv_audit_data"].pop(option)
else:
log.debug("LGPO: Updating Advanced Audit data: {}: {}".format(option, value))
log.debug("LGPO: Updating Advanced Audit data: %s: %s", option, value)
__context__["lgpo.adv_audit_data"][option] = value

return True

@ -5285,14 +5287,14 @@ def _get_netsh_value(profile, option):
__context__["lgpo.netsh_data"] = {}

if profile not in __context__["lgpo.netsh_data"]:
log.debug("LGPO: Loading netsh data for {} profile".format(profile))
log.debug("LGPO: Loading netsh data for %s profile", profile)
settings = salt.utils.win_lgpo_netsh.get_all_settings(
profile=profile, store="lgpo"
)
__context__["lgpo.netsh_data"].update({profile: settings})
log.trace(
"LGPO: netsh returning value: {}"
"".format(__context__["lgpo.netsh_data"][profile][option])
"LGPO: netsh returning value: %s",
__context__["lgpo.netsh_data"][profile][option],
)
return __context__["lgpo.netsh_data"][profile][option]

@ -5301,11 +5303,11 @@ def _set_netsh_value(profile, section, option, value):
if section not in ("firewallpolicy", "settings", "logging", "state"):
raise ValueError("LGPO: Invalid section: {}".format(section))
log.trace(
"LGPO: Setting the following\n"
"Profile: {}\n"
"Section: {}\n"
"Option: {}\n"
"Value: {}".format(profile, section, option, value)
"LGPO: Setting the following\nProfile: %s\nSection: %s\nOption: %s\nValue: %s",
profile,
section,
option,
value,
)
if section == "firewallpolicy":
salt.utils.win_lgpo_netsh.set_firewall_settings(

@ -5330,7 +5332,7 @@ def _set_netsh_value(profile, section, option, value):
salt.utils.win_lgpo_netsh.set_logging_settings(
profile=profile, setting=option, value=value, store="lgpo"
)
log.trace("LGPO: Clearing netsh data for {} profile".format(profile))
log.trace("LGPO: Clearing netsh data for %s profile", profile)
__context__["lgpo.netsh_data"].pop(profile)
return True

@ -5442,7 +5444,7 @@ def _validateSetting(value, policy):
True
if the Policy has 'Children', we'll validate their settings too
"""
log.debug("validating {} for policy {}".format(value, policy))
log.debug("validating %s for policy %s", value, policy)
if "Settings" in policy:
if policy["Settings"]:
if isinstance(policy["Settings"], list):

@ -5818,7 +5820,7 @@ def _checkValueItemParent(
return search_string
if _regexSearchRegPolData(re.escape(search_string), policy_file_data):
log.trace(
"found the search string in the pol file, " "%s is configured",
"found the search string in the pol file, %s is configured",
policy_name,
)
return True
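The netsh hunk above also collapses a multi-line message into one record with embedded newlines rather than several calls; one record stays contiguous in the output and in any log aggregator. A sketch with assumed values:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

# Illustrative values, not real LGPO data.
profile, section, option, value = "domainprofile", "state", "State", "on"

# One record with embedded newlines cannot be split apart by concurrent
# writers, unlike four separate log calls emitting one field each.
log.debug(
    "LGPO: Setting the following\nProfile: %s\nSection: %s\nOption: %s\nValue: %s",
    profile, section, option, value,
)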
@ -6087,12 +6089,12 @@ def _processValueItem(
if not check_deleted:
if this_element_value is not None:
log.trace(
"_processValueItem has an explicit " "element_value of %s",
"_processValueItem has an explicit element_value of %s",
this_element_value,
)
expected_string = del_keys
log.trace(
"element_valuenames == %s and element_values " "== %s",
"element_valuenames == %s and element_values == %s",
element_valuenames,
element_values,
)

@ -6158,7 +6160,7 @@ def _processValueItem(
if this_element_value is not None:
# Sometimes values come in as strings
if isinstance(this_element_value, str):
log.debug("Converting {} to bytes".format(this_element_value))
log.debug("Converting %s to bytes", this_element_value)
this_element_value = this_element_value.encode("utf-32-le")
expected_string = b"".join(
[

@ -6262,7 +6264,7 @@ def _checkAllAdmxPolicies(
admx_policy_definitions = _get_policy_definitions(language=adml_language)
adml_policy_resources = _get_policy_resources(language=adml_language)
if policy_file_data:
log.trace("POLICY CLASS {} has file data".format(policy_class))
log.trace("POLICY CLASS %s has file data", policy_class)
policy_filedata_split = re.sub(
salt.utils.stringutils.to_bytes(r"\]{}$".format(chr(0))),
b"",

@ -6370,7 +6372,7 @@ def _checkAllAdmxPolicies(
this_policyname = admx_policy.attrib["name"]
else:
log.error(
'policy item %s does not have the required "name" ' "attribute",
'policy item %s does not have the required "name" attribute',
admx_policy.attrib,
)
break

@ -6871,9 +6873,8 @@ def _checkAllAdmxPolicies(
policy_disabled_elements + 1
)
log.trace(
"element {} is disabled".format(
child_item.attrib["id"]
)
"element %s is disabled",
child_item.attrib["id"],
)
if element_only_enabled_disabled:
if len(required_elements.keys()) > 0 and len(

@ -6883,9 +6884,8 @@ def _checkAllAdmxPolicies(
required_elements.keys()
):
log.trace(
"{} is disabled by all enum elements".format(
this_policyname
)
"%s is disabled by all enum elements",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}

@ -6899,9 +6899,7 @@ def _checkAllAdmxPolicies(
this_policyname
] = configured_elements
log.trace(
"{} is enabled by enum elements".format(
this_policyname
)
"%s is enabled by enum elements", this_policyname
)
else:
if this_policy_setting == "Enabled":

@ -7488,7 +7486,7 @@ def _writeAdminTemplateRegPolFile(
test_items=False,
)
log.trace(
"working with disabledList " "portion of %s",
"working with disabledList portion of %s",
admPolicy,
)
existing_data = _policyFileReplaceOrAppendList(

@ -7604,7 +7602,7 @@ def _writeAdminTemplateRegPolFile(
)
else:
log.error(
'policy item %s does not have the requried "class" attribute',
'policy item %s does not have the required "class" attribute',
this_policy.attrib,
)
else:

@ -7713,9 +7711,8 @@ def _writeAdminTemplateRegPolFile(
test_items=False,
)
log.trace(
"working with trueList portion of {}".format(
admPolicy
)
"working with trueList portion of %s",
admPolicy,
)
else:
list_strings = _checkListItem(

@ -8097,8 +8094,11 @@ def _lookup_admin_template(policy_name, policy_class, adml_language="en-US"):
)
this_hierarchy.reverse()
if hierarchy != this_hierarchy:
msg = "hierarchy %s does not match this item's hierarchy of %s"
log.trace(msg, hierarchy, this_hierarchy)
log.trace(
"hierarchy %s does not match this item's hierarchy of %s",
hierarchy,
this_hierarchy,
)
if len(these_admx_search_results) == 1:
log.trace(
"only 1 admx was found and it does not match this adml, it is safe to remove from the list"

@ -8160,13 +8160,9 @@ def _lookup_admin_template(policy_name, policy_class, adml_language="en-US"):
)
if admx_search_results:
log.trace(
"processing admx_search_results of {}".format(
admx_search_results
)
)
log.trace(
"multiple_adml_entries is {}".format(multiple_adml_entries)
"processing admx_search_results of %s", admx_search_results
)
log.trace("multiple_adml_entries is %s", multiple_adml_entries)
if (
len(admx_search_results) == 1 or hierarchy
) and not multiple_adml_entries:

@ -8866,7 +8862,7 @@ def _get_policy_adm_setting(
policy_file_data,
):
log.trace(
"%s is disabled by no explicit enable/disable list or " "value",
"%s is disabled by no explicit enable/disable list or value",
this_policy_name,
)
this_policy_setting = "Disabled"

@ -9195,24 +9191,18 @@ def _get_policy_adm_setting(
):
configured_elements[this_element_name] = "Disabled"
policy_disabled_elements = policy_disabled_elements + 1
log.trace(
"element {} is disabled".format(child_item.attrib["id"])
)
log.trace("element %s is disabled", child_item.attrib["id"])
if element_only_enabled_disabled:
if 0 < len(required_elements.keys()) == len(configured_elements.keys()):
if policy_disabled_elements == len(required_elements.keys()):
log.trace(
"{} is disabled by all enum elements".format(
this_policy_name
)
"%s is disabled by all enum elements", this_policy_name
)
policy_vals.setdefault(this_policy_namespace, {})[
this_policy_name
] = "Disabled"
else:
log.trace(
"{} is enabled by enum elements".format(this_policy_name)
)
log.trace("%s is enabled by enum elements", this_policy_name)
policy_vals.setdefault(this_policy_namespace, {})[
this_policy_name
] = configured_elements
@ -10142,14 +10132,14 @@ def set_(
if _netshs:
# we've got netsh settings to make
for setting in _netshs:
log.trace("Setting firewall policy: {}".format(setting))
log.trace("Setting firewall policy: %s", setting)
log.trace(_netshs[setting])
_set_netsh_value(**_netshs[setting])

if _advaudits:
# We've got AdvAudit settings to make
for setting in _advaudits:
log.trace("Setting Advanced Audit policy: {}".format(setting))
log.trace("Setting Advanced Audit policy: %s", setting)
log.trace(_advaudits[setting])
_set_advaudit_value(**_advaudits[setting])
@ -1141,7 +1141,7 @@ def genrepo(**kwargs):

# Skip hidden directories (.git)
if re.search(r"[\\/]\..*", root):
log.debug("Skipping files in directory: {}".format(root))
log.debug("Skipping files in directory: %s", root)
continue

short_path = os.path.relpath(root, repo_details.local_dest)

@ -1199,7 +1199,7 @@ def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
renderers = salt.loader.render(__opts__, __salt__)

def _failed_compile(prefix_msg, error_msg):
log.error("{} '{}': {} ".format(prefix_msg, short_path_name, error_msg))
log.error("%s '%s': %s", prefix_msg, short_path_name, error_msg)
ret.setdefault("errors", {})[short_path_name] = [
"{}, {} ".format(prefix_msg, error_msg)
]

@ -1774,8 +1774,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
):
ret[pkg_name] = {"install status": "task started"}
if not __salt__["task.run"](name="update-salt-software"):
log.error("Failed to install %s", pkg_name)
log.error("Scheduled Task failed to run")
log.error(
"Scheduled Task failed to run. Failed to install %s", pkg_name
)
ret[pkg_name] = {"install status": "failed"}
else:

@ -1790,15 +1791,18 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
break

if not task_running:
log.error("Failed to install %s", pkg_name)
log.error("Scheduled Task failed to run")
log.error(
"Scheduled Task failed to run. Failed to install %s",
pkg_name,
)
ret[pkg_name] = {"install status": "failed"}

# All other packages run with task scheduler
else:
if not __salt__["task.run_wait"](name="update-salt-software"):
log.error("Failed to install %s", pkg_name)
log.error("Scheduled Task failed to run")
log.error(
"Scheduled Task failed to run. Failed to install %s", pkg_name
)
ret[pkg_name] = {"install status": "failed"}
else:
# Launch the command

@ -1824,9 +1828,12 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = {"install status": "success, reboot initiated"}
changed.append(pkg_name)
else:
log.error("Failed to install %s", pkg_name)
log.error("retcode %s", result["retcode"])
log.error("installer output: %s", result["stdout"])
log.error(
"Failed to install %s; retcode: %s; installer output: %s",
pkg_name,
result["retcode"],
result["stdout"],
)
ret[pkg_name] = {"install status": "failed"}

# Get a new list of installed software

@ -2140,8 +2147,9 @@ def remove(name=None, pkgs=None, **kwargs):
)
# Run Scheduled Task
if not __salt__["task.run_wait"](name="update-salt-software"):
log.error("Failed to remove %s", pkgname)
log.error("Scheduled Task failed to run")
log.error(
"Scheduled Task failed to run. Failed to remove %s", pkgname
)
ret[pkgname] = {"uninstall status": "failed"}
else:
# Launch the command

@ -2168,9 +2176,12 @@ def remove(name=None, pkgs=None, **kwargs):
ret[pkgname] = {"uninstall status": "success, reboot initiated"}
changed.append(pkgname)
else:
log.error("Failed to remove %s", pkgname)
log.error("retcode %s", result["retcode"])
log.error("uninstaller output: %s", result["stdout"])
log.error(
"Failed to remove %s; retcode: %s; uninstaller output: %s",
pkgname,
result["retcode"],
result["stdout"],
)
ret[pkgname] = {"uninstall status": "failed"}

# Get a new list of installed software

@ -2287,8 +2298,7 @@ def get_repo_data(saltenv="base"):
log.exception(exc)
return {}
except OSError as exc:
log.error("Not able to read repo file")
log.exception(exc)
log.exception("Not able to read repo file: %s", exc)
return {}
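get_repo_data above merges a log.error() plus log.exception(exc) pair into a single log.exception() call. log.exception() logs at ERROR severity and appends the active traceback, so one call carries both the message and the stack. A minimal sketch; the file name is illustrative:

import logging

log = logging.getLogger(__name__)

def get_repo_data(path="winrepo.p"):  # illustrative path
    try:
        with open(path, "rb") as repo:
            return repo.read()
    except OSError as exc:
        # One call replaces the old log.error() + log.exception(exc) pair:
        # it must run inside an except block so there is a traceback to attach.
        log.exception("Not able to read repo file: %s", exc)
        return {}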
@ -342,7 +342,7 @@ def start(name, timeout=90):
raise CommandExecutionError(
"Failed To Start {}: {}".format(name, exc.strerror)
)
log.debug('Service "{}" is running'.format(name))
log.debug('Service "%s" is running', name)

srv_status = _status_wait(
service_name=name,

@ -383,7 +383,7 @@ def stop(name, timeout=90):
raise CommandExecutionError(
"Failed To Stop {}: {}".format(name, exc.strerror)
)
log.debug('Service "{}" is not running'.format(name))
log.debug('Service "%s" is not running', name)

srv_status = _status_wait(
service_name=name,

@ -1116,7 +1116,7 @@ def delete(name, timeout=90):
raise CommandExecutionError(
"Failed to open {}. {}".format(name, exc.strerror)
)
log.debug('Service "{}" is not present'.format(name))
log.debug('Service "%s" is not present', name)
return True

try:
@ -1033,7 +1033,8 @@ def create_crl(
crltext = crl.export(**export_kwargs)
except (TypeError, ValueError):
log.warning(
"Error signing crl with specified digest. Are you using pyopenssl 0.15 or newer? The default md5 digest will be used."
"Error signing crl with specified digest. Are you using "
"pyopenssl 0.15 or newer? The default md5 digest will be used."
)
export_kwargs.pop("digest", None)
crltext = crl.export(**export_kwargs)

@ -1626,7 +1627,7 @@ def create_certificate(path=None, text=False, overwrite=True, ca_server=None, **
name=extname, value=extval, critical=critical, issuer=issuer
)
if not ext.x509_ext:
log.info("Invalid X509v3 Extension. {}: {}".format(extname, extval))
log.info("Invalid X509v3 Extension. %s: %s", extname, extval)
continue

cert.add_ext(ext)

@ -1734,7 +1735,9 @@ def create_csr(path=None, text=False, **kwargs):
if "private_key" not in kwargs and "public_key" in kwargs:
kwargs["private_key"] = kwargs["public_key"]
log.warning(
"OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key"
"OpenSSL no longer allows working with non-signed CSRs. "
"A private_key must be specified. Attempting to use public_key "
"as private_key"
)

if "private_key" not in kwargs:

@ -1788,7 +1791,7 @@ def create_csr(path=None, text=False, **kwargs):
name=extname, value=extval, critical=critical, issuer=issuer
)
if not ext.x509_ext:
log.info("Invalid X509v3 Extension. {}: {}".format(extname, extval))
log.info("Invalid X509v3 Extension. %s: %s", extname, extval)
continue

extstack.push(ext)
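The x509 hunks go the other direction: over-long single-line messages are split across adjacent literals purely for source line length. The logged text is unchanged, as this small check illustrates:

# Splitting a long message across adjacent literals changes only the
# source layout; compile-time concatenation yields the same string.
single = (
    "OpenSSL no longer allows working with non-signed CSRs. A private_key"
    " must be specified. Attempting to use public_key as private_key"
)
wrapped = (
    "OpenSSL no longer allows working with non-signed CSRs. "
    "A private_key must be specified. Attempting to use public_key "
    "as private_key"
)
assert single == wrapped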
@ -3377,9 +3377,9 @@ def _get_patches(installed_only=False):

if parsing_errors:
log.warning(
"Skipped some unexpected output while running '{}' to list patches. Please check output".format(
" ".join(cmd)
)
"Skipped some unexpected output while running '%s' to list "
"patches. Please check output",
" ".join(cmd),
)

if installed_only:
@ -700,7 +700,8 @@ def bootstrap(
except OSError as exc:
# don't block here, try to execute it if can pass
_logger.error(
"BUILDOUT bootstrap permissions error:" " {}".format(exc),
"BUILDOUT bootstrap permissions error: %s",
exc,
exc_info=_logger.isEnabledFor(logging.DEBUG),
)
cmd = "{} bootstrap.py {}".format(python, bootstrap_args)
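The zc.buildout hunk above keeps an instructive detail: exc_info is computed from the logger's own level, so the traceback is attached only when DEBUG output would actually be emitted. A self-contained sketch of that pattern:

import logging

_logger = logging.getLogger(__name__)

try:
    raise OSError("permission denied")  # stand-in failure
except OSError as exc:
    # The message is always logged at ERROR; the traceback is attached
    # only when the logger would emit DEBUG, keeping production logs
    # compact while preserving full detail during debugging.
    _logger.error(
        "BUILDOUT bootstrap permissions error: %s",
        exc,
        exc_info=_logger.isEnabledFor(logging.DEBUG),
    )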
@ -1102,7 +1102,7 @@ def _get_configured_repos(root=None):
]
)
else:
log.warning("Repositories not found in {}".format(repos))
log.warning("Repositories not found in %s", repos)

return repos_cfg

@ -1572,7 +1572,7 @@ def install(
pkg_params = {name: version_num}
else:
log.warning(
'"version" parameter will be ignored for multiple ' "package targets"
'"version" parameter will be ignored for multiple package targets'
)

if pkg_type == "repository":

@ -2043,7 +2043,7 @@ def list_locks(root=None):
except OSError:
pass
except Exception: # pylint: disable=broad-except
log.warning("Detected a problem when accessing {}".format(_locks))
log.warning("Detected a problem when accessing %s", _locks)

return locks
@ -616,8 +616,7 @@ try:
except AttributeError:
cpstats = None
logger.warn(
"Import of cherrypy.cpstats failed. "
"Possible upstream bug: "
"Import of cherrypy.cpstats failed. Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:

@ -709,9 +708,9 @@ def salt_api_acl_tool(username, request):
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user {0} from IP {1}"
success_str = "[api_acl] Authentication successful for user {0} from IP {1}"
pass_str = "[api_acl] Authentication not checked for " "user {0} from IP {1}"
failure_str = "[api_acl] Authentication failed for " "user %s from IP %s"
success_str = "[api_acl] Authentication successful for user %s from IP %s"
pass_str = "[api_acl] Authentication not checked for " "user %s from IP %s"

acl = None
# Salt Configuration

@ -729,23 +728,23 @@ def salt_api_acl_tool(username, request):
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str.format(username, ip))
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str.format(username, ip))
logger.info(failure_str, username, ip)
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str.format(username, ip))
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str.format(username, ip))
logger.info(failure_str, username, ip)
return False
else:
logger.info(failure_str.format(username, ip))
logger.info(failure_str, username, ip)
return False
else:
logger.info(pass_str.format(username, ip))
logger.info(pass_str, username, ip)
return True

@ -762,11 +761,11 @@ def salt_ip_verify_tool():
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: {}".format(auth_ip_list))
logger.debug("Found IP list: %s", auth_ip_list)
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: {}".format(rem_ip))
logger.debug("Request from IP: %s", rem_ip)
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {}".format(rem_ip))
logger.error("Blocked IP: %s", rem_ip)
raise cherrypy.HTTPError(403, "Bad IP")

@ -1898,10 +1897,9 @@ class Login(LowDataAdapter):
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for "
"eauth '{}', and user '{}'.".format(
token.get("eauth"), token.get("name")
),
"Configuration for external_auth malformed for eauth %r, and user %r.",
token.get("eauth"),
token.get("name"),
exc_info=True,
)
perms = None

@ -2558,7 +2556,7 @@ class WebsocketEndpoint:
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{}".format(data)
"Error: Salt event has non UTF-8 data:\n%s", data
)

parent_pipe, child_pipe = Pipe()
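In salt_api_acl_tool the module-level templates switch from {0}/{1} placeholders to %s so they can be passed to logger.info() with trailing arguments; the logger treats its first argument as a printf-style template. A sketch with assumed values:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Reusable printf-style templates; interpolation is deferred to the logger.
failure_str = "[api_acl] Authentication failed for user %s from IP %s"
success_str = "[api_acl] Authentication successful for user %s from IP %s"

username, ip = "jdoe", "203.0.113.7"  # illustrative values
logger.info(success_str, username, ip)
logger.info(failure_str, username, ip)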
@ -328,11 +328,13 @@ def ext_pillar(
except Exception as err: # pylint: disable=broad-except
import salt.log

msg = "pillar_ldap: error parsing configuration file: {0} - {1}"
msg = "pillar_ldap: error parsing configuration file: {} - {}".format(
config_file, err
)
if salt.log.is_console_configured():
log.warning(msg.format(config_file, err))
log.warning(msg)
else:
print(msg.format(config_file, err))
print(msg)
return {}
else:
if not isinstance(opts, dict):
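The pillar_ldap hunk is the counter-example: the message is still rendered eagerly with .format(), just migrated off the {0}-style placeholders, because the same string also feeds print() when no console logger is configured. Lazy arguments only help inside the logging call itself. A sketch of the pattern:

import logging

log = logging.getLogger(__name__)

def report(config_file, err, console_configured):
    # The message is needed twice, once for the log and once for print(),
    # so rendering it eagerly with .format() is the right call here.
    msg = "pillar_ldap: error parsing configuration file: {} - {}".format(
        config_file, err
    )
    if console_configured:
        log.warning(msg)
    else:
        print(msg)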
@ -98,7 +98,7 @@ def _validate_response_code(response_code_to_check, cookie_to_logout=None):
if formatted_response_code not in ["200", "201", "202", "204"]:
if cookie_to_logout:
logout(cookie_to_logout)
log.error("Received error HTTP status code: {}".format(formatted_response_code))
log.error("Received error HTTP status code: %s", formatted_response_code)
raise salt.exceptions.CommandExecutionError(
"Did not receive a valid response from host."
)
@ -128,7 +128,7 @@ def init(opts):
for arg in optional_args:
if arg in proxy_keys:
args[arg] = opts["proxy"][arg]
log.debug("Args: {}".format(args))
log.debug("Args: %s", args)
thisproxy["conn"] = jnpr.junos.Device(**args)
try:
thisproxy["conn"].open()

@ -139,7 +139,7 @@ def init(opts):
ConnectTimeoutError,
ConnectError,
) as ex:
log.error("{} : not able to initiate connection to the device".format(str(ex)))
log.error("%s : not able to initiate connection to the device", ex)
thisproxy["initialized"] = False
return

@ -155,12 +155,12 @@ def init(opts):
try:
thisproxy["conn"].bind(cu=jnpr.junos.utils.config.Config)
except Exception as ex: # pylint: disable=broad-except
log.error("Bind failed with Config class due to: {}".format(str(ex)))
log.error("Bind failed with Config class due to: %s", ex)

try:
thisproxy["conn"].bind(sw=jnpr.junos.utils.sw.SW)
except Exception as ex: # pylint: disable=broad-except
log.error("Bind failed with SW class due to: {}".format(str(ex)))
log.error("Bind failed with SW class due to: %s", ex)
thisproxy["initialized"] = True

@ -277,7 +277,7 @@ def shutdown(opts):
This is called when the proxy-minion is exiting to make sure the
connection to the device is closed cleanly.
"""
log.debug("Proxy module {} shutting down!!".format(opts["id"]))
log.debug("Proxy module %s shutting down!!", opts["id"])
try:
thisproxy["conn"].close()

@ -210,7 +210,7 @@ def init(opts=None):
log.info("NXOS PROXY: Initialize nxapi proxy connection")
return _init_nxapi(opts)
else:
log.error("Unknown Connection Type: {}".format(CONNECTION))
log.error("Unknown Connection Type: %s", CONNECTION)
return False

@ -247,7 +247,7 @@ def grains():
if CONNECTION == "nxapi":
data = data[0]
ret = salt.utils.nxos.system_info(data)
log.debug(ret)
log.debug("System Info: %s", ret)
DEVICE_DETAILS["grains_cache"].update(ret["nxos"])
return {"nxos": DEVICE_DETAILS["grains_cache"]}

@ -351,7 +351,7 @@ def _init_ssh(opts=None):
prompt=this_prompt,
)
out, err = DEVICE_DETAILS[_worker_name()].sendline("terminal length 0")
log.info("SSH session establised for process {}".format(_worker_name()))
log.info("SSH session established for process %s", _worker_name())
except Exception as ex: # pylint: disable=broad-except
log.error("Unable to connect to %s", opts["proxy"]["host"])
log.error("Please check the following:\n")

@ -468,7 +468,7 @@ def _init_nxapi(opts):
)
log.error("-- Exception Generated: %s", ex)
raise
log.info("nxapi DEVICE_DETAILS info: {}".format(DEVICE_DETAILS))
log.info("nxapi DEVICE_DETAILS info: %s", DEVICE_DETAILS)
return True

@ -9,7 +9,6 @@ import functools
import logging
import os.path
import tempfile
import traceback

from salt.exceptions import CommandExecutionError

@ -100,9 +99,9 @@ def __mount_device(action):
ret["comment"].append(msg)
kwargs["__dest"] = dest
ret = action(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
log.error("""Traceback: {}""".format(traceback.format_exc()))
ret["comment"].append(e)
except Exception as exc: # pylint: disable=broad-except
log.error("Exception raised while mounting device: %s", exc, exc_info=True)
ret["comment"].append(exc)
finally:
if device:
_umount(dest)

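The hunk above also swaps an interpolated traceback.format_exc() for exc_info=True, which lets the logging framework attach the formatted traceback itself. A hedged sketch of that idiom (the failing helper is hypothetical):

import logging

log = logging.getLogger(__name__)

def _mount():  # hypothetical stand-in for the real mount helper
    raise OSError("device busy")

try:
    _mount()
except Exception as exc:  # pylint: disable=broad-except
    # exc_info=True appends the current exception's traceback to the
    # log record, so there is no need to embed traceback.format_exc()
    # in the message string.
    log.error("Exception raised while mounting device: %s", exc, exc_info=True)
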
@ -12,7 +12,7 @@ or removed.

import logging

LOG = logging.getLogger(__name__)
log = logging.getLogger(__name__)


def __virtual__():

@ -59,7 +59,7 @@ def installed(name, cyg_arch="x86_64", mirrors=None):
be one of 'x86' or 'x86_64'"
return ret

LOG.debug("Installed State: Initial Mirror list: {}".format(mirrors))
log.debug("Installed State: Initial Mirror list: %s", mirrors)

if not __salt__["cyg.check_valid_package"](
name, cyg_arch=cyg_arch, mirrors=mirrors

@ -194,7 +194,7 @@ def updated(name=None, cyg_arch="x86_64", mirrors=None):
return ret

if not mirrors:
LOG.warning("No mirror given, using the default.")
log.warning("No mirror given, using the default.")

before = __salt__["cyg.list"](cyg_arch=cyg_arch)
if __salt__["cyg.update"](cyg_arch, mirrors=mirrors):

@ -203,7 +203,6 @@ Module was developed against.

import logging
import sys
import traceback

import salt.exceptions

@ -279,8 +278,10 @@ def dvs_configured(name, dvs):
datacenter_name = _get_datacenter_name()
dvs_name = dvs["name"] if dvs.get("name") else name
log.info(
"Running state {} for DVS '{}' in datacenter "
"'{}'".format(name, dvs_name, datacenter_name)
"Running state %s for DVS '%s' in datacenter '%s'",
name,
dvs_name,
datacenter_name,
)
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": None}

@ -325,9 +326,10 @@ def dvs_configured(name, dvs):
"network_resource_management_enabled",
]
log.trace(
"DVS '{}' found in datacenter '{}'. Checking "
"for any updates in "
"{}".format(dvs_name, datacenter_name, props)
"DVS '%s' found in datacenter '%s'. Checking for any updates in %s",
dvs_name,
datacenter_name,
props,
)
props_to_original_values = {}
props_to_updated_values = {}

@ -416,7 +418,7 @@ def dvs_configured(name, dvs):
)
__salt__["vsphere.disconnect"](si)
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}\n{}".format(exc, traceback.format_exc()))
log.error("Error: %s", exc, exc_info=True)
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:

@ -560,7 +562,7 @@ def portgroups_configured(name, dvs, portgroups):
pg_name = pg["name"]
expected_pg_names.append(pg_name)
del pg["name"]
log.info("Checking pg '{}'".format(pg_name))
log.info("Checking pg '%s'", pg_name)
filtered_current_pgs = [p for p in current_pgs if p.get("name") == pg_name]
if not filtered_current_pgs:
changes_required = True

@ -587,9 +589,10 @@ def portgroups_configured(name, dvs, portgroups):
else:
# Portgroup already exists. Checking the config
log.trace(
"Portgroup '{}' found in DVS '{}', datacenter "
"'{}'. Checking for any updates."
"".format(pg_name, dvs, datacenter)
"Portgroup '%s' found in DVS '%s', datacenter '%s'. Checking for any updates.",
pg_name,
dvs,
datacenter,
)
current_pg = filtered_current_pgs[0]
diff_dict = _get_diff_dict(current_pg, pg)

@ -598,7 +601,7 @@ def portgroups_configured(name, dvs, portgroups):
changes_required = True
if __opts__["test"]:
changes_strings = _get_changes_from_diff_dict(diff_dict)
log.trace("changes_strings = " "{}".format(changes_strings))
log.trace("changes_strings = %s", changes_strings)
comments.append(
"State {} will update portgroup '{}' in "
"DVS '{}', datacenter '{}':\n{}"

@ -660,7 +663,7 @@ def portgroups_configured(name, dvs, portgroups):
changes.update({current_pg["name"]: {"old": current_pg}})
__salt__["vsphere.disconnect"](si)
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}\n{}".format(exc, traceback.format_exc()))
log.error("Error: %s", exc, exc_info=True)
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:

@ -724,7 +727,7 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup):
changes_required = True
if __opts__["test"]:
changes_strings = _get_changes_from_diff_dict(diff_dict)
log.trace("changes_strings = " "{}".format(changes_strings))
log.trace("changes_strings = %s", changes_strings)
comments.append(
"State {} will update the "
"uplink portgroup in DVS '{}', datacenter "

@ -41,7 +41,6 @@ Module was developed against.

import logging
import sys
import traceback

import salt.exceptions
from salt.config.schemas.esxcluster import ESXClusterConfigSchema, LicenseSchema

@ -171,12 +170,13 @@ def cluster_configured(name, cluster_config):
"Unsupported proxy {}".format(proxy_type)
)
log.info(
"Running {} for cluster '{}' in datacenter '{}'".format(
name, cluster_name, datacenter_name
)
"Running %s for cluster '%s' in datacenter '%s'",
name,
cluster_name,
datacenter_name,
)
cluster_dict = cluster_config
log.trace("cluster_dict = {}".format(cluster_dict))
log.trace("cluster_dict = %s", cluster_dict)
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": "Default"}
comments = []

@ -186,7 +186,7 @@ def cluster_configured(name, cluster_config):
try:
log.trace("Validating cluster_configured state input")
schema = ESXClusterConfigSchema.serialize()
log.trace("schema = {}".format(schema))
log.trace("schema = %s", schema)
try:
jsonschema.validate(cluster_dict, schema)
except jsonschema.exceptions.ValidationError as exc:

@ -210,9 +210,9 @@ def cluster_configured(name, cluster_config):
ret.update({"result": None, "comment": "\n".join(comments)})
return ret
log.trace(
"Creating cluster '{}' in datacenter '{}'.".format(
cluster_name, datacenter_name
)
"Creating cluster '%s' in datacenter '%s'.",
cluster_name,
datacenter_name,
)
__salt__["vsphere.create_cluster"](
cluster_dict, datacenter_name, cluster_name, service_instance=si

@ -234,13 +234,13 @@ def cluster_configured(name, cluster_config):
cluster_dict.get("ha", {}).get("options", []),
"key",
)
log.trace("options diffs = {}".format(ldiff.diffs))
log.trace("options diffs = %s", ldiff.diffs)
# Remove options if exist
del cluster_dict["ha"]["options"]
if "ha" in current and "options" in current["ha"]:
del current["ha"]["options"]
diff = recursive_diff(current, cluster_dict)
log.trace("diffs = {}".format(diff.diffs))
log.trace("diffs = %s", diff.diffs)
if not (diff.diffs or (ldiff and ldiff.diffs)):
# No differences
comments.append(

@ -279,7 +279,7 @@ def cluster_configured(name, cluster_config):
dictupdate.update(
old_values, {"ha": {"options": ldiff.old_values}}
)
log.trace("new_values = {}".format(new_values))
log.trace("new_values = %s", new_values)
__salt__["vsphere.update_cluster"](
new_values, datacenter_name, cluster_name, service_instance=si
)

@ -299,7 +299,7 @@ def cluster_configured(name, cluster_config):
)
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}\n{}".format(exc, traceback.format_exc()))
log.error("Error: %s", exc, exc_info=True)
if si:
__salt__["vsphere.disconnect"](si)
ret.update({"result": False, "comment": str(exc)})

@ -320,7 +320,7 @@ def vsan_datastore_configured(name, datastore_name):
__salt__["esxcluster.get_details"]()["datacenter"],
)
display_name = "{}/{}".format(datacenter_name, cluster_name)
log.info("Running vsan_datastore_configured for '{}'".format(display_name))
log.info("Running vsan_datastore_configured for '%s'", display_name)
ret = {"name": name, "changes": {}, "result": None, "comment": "Default"}
comments = []
changes = {}

@ -349,9 +349,9 @@ def vsan_datastore_configured(name, datastore_name):
log.info(comments[-1])
else:
log.trace(
"Renaming vSAN datastore '{}' to '{}'".format(
vsan_ds["name"], datastore_name
)
"Renaming vSAN datastore '%s' to '%s'",
vsan_ds["name"],
datastore_name,
)
__salt__["vsphere.rename_datastore"](
datastore_name=vsan_ds["name"],

@ -383,7 +383,7 @@ def vsan_datastore_configured(name, datastore_name):
)
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}\n{}".format(exc, traceback.format_exc()))
log.error("Error: %s", exc, exc_info=True)
if si:
__salt__["vsphere.disconnect"](si)
ret.update({"result": False, "comment": exc.strerror})

@ -410,15 +410,13 @@ def licenses_configured(name, licenses=None):
__salt__["esxcluster.get_details"]()["datacenter"],
)
display_name = "{}/{}".format(datacenter_name, cluster_name)
log.info("Running licenses configured for '{}'".format(display_name))
log.trace("licenses = {}".format(licenses))
log.info("Running licenses configured for '%s'", display_name)
log.trace("licenses = %s", licenses)
entity = {"type": "cluster", "datacenter": datacenter_name, "cluster": cluster_name}
log.trace("entity = {}".format(entity))
log.trace("entity = %s", entity)

comments = []
changes = {}
old_licenses = []
new_licenses = []
has_errors = False
needs_changes = False
try:

@ -433,7 +431,6 @@ def licenses_configured(name, licenses=None):
si = __salt__["vsphere.get_service_instance_via_proxy"]()
# Retrieve licenses
existing_licenses = __salt__["vsphere.list_licenses"](service_instance=si)
remaining_licenses = existing_licenses[:]
# Cycle through licenses
for license_name, license in licenses.items():
# Check if license already exists

@ -587,7 +584,7 @@ def licenses_configured(name, licenses=None):

return ret
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}\n{}".format(exc, traceback.format_exc()))
log.error("Error: %s", exc, exc_info=True)
if si:
__salt__["vsphere.disconnect"](si)
ret.update({"result": False, "comment": exc.strerror})

@ -84,7 +84,7 @@ def datacenter_configured(name):
dc_name = __salt__["esxdatacenter.get_details"]()["datacenter"]
else:
dc_name = name
log.info("Running datacenter_configured for datacenter '{}'".format(dc_name))
log.info("Running datacenter_configured for datacenter '%s'", dc_name)
ret = {"name": name, "changes": {}, "result": None, "comment": "Default"}
comments = []
si = None

@ -97,7 +97,7 @@ def datacenter_configured(name):
if __opts__["test"]:
comments.append("State will create datacenter '{}'.".format(dc_name))
else:
log.debug("Creating datacenter '{}'. ".format(dc_name))
log.debug("Creating datacenter '%s'", dc_name)
__salt__["vsphere.create_datacenter"](dc_name, si)
comments.append("Created datacenter '{}'.".format(dc_name))
log.info(comments[-1])

@ -112,7 +112,7 @@ def datacenter_configured(name):
ret["result"] = None if __opts__["test"] and ret["changes"] else True
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: {}".format(exc))
log.error("Error: %s", exc)
if si:
__salt__["vsphere.disconnect"](si)
ret.update(

@ -593,7 +593,7 @@ def vm_registered(vm_name, datacenter, placement, vm_file, power_on=False):
result = {"name": vm_name, "result": None, "changes": {}, "comment": ""}

vmx_path = "{}{}".format(vm_file.folderPath, vm_file.file[0].path)
log.trace("Registering virtual machine with vmx file: {}".format(vmx_path))
log.trace("Registering virtual machine with vmx file: %s", vmx_path)
service_instance = __salt__["vsphere.get_service_instance_via_proxy"]()
try:
__salt__["vsphere.register_vm"](

@ -453,8 +453,7 @@ def _gen_recurse_managed_files(
for filename in _filenames:
if filename.startswith(lname):
log.debug(
"** skipping file ** {}, it intersects a "
"symlink".format(filename)
"** skipping file ** %s, it intersects a symlink", filename
)
filenames.remove(filename)
# Create the symlink along with the necessary dirs.

@ -534,8 +533,7 @@ def _gen_recurse_managed_files(
for link in symlinks:
if mdir.startswith(link, 0):
log.debug(
"** skipping empty dir ** {}, it intersects"
" a symlink".format(mdir)
"** skipping empty dir ** %s, it intersects a symlink", mdir
)
islink = True
break

@ -610,7 +608,7 @@ def _gen_keep_files(name, require, walk_d=None):
if _is_child(fn, name):
if fun == "recurse":
fkeep = _gen_recurse_managed_files(**low)[3]
log.debug("Keep from {}: {}".format(fn, fkeep))
log.debug("Keep from %s: %s", fn, fkeep)
keep.update(fkeep)
elif walk_d:
walk_ret = set()

@ -620,7 +618,7 @@ def _gen_keep_files(name, require, walk_d=None):
keep.update(_process(fn))
else:
keep.add(fn)
log.debug("Files to keep from required states: {}".format(list(keep)))
log.debug("Files to keep from required states: %s", list(keep))
return list(keep)

@ -1123,8 +1121,9 @@ def _get_template_texts(
rndrd_templ_fn = __salt__["cp.get_template"](
source, "", template=template, saltenv=__env__, context=tmpctx, **kwargs
)
msg = "cp.get_template returned {0} (Called with: {1})"
log.debug(msg.format(rndrd_templ_fn, source))
log.debug(
"cp.get_template returned %s (Called with: %s)", rndrd_templ_fn, source
)
if rndrd_templ_fn:
tmplines = None
with salt.utils.files.fopen(rndrd_templ_fn, "rb") as fp_:

@ -1132,10 +1131,12 @@ def _get_template_texts(
tmplines = salt.utils.stringutils.to_unicode(tmplines)
tmplines = tmplines.splitlines(True)
if not tmplines:
msg = "Failed to read rendered template file {0} ({1})"
log.debug(msg.format(rndrd_templ_fn, source))
msg = "Failed to read rendered template file {} ({})".format(
rndrd_templ_fn, source
)
log.debug(msg)
ret["name"] = source
return _error(ret, msg.format(rndrd_templ_fn, source))
return _error(ret, msg)
txtl.append("".join(tmplines))
else:
msg = "Failed to load template file {}".format(source)

@ -1373,8 +1374,9 @@ def hardlink(
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
"The group argument for {} has been ignored as this "
"is a Windows system.".format(name)
"The group argument for %s has been ignored as this "
"is a Windows system.",
name,
)
group = user

@ -1632,9 +1634,10 @@ def symlink(
# Group isn't relevant to Windows, use win_perms/win_deny_perms
if group is not None:
log.warning(
"The group argument for {} has been ignored as this "
"The group argument for %s has been ignored as this "
"is a Windows system. Please use the `win_*` parameters to set "
"permissions in Windows.".format(name)
"permissions in Windows.",
name,
)
group = user

@ -2777,11 +2780,12 @@ def managed(
if not source and contents_count == 0 and replace:
replace = False
log.warning(
"State for file: {} - Neither 'source' nor 'contents' nor "
"State for file: %s - Neither 'source' nor 'contents' nor "
"'contents_pillar' nor 'contents_grains' was defined, yet "
"'replace' was set to 'True'. As there is no source to "
"replace the file with, 'replace' has been set to 'False' to "
"avoid reading the file unnecessarily.".format(name)
"avoid reading the file unnecessarily.",
name,
)

if "file_mode" in kwargs:

@ -2905,9 +2909,10 @@ def managed(
# Group isn't relevant to Windows, use win_perms/win_deny_perms
if group is not None:
log.warning(
"The group argument for {} has been ignored as this is "
"The group argument for %s has been ignored as this is "
"a Windows system. Please use the `win_*` parameters to set "
"permissions in Windows.".format(name)
"permissions in Windows.",
name,
)
group = user

@ -3520,9 +3525,10 @@ def directory(
# Group isn't relevant to Windows, use win_perms/win_deny_perms
if group is not None:
log.warning(
"The group argument for {} has been ignored as this is "
"The group argument for %s has been ignored as this is "
"a Windows system. Please use the `win_*` parameters to set "
"permissions in Windows.".format(name)
"permissions in Windows.",
name,
)
group = user

@ -4089,8 +4095,9 @@ def recurse(
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
"The group argument for {} has been ignored as this "
"is a Windows system.".format(name)
"The group argument for %s has been ignored as this "
"is a Windows system.",
name,
)
group = user
ret = {

@ -7138,8 +7145,9 @@ def copy_(
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
"The group argument for {} has been ignored as this is "
"a Windows system.".format(name)
"The group argument for %s has been ignored as this is "
"a Windows system.",
name,
)
group = user

@ -199,7 +199,7 @@ def installed(

for app in installing:
try:
log.info("Copying {} to {}".format(app, target))
log.info("Copying %s to %s", app, target)

out = __salt__["macpackage.install_app"](
os.path.join(mount_point, app), target

@ -169,7 +169,7 @@ def default_vsan_policy_configured(name, policy):
# TODO policy schema validation
si = __salt__["vsphere.get_service_instance_via_proxy"]()
current_policy = __salt__["vsphere.list_default_vsan_policy"](si)
log.trace("current_policy = {}".format(current_policy))
log.trace("current_policy = %s", current_policy)
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile

@ -272,7 +272,7 @@ def default_vsan_policy_configured(name, policy):
log.trace(changes)
__salt__["vsphere.disconnect"](si)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
log.error("Error: %s", exc)
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:

@ -315,11 +315,11 @@ def storage_policies_configured(name, policies):
changes = []
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": None}
log.trace("policies = {}".format(policies))
log.trace("policies = %s", policies)
si = None
try:
proxy_type = __salt__["vsphere.get_proxy_type"]()
log.trace("proxy_type = {}".format(proxy_type))
log.trace("proxy_type = %s", proxy_type)
# All allowed proxies have a shim execution module with the same
# name which implements a get_details function
# All allowed proxies have a vcenter detail

@ -329,7 +329,7 @@ def storage_policies_configured(name, policies):
current_policies = __salt__["vsphere.list_storage_policies"](
policy_names=[policy["name"] for policy in policies], service_instance=si
)
log.trace("current_policies = {}".format(current_policies))
log.trace("current_policies = %s", current_policies)
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
for policy in policies:

@ -466,7 +466,7 @@ def storage_policies_configured(name, policies):
)
__salt__["vsphere.disconnect"](si)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
log.error("Error: %s", exc)
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:

@ -509,8 +509,7 @@ def default_storage_policy_assigned(name, policy, datastore):
Name of datastore
"""
log.info(
"Running state {} for policy '{}', datastore '{}'."
"".format(name, policy, datastore)
"Running state %s for policy '%s', datastore '%s'.", name, policy, datastore
)
changes = {}
changes_required = False

@ -536,19 +535,19 @@ def default_storage_policy_assigned(name, policy, datastore):
}
}
if __opts__["test"]:
comment = (
"State {} will assign storage policy '{}' to datastore '{}'."
).format(name, policy, datastore)
comment = "State {} will assign storage policy '{}' to datastore '{}'.".format(
name, policy, datastore
)
else:
__salt__["vsphere.assign_default_storage_policy_to_datastore"](
policy=policy, datastore=datastore, service_instance=si
)
comment = ("Storage policy '{} was assigned to datastore '{}'.").format(
comment = "Storage policy '{}' was assigned to datastore '{}'.".format(
policy, name
)
log.info(comment)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
log.error("Error: %s", exc)
if si:
__salt__["vsphere.disconnect"](si)
ret.update(

@ -58,9 +58,9 @@ def _get_missing_results(results, dest_dir):
present = set(os.listdir(dest_dir))
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug("pkgbuild.built: dest_dir '{}' does not exist".format(dest_dir))
log.debug("pkgbuild.built: dest_dir '%s' does not exist", dest_dir)
elif exc.errno == errno.EACCES:
log.error("pkgbuilt.built: cannot access dest_dir '{}'".format(dest_dir))
log.error("pkgbuild.built: cannot access dest_dir '%s'", dest_dir)
present = set()
return sorted(set(results).difference(present))

@ -483,9 +483,8 @@ def present(
if algo == "1":
log.warning("Using MD5 for hashing passwords is considered insecure!")
log.debug(
"Re-using existing shadow salt for hashing password using {}".format(
algorithms.get(algo)
)
"Re-using existing shadow salt for hashing password using %s",
algorithms.get(algo),
)
password = __salt__["shadow.gen_password"](
password, crypt_salt=shadow_salt, algorithm=algorithms.get(algo)

@ -553,8 +552,7 @@ def present(
if groups and optional_groups:
for isected in set(groups).intersection(optional_groups):
log.warning(
'Group "%s" specified in both groups and optional_groups '
"for user %s",
'Group "%s" specified in both groups and optional_groups for user %s',
isected,
name,
)

@ -204,12 +204,12 @@ class AESReqServerMixin:
# we reject new minions, minions that are already
# connected must be allowed for the mine, highstate, etc.
if load["id"] not in minions:
msg = (
"Too many minions connected (max_minions={}). "
"Rejecting connection from id "
"{}".format(self.opts["max_minions"], load["id"])
log.info(
"Too many minions connected (max_minions=%s). "
"Rejecting connection from id %s",
self.opts["max_minions"],
load["id"],
)
log.info(msg)
eload = {
"result": False,
"act": "full",

@ -242,7 +242,7 @@ class AESReqServerMixin:
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info(
"Public key rejected for %s. Key is present in " "rejection key dir.",
"Public key rejected for %s. Key is present in rejection key dir.",
load["id"],
)
eload = {"result": False, "id": load["id"], "pub": load["pub"]}

@ -328,7 +328,7 @@ class AESReqServerMixin:
except OSError:
pass
log.info(
"Pending public key for %s rejected via " "autoreject_file",
"Pending public key for %s rejected via autoreject_file",
load["id"],
)
ret = {"enc": "clear", "load": {"ret": False}}

@ -437,7 +437,7 @@ class AESReqServerMixin:
with salt.utils.files.fopen(pubfn, "w+") as fp_:
fp_.write(load["pub"])
elif not load["pub"]:
log.error("Public key is empty: {}".format(load["id"]))
log.error("Public key is empty: %s", load["id"])
return {"enc": "clear", "load": {"ret": False}}

pub = None

@ -1083,9 +1083,10 @@ class SerializerExtension(Extension):

def _profile_end(self, label, source, previous_time):
log.profile(
"Time (in seconds) to render {} '{}': {}".format(
source, label, time.time() - previous_time
)
"Time (in seconds) to render %s '%s': %s",
source,
label,
time.time() - previous_time,
)

def _parse_profile_block(self, parser, label, source, body, lineno):

@ -45,7 +45,8 @@ def store_job(opts, load, event=None, mminion=None):
raise KeyError(emsg)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified '{}' returner threw a stack trace:\n".format(job_cache),
"The specified '%s' returner threw a stack trace:\n",
job_cache,
exc_info=True,
)

@ -59,7 +60,8 @@ def store_job(opts, load, event=None, mminion=None):
raise KeyError(emsg)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified '{}' returner threw a stack trace:\n".format(job_cache),
"The specified '%s' returner threw a stack trace",
job_cache,
exc_info=True,
)
elif salt.utils.jid.is_jid(load["jid"]):

@ -73,7 +75,8 @@ def store_job(opts, load, event=None, mminion=None):
raise KeyError(emsg)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified '{}' returner threw a stack trace:\n".format(job_cache),
"The specified '%s' returner threw a stack trace",
job_cache,
exc_info=True,
)

@ -128,7 +131,8 @@ def store_job(opts, load, event=None, mminion=None):
log.error("Load does not contain 'jid': %s", e)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified '{}' returner threw a stack trace:\n".format(job_cache),
"The specified '%s' returner threw a stack trace",
job_cache,
exc_info=True,
)

@ -136,8 +140,7 @@ def store_job(opts, load, event=None, mminion=None):
mminion.returners[fstr](load)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified '{}' returner threw a stack trace:\n".format(job_cache),
exc_info=True,
"The specified '%s' returner threw a stack trace", job_cache, exc_info=True,
)

if opts.get("job_cache_store_endtime") and updateetfstr in mminion.returners:

@ -328,16 +328,16 @@ def _read_plist_file(root, file_name):
:return: An empty dictionary if the plist file was invalid, otherwise, a dictionary with plist data
"""
file_path = os.path.join(root, file_name)
log.debug("read_plist: Gathering service info for {}".format(file_path))
log.debug("read_plist: Gathering service info for %s", file_path)

# Must be a plist file
if not file_path.lower().endswith(".plist"):
log.debug("read_plist: Not a plist file: {}".format(file_path))
log.debug("read_plist: Not a plist file: %s", file_path)
return {}

# ignore broken symlinks
if not os.path.exists(os.path.realpath(file_path)):
log.warning("read_plist: Ignoring broken symlink: {}".format(file_path))
log.warning("read_plist: Ignoring broken symlink: %s", file_path)
return {}

try:

@ -348,9 +348,8 @@ def _read_plist_file(root, file_name):
# Raised in python3 if the file is not XML.
# There's nothing we can do; move on to the next one.
log.warning(
'read_plist: Unable to parse "{}" as it is invalid XML: InvalidFileException.'.format(
file_path
)
'read_plist: Unable to parse "%s" as it is invalid XML: InvalidFileException.',
file_path,
)
return {}

@ -358,27 +357,22 @@ def _read_plist_file(root, file_name):
# fixes https://github.com/saltstack/salt/issues/58143
# choosing not to log a Warning as this would happen on BigSur+ machines.
log.debug(
"Caught ValueError: '{}', while trying to parse '{}'.".format(
err, file_path
)
"Caught ValueError: '%s', while trying to parse '%s'.", err, file_path
)
return {}

except xml.parsers.expat.ExpatError:
# Raised by py3 if the file is XML, but with errors.
log.warning(
'read_plist: Unable to parse "{}" as it is invalid XML: xml.parsers.expat.ExpatError.'.format(
file_path
)
'read_plist: Unable to parse "%s" as it is invalid XML: xml.parsers.expat.ExpatError.',
file_path,
)
return {}

if "Label" not in plist:
# not all launchd plists contain a Label key
log.debug(
"read_plist: Service does not contain a Label key. Skipping {}.".format(
file_path
)
"read_plist: Service does not contain a Label key. Skipping %s.", file_path
)
return {}

@ -2232,16 +2232,11 @@ def parse_host_port(host_port):
try:
port = int(port)
except ValueError as _e_:
log.error(
'host_port "{}" port value "{}" is not an integer.'.format(
host_port, port
)
)
raise ValueError(
'host_port "{}" port value "{}" is not an integer.'.format(
host_port, port
)
errmsg = 'host_port "{}" port value "{}" is not an integer.'.format(
host_port, port
)
log.error(errmsg)
raise ValueError(errmsg)
else:
host = _s_
try:

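When the same text feeds both a log call and a raised exception, it has to be built unconditionally anyway, so the hunk above formats it once into a variable instead of twice inline. A small sketch of that shape (the input values are illustrative):

import logging

log = logging.getLogger(__name__)

host_port, port = "example.com:http", "http"  # illustrative bad input

# Built eagerly on purpose: the string is needed for the exception
# regardless of the logging level, so .format() is appropriate here.
errmsg = 'host_port "{}" port value "{}" is not an integer.'.format(
    host_port, port
)
log.error(errmsg)
raise ValueError(errmsg)
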
@ -228,9 +228,7 @@ def claim_mantle_of_responsibility(file_name):
# all OSs supported by salt has psutil
if not HAS_PSUTIL:
log.critical(
"Assuming no other Process has this responsibility! pidfile: {}".format(
file_name
)
"Assuming no other Process has this responsibility! pidfile: %s", file_name
)
return True

@ -245,9 +243,9 @@ def claim_mantle_of_responsibility(file_name):
with salt.utils.files.fopen(file_name, "r") as file:
file_process_info = json.load(file)
except json.decoder.JSONDecodeError:
log.error("pidfile: {} is corrupted".format(file_name))
log.error("pidfile: %s is corrupted", file_name)
except FileNotFoundError:
log.info("pidfile: {} not found".format(file_name))
log.info("pidfile: %s not found", file_name)

this_process_info = get_process_info()

@ -282,9 +280,7 @@ def check_mantle_of_responsibility(file_name):
# all OSs supported by salt has psutil
if not HAS_PSUTIL:
log.critical(
"Assuming no other Process has this responsibility! pidfile: {}".format(
file_name
)
"Assuming no other Process has this responsibility! pidfile: %s", file_name
)
return

@ -293,10 +289,10 @@ def check_mantle_of_responsibility(file_name):
with salt.utils.files.fopen(file_name, "r") as file:
file_process_info = json.load(file)
except json.decoder.JSONDecodeError:
log.error("pidfile: {} is corrupted".format(file_name))
log.error("pidfile: %s is corrupted", file_name)
return
except FileNotFoundError:
log.info("pidfile: {} not found".format(file_name))
log.info("pidfile: %s not found", file_name)
return

if not isinstance(file_process_info, dict) or not isinstance(

@ -287,9 +287,7 @@ def get_tops_python(py_ver, exclude=None, ext_py_ver=None):
continue

if not salt.utils.path.which(py_ver):
log.error(
"{} does not exist. Could not auto detect dependencies".format(py_ver)
)
log.error("%s does not exist. Could not auto detect dependencies", py_ver)
return {}
py_shell_cmd = [py_ver, "-c", "import {0}; print({0}.__file__)".format(mod)]
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE)

@ -298,9 +296,9 @@ def get_tops_python(py_ver, exclude=None, ext_py_ver=None):

if not stdout or not os.path.exists(mod_file):
log.error(
"Could not auto detect file location for module {} for python version {}".format(
mod, py_ver
)
"Could not auto detect file location for module %s for python version %s",
mod,
py_ver,
)
continue

@ -453,16 +451,16 @@ def get_tops(extra_mods="", so_mods=""):
else:
tops.append(os.path.join(moddir, base + ".py"))
except ImportError as err:
log.exception(err)
log.error('Unable to import extra-module "%s"', mod)
log.error(
'Unable to import extra-module "%s": %s', mod, err, exc_info=True
)

for mod in [m for m in so_mods.split(",") if m]:
try:
locals()[mod] = __import__(mod)
tops.append(locals()[mod].__file__)
except ImportError as err:
log.exception(err)
log.error('Unable to import so-module "%s"', mod)
log.error('Unable to import so-module "%s"', mod, exc_info=True)

return tops

@ -286,11 +286,11 @@ def get_cache():

# Determine if ttl still valid
if ttl10 < cur_time:
log.debug("Cached token has expired {} < {}: DELETING".format(ttl10, cur_time))
log.debug("Cached token has expired %s < %s: DELETING", ttl10, cur_time)
del_cache()
return _gen_new_connection()
else:
log.debug("Token has not expired {} > {}".format(ttl10, cur_time))
log.debug("Token has not expired %s > %s", ttl10, cur_time)
return connection

@ -364,7 +364,7 @@ def make_request(
log.debug("Deleting token from memory")
del __context__["vault_token"]
else:
log.debug("Token has {} uses left".format(connection["uses"]))
log.debug("Token has %s uses left", connection["uses"])
write_cache(connection)

if get_token_url:

@ -192,14 +192,14 @@ def _move(src, dst):


def _run_command(args):
log.info("Running command: {}".format(args))
log.info("Running command: %s", args)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stdout:
log.debug("Command output: \n{}".format(stdout))
log.debug("Command output: \n%s", stdout)
if stderr:
log.error(stderr)
log.info("Return code: {}".format(proc.returncode))
log.info("Return code: %s", proc.returncode)
return stdout, stderr, proc.returncode

@ -212,7 +212,7 @@ def _make_sdist(opts, python_bin="python"):
glob.iglob(os.path.join(opts.source_dir, "dist", "salt-*.tar.gz")),
key=os.path.getctime,
)
log.info("sdist is located at {}".format(sdist_path))
log.info("sdist is located at %s", sdist_path)
return sdist_path
else:
_abort("Failed to create sdist")

@ -239,7 +239,7 @@ def build_centos(opts):
except OSError as exc:
_abort("{}".format(exc))

log.info("major_release: {}".format(major_release))
log.info("major_release: %s", major_release)

define_opts = ["--define", "_topdir {}".format(os.path.join(opts.build_dir))]
build_reqs = ["rpm-build"]

@ -280,8 +280,8 @@ def build_centos(opts):
salt_pkgver = ".".join((base, offset, oid))
salt_srcver = "-".join((base, offset, oid))

log.info("salt_pkgver: {}".format(salt_pkgver))
log.info("salt_srcver: {}".format(salt_srcver))
log.info("salt_pkgver: %s", salt_pkgver)
log.info("salt_srcver: %s", salt_srcver)

# Setup build environment
for build_dir in "BUILD BUILDROOT RPMS SOURCES SPECS SRPMS".split():

@ -377,9 +377,7 @@ if __name__ == "__main__":
level=LOG_LEVELS[opts.log_level],
)
if opts.log_level not in LOG_LEVELS:
log.error(
"Invalid log level '{}', falling back to 'warning'".format(opts.log_level)
)
log.error("Invalid log level '%s', falling back to 'warning'", opts.log_level)

# Build for the specified platform
if not opts.platform:

@ -394,5 +392,5 @@ if __name__ == "__main__":
print(msg) # pylint: disable=C0325
for artifact in artifacts:
shutil.copy(artifact, opts.artifact_dir)
log.info("Copied {} to artifact directory".format(artifact))
log.info("Copied %s to artifact directory", artifact)
log.info("Done!")

@ -61,7 +61,7 @@ class CloudTest(ShellCase):
if not query:
query = self.query_instances()

log.debug('Checking for "{}" in {}'.format(instance_name, query))
log.debug('Checking for "%s" in %s', instance_name, query)
if isinstance(query, set):
return instance_name in query
return any(instance_name == q.strip(": ") for q in query)

@ -89,9 +89,9 @@ class CloudTest(ShellCase):
for tries in range(self.__RE_TRIES):
if self._instance_exists(instance_name, query):
log.debug(
'Instance "{}" reported after {} seconds'.format(
instance_name, tries * self.__RE_RUN_DELAY
)
'Instance "%s" reported after %s seconds',
instance_name,
tries * self.__RE_RUN_DELAY,
)
break
else:

@ -106,14 +106,14 @@ class CloudTest(ShellCase):
),
)

log.debug('Instance exists and was created: "{}"'.format(instance_name))
log.debug('Instance exists and was created: "%s"', instance_name)

def assertDestroyInstance(self, instance_name=None, timeout=None):
if timeout is None:
timeout = TIMEOUT
if not instance_name:
instance_name = self.instance_name
log.debug('Deleting instance "{}"'.format(instance_name))
log.debug('Deleting instance "%s"', instance_name)
delete_str = self.run_cloud(
"-d {} --assume-yes --out=yaml".format(instance_name), timeout=timeout
)

@ -143,9 +143,10 @@ class CloudTest(ShellCase):
if self._instance_exists(query=query):
sleep(30)
log.debug(
'Instance "{}" still found in query after {} tries: {}'.format(
instance_name, tries, query
)
'Instance "%s" still found in query after %s tries: %s',
instance_name,
tries,
query,
)
query = self.query_instances()
# The last query should have been successful

@ -254,9 +255,7 @@ class CloudTest(ShellCase):
):
instances.add(q)
log.debug(
'Adding "{}" to the set of instances that needs to be deleted'.format(
q
)
'Adding "%s" to the set of instances that needs to be deleted', q
)
return instances

@ -281,9 +280,10 @@ class CloudTest(ShellCase):
)
except AssertionError as e:
log.error(
'Failed to delete instance "{}". Tries: {}\n{}'.format(
instance_name, tries, str(e)
)
'Failed to delete instance "%s". Tries: %s\n%s',
instance_name,
tries,
str(e),
)
if not self._instance_exists():
destroyed = True

@ -323,9 +323,7 @@ class CloudTest(ShellCase):
success = False
fail_messages.append(alt_destroy_message)
log.error(
'Failed to destroy instance "{}": {}'.format(
instance, alt_destroy_message
)
'Failed to destroy instance "%s": %s', instance, alt_destroy_message
)
self.assertTrue(success, "\n".join(fail_messages))
self.assertFalse(

@ -533,12 +533,10 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
# Exit loop if a versionlock package installed correctly
try:
self.assertSaltTrueReturn(ret)
log.debug(
"Installed versionlock package: {}".format(versionlock_pkg)
)
log.debug("Installed versionlock package: %s", versionlock_pkg)
break
except AssertionError as e:
log.debug("Versionlock package not found:\n{}".format(e))
except AssertionError as exc:
log.debug("Versionlock package not found:\n%s", exc)
else:
self.fail("Could not install versionlock package from {}".format(pkgs))

@ -654,12 +652,10 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
# Exit loop if a versionlock package installed correctly
try:
self.assertSaltTrueReturn(ret)
log.debug(
"Installed versionlock package: {}".format(versionlock_pkg)
)
log.debug("Installed versionlock package: %s", versionlock_pkg)
break
except AssertionError as e:
log.debug("Versionlock package not found:\n{}".format(e))
except AssertionError as exc:
log.debug("Versionlock package not found:\n%s", exc)
else:
self.fail("Could not install versionlock package from {}".format(pkgs))

@ -216,9 +216,10 @@ def _testrpm_signed(abs_path_named_rpm):
CHECK_KEYID_OK = re.compile(test_string, re.M)
retrc = CHECK_KEYID_OK.search(rpm_chk_sign.decode())
log.debug(
"signed checking, found test_string '{}' in rpm_chk_sign '{}', return code '{}'".format(
test_string, rpm_chk_sign, retrc
)
"signed checking, found test_string '%s' in rpm_chk_sign '%s', return code '%s'",
test_string,
rpm_chk_sign,
retrc,
)
if retrc:
return True

@ -691,7 +691,7 @@ def with_system_user(
def wrap(cls):

# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
log.debug("Creating system user %r", username)
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})

@ -703,7 +703,7 @@ def with_system_user(
cls.skipTest("Failed to create system user {!r}".format(username))

if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
log.debug("Deleting the system user %r", username)
delete_user = cls.run_function(
"user.delete", [username, True, True]
)

@ -714,7 +714,7 @@ def with_system_user(
username
)
)
log.debug("Second time creating system user {!r}".format(username))
log.debug("Second time creating system user %r", username)
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(

@ -739,8 +739,7 @@ def with_system_user(
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
"Running %r raised an exception: %s", func, exc, exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code

@ -754,13 +753,15 @@ def with_system_user(
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
"deleting the created system user %r "
"afterwards did.",
username,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
" of the system user %r",
username,
)
if failure is not None:
# If an exception was thrown, raise it

@ -800,7 +801,7 @@ def with_system_group(group, on_existing="delete", delete=True):
def wrap(cls):

# Let's add the user to the system.
log.debug("Creating system group {!r}".format(group))
log.debug("Creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")

@ -809,14 +810,14 @@ def with_system_group(group, on_existing="delete", delete=True):
cls.skipTest("Failed to create system group {!r}".format(group))

if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
log.debug("Deleting the system group %r", group)
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
log.debug("Second time creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(

@ -832,8 +833,7 @@ def with_system_group(group, on_existing="delete", delete=True):
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
"Running %r raised an exception: %s", func, exc, exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code

@ -845,13 +845,15 @@ def with_system_group(group, on_existing="delete", delete=True):
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
"deleting the created system group %r "
"afterwards did.",
group,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
" of the system group %r",
group,
)
if failure is not None:
# If an exception was thrown, raise it

@ -894,9 +896,9 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
def wrap(cls):

# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
log.debug("Creating system user %r", username)
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group {!r}".format(group))
log.debug("Creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")

@ -905,7 +907,7 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
cls.skipTest("Failed to create system user {!r}".format(username))

if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
log.debug("Deleting the system user %r", username)
delete_user = cls.run_function(
"user.delete", [username, True, True]
)

@ -916,7 +918,7 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
username
)
)
log.debug("Second time creating system user {!r}".format(username))
log.debug("Second time creating system user %r", username)
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(

@ -932,14 +934,14 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
cls.skipTest("Failed to create system group {!r}".format(group))

if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
log.debug("Deleting the system group %r", group)
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
log.debug("Second time creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(

@ -955,8 +957,7 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
"Running %r raised an exception: %s", func, exc, exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code

@ -971,25 +972,29 @@ def with_system_user_and_group(username, group, on_existing="delete", delete=True):
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
"deleting the created system user %r "
"afterwards did.",
username,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
" of the system user %r",
username,
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
"deleting the created system group %r "
"afterwards did.",
group,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
" of the system group %r",
group,
)
if failure is not None:
# If an exception was thrown, raise it

@ -1308,7 +1313,6 @@ def repeat(caller=None, condition=True, times=5):

@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)

@ -494,7 +494,7 @@ class SaltReturnAssertsMixin:
for saltret in self.__getWithinSaltReturn(ret, "result"):
self.assertTrue(saltret)
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(ret))
try:
raise AssertionError(
"{result} is not True. Salt Comment:\n{comment}".format(

@ -513,7 +513,7 @@ class SaltReturnAssertsMixin:
for saltret in self.__getWithinSaltReturn(ret, "result"):
self.assertFalse(saltret)
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(ret))
try:
raise AssertionError(
"{result} is not False. Salt Comment:\n{comment}".format(

@ -530,7 +530,7 @@ class SaltReturnAssertsMixin:
for saltret in self.__getWithinSaltReturn(ret, "result"):
self.assertIsNone(saltret)
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(ret))
try:
raise AssertionError(
"{result} is not None. Salt Comment:\n{comment}".format(

@ -291,7 +291,7 @@ class StateReturnAsserts:
for saltret in self.get_within_state_return("result"):
assert saltret is True
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(self.ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(self.ret))
try:
raise AssertionError(
"{result} is not True. Salt Comment:\n{comment}".format(

@ -310,7 +310,7 @@ class StateReturnAsserts:
for saltret in self.get_within_state_return("result"):
assert saltret is False
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(self.ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(self.ret))
try:
raise AssertionError(
"{result} is not False. Salt Comment:\n{comment}".format(

@ -327,7 +327,7 @@ class StateReturnAsserts:
for saltret in self.get_within_state_return("result"):
assert saltret is None
except AssertionError:
log.info("Salt Full Return:\n{}".format(pprint.pformat(self.ret)))
log.info("Salt Full Return:\n%s", pprint.pformat(self.ret))
try:
raise AssertionError(
"{result} is not None. Salt Comment:\n{comment}".format(

@ -94,7 +94,7 @@ class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin):
inst = MagicMock(name="telegram.Bot()")
telegram_api.Bot = MagicMock(name="telegram", return_value=inst)

log.debug("telegram {}".format(telegram))
log.debug("telegram %s", telegram)
username = "different_user"
user = telegram.user.User(id=1, first_name="", username=username)
chat = telegram.chat.Chat(1, "private", username=username)

@ -169,7 +169,7 @@ class WTMPBeaconTestCase(TestCase, LoaderModuleMockMixin):
]

ret = wtmp.beacon(config)
log.debug("{}".format(ret))
log.debug("wtmp beacon: %s", ret)
self.assertEqual(ret, _expected)

@skipIf(not _TIME_SUPPORTED, "dateutil.parser is missing.")

@ -1349,7 +1349,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
if cmd == "/usr/bin/zonename":
# NOTE: we return the name of the zone
return "myzone"
log.debug("cmd.run: '{}'".format(cmd))
log.debug("cmd.run: '%s'", cmd)

def _cmd_all_side_effect(cmd):
# NOTE: prtdiag doesn't work inside a zone

@ -1361,7 +1361,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
"stdout": "",
"stderr": "prtdiag can only be run in the global zone",
}
log.debug("cmd.run_all: '{}'".format(cmd))
log.debug("cmd.run_all: '%s'", cmd)

def _which_side_effect(path):
if path == "prtdiag":

@ -1401,7 +1401,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
"stdout": "",
"stderr": "prtdiag can only be run in the global zone",
}
log.debug("cmd.run_all: '{}'".format(cmd))
log.debug("cmd.run_all: '%s'", cmd)

def _which_side_effect(path):
if path == "prtdiag":