Upgrade to black 21.7b0

Pedro Algarvio 2021-08-03 07:25:24 +01:00 committed by Gareth J. Greenaway
parent 142c2e7867
commit 6abb43d2df
501 changed files with 6800 additions and 3586 deletions
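
Two formatting behaviours account for most of the mechanical churn below: black 21.7b0 strips the padding spaces that 19.10b0 tolerated just inside a docstring's triple quotes, and it honours the "magic trailing comma", so a call that 19.10b0 wrapped with its arguments packed onto one continuation line plus a trailing comma is now exploded to one argument per line. A minimal sketch of the before/after, using an invented helper rather than code from the Salt tree:

def do_query(host, user=None, password=None):
    """Stand-in helper so the sketch is self-contained; not part of Salt."""
    return {"host": host, "user": user, "password": password}


# As black 19.10b0 left it: padded docstring, arguments packed onto one
# wrapped line even though a trailing comma is present.
def query_old(host, user, password):
    """ Run a hypothetical query against a host. """
    return do_query(
        host, user=user, password=password,
    )


# As black 21.7b0 rewrites it: docstring whitespace stripped, and the
# pre-existing "magic" trailing comma forces one argument per line.
def query_new(host, user, password):
    """Run a hypothetical query against a host."""
    return do_query(
        host,
        user=user,
        password=password,
    )

The pre-commit configuration hunk that follows bumps the black hook to 21.7b0 and pins blacken-docs to the same black release, so the Python examples under doc/*.rst are checked against the same rules.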


@ -1134,7 +1134,7 @@ repos:
)$
- repo: https://github.com/psf/black
rev: 19.10b0
rev: 21.7b0
hooks:
- id: black
# This tells pre-commit not to pass files to black.
@ -1146,6 +1146,14 @@ repos:
tests/kitchen/.*
)$
- repo: https://github.com/asottile/blacken-docs
rev: v1.10.0
hooks:
- id: blacken-docs
args: [--skip-errors]
files: ^doc/.*\.rst
additional_dependencies: [black==21.7b0]
- repo: https://github.com/myint/rstcheck
# This, for now, is meant to run when locally committing code and will be disabled(skipped) when we run pre-commit
# against all codebase to avoid MASSIVE code churn. This way, we do it in smaller chunks, a few at a time.
@ -1155,14 +1163,6 @@ repos:
name: Check reST files using rstcheck
args: [--report=warning]
additional_dependencies: [sphinx]
- repo: https://github.com/asottile/blacken-docs
rev: v1.7.0
hooks:
- id: blacken-docs
args: [--skip-errors]
files: ^doc/.*\.rst
additional_dependencies: [black==19.10b0]
# <---- Code Formatting --------------------------------------------------------------------------------------------
# ----- Security -------------------------------------------------------------------------------------------------->


@ -222,7 +222,7 @@ class State(Directive):
indextext = "{1} ({0}-formula)".format(formula, statename)
inode = addnodes.index(
entries=[("single", indextext, "module-{}".format(statename), ""),]
entries=[("single", indextext, "module-{}".format(statename), "")]
)
return [targetnode, inode]
@ -244,9 +244,7 @@ class SaltDomain(python_domain.PythonDomain):
data_version = 2
object_types = python_domain.PythonDomain.object_types
object_types.update(
{"state": ObjType(_("state"), "state"),}
)
object_types.update({"state": ObjType(_("state"), "state")})
directives = python_domain.PythonDomain.directives
directives.update(
@ -260,14 +258,10 @@ class SaltDomain(python_domain.PythonDomain):
)
roles = python_domain.PythonDomain.roles
roles.update(
{"formula": SLSXRefRole(),}
)
roles.update({"formula": SLSXRefRole()})
initial_data = python_domain.PythonDomain.initial_data
initial_data.update(
{"formulas": {},}
)
initial_data.update({"formulas": {}})
indices = [
SaltModuleIndex,


@ -92,7 +92,7 @@ included libraries.
def is_ok(person):
""" Checks whether a person is really a lumberjack """
"""Checks whether a person is really a lumberjack"""
return sleep.all_night(person) and work.all_day(person)
Then, create the zip:


@ -82,7 +82,7 @@ to install a package:
"""
return {
"include": ["python"],
"python-foo": {"pkg.installed": [{"version": "1.5-1.el7"},]},
"python-foo": {"pkg.installed": [{"version": "1.5-1.el7"}]},
}
This would be equivalent to the following:


@ -306,7 +306,7 @@ general, the following code can be used as-is:
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"), __opts__["query.selection"], call
)
However, depending on the cloud provider, additional variables may be required.
@ -324,7 +324,7 @@ appropriately:
conn = get_conn() # pylint: disable=E0602
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, "function"), __opts__["query.selection"], call,
list_nodes_full(conn, "function"), __opts__["query.selection"], call
)
This function is normally called with the ``-S`` option:


@ -666,7 +666,7 @@ example is a state tree of two sls files, one simple and one complicated.
return {
"set_roles_grains": {
"grains.present": [{"name": "roles"}, {"value": list(list_of_roles)},],
"grains.present": [{"name": "roles"}, {"value": list(list_of_roles)}],
},
}


@ -232,7 +232,7 @@ easily done using the normal cross-calling syntax:
# do something!
__salt__["event.send"](
"myco/my_custom_module/finished",
{"finished": True, "message": "The something is finished!",},
{"finished": True, "message": "The something is finished!"},
)
From Custom Python Scripts


@ -316,11 +316,11 @@ structure to include job meta data such as ``retcode``.
This is useful at the Python API:
.. code-block:: python
.. code-block:: pycon
>>> import salt.client
>>> client = salt.client.LocalClient()
>>> client.cmd('*', 'cmd.run', ['return 1'], full_return=True)
>>> client.cmd("*", "cmd.run", ["return 1"], full_return=True)
{'jerry': {'jid': '20170520151213898053', 'ret': '', 'retcode': 1}}
As well as from salt-api:


@ -959,7 +959,7 @@ their code to use ``tgt_type``.
>>> import salt.client
>>> local = salt.client.LocalClient()
>>> local.cmd('*', 'cmd.run', ['whoami'], tgt_type='glob')
>>> local.cmd("*", "cmd.run", ["whoami"], tgt_type="glob")
{'jerry': 'root'}
Minion Configuration Deprecated Option


@ -86,18 +86,22 @@ deeply-nested dict can be declared with curly braces:
Here is a more concrete example of how YAML actually handles these
indentations, using the Python interpreter on the command line:
.. code-block:: python
.. code-block:: pycon
>>> import yaml
>>> yaml.safe_load('''mystate:
>>> yaml.safe_load(
... """mystate:
... file.managed:
... - context:
... some: var''')
... some: var"""
... )
{'mystate': {'file.managed': [{'context': {'some': 'var'}}]}}
>>> yaml.safe_load('''mystate:
>>> yaml.safe_load(
... """mystate:
... file.managed:
... - context:
... some: var''')
... some: var"""
... )
{'mystate': {'file.managed': [{'some': 'var', 'context': None}]}}
Note that in the second example, ``some`` is added as another key in the same
@ -367,10 +371,10 @@ string, it should be surrounded by quotes. `More information here`_.
Here's an example:
.. code-block:: python
.. code-block:: pycon
>>> import yaml
>>> yaml.safe_load('2013_05_10')
>>> yaml.safe_load("2013_05_10")
20130510
>>> yaml.safe_load('"2013_05_10"')
'2013_05_10'
@ -385,10 +389,10 @@ functionality. If values such as these are needed in a salt YAML file
(specifically a configuration file), they should be formatted with surrounding
strings to force YAML to serialize them as strings:
.. code-block:: python
.. code-block:: pycon
>>> import yaml
>>> yaml.safe_load('2014-01-20 14:23:23')
>>> yaml.safe_load("2014-01-20 14:23:23")
datetime.datetime(2014, 1, 20, 14, 23, 23)
>>> yaml.safe_load('"2014-01-20 14:23:23"')
'2014-01-20 14:23:23'
@ -400,10 +404,10 @@ is a real one). Thus, for example, if a minion were to have an ID of
date was out of range. The workaround is the same, surround the offending
string with quotes:
.. code-block:: python
.. code-block:: pycon
>>> import yaml
>>> yaml.safe_load('4017-16-20')
>>> yaml.safe_load("4017-16-20")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/yaml/__init__.py", line 93, in safe_load


@ -181,9 +181,7 @@ password may be passed in as ``username`` and ``password``, respectively.
.. code-block:: python
salt.utils.http.query(
"http://example.com", username="larry", password="5700g3543v4r",
)
salt.utils.http.query("http://example.com", username="larry", password="5700g3543v4r")
Cookies and Sessions
````````````````````
@ -327,9 +325,7 @@ debugging purposes, SSL verification can be turned off.
.. code-block:: python
salt.utils.http.query(
"https://example.com", verify_ssl=False,
)
salt.utils.http.query("https://example.com", verify_ssl=False)
CA Bundles
~~~~~~~~~~
@ -343,9 +339,7 @@ using the ``ca_bundle`` variable.
.. code-block:: python
salt.utils.http.query(
"https://example.com", ca_bundle="/path/to/ca_bundle.pem",
)
salt.utils.http.query("https://example.com", ca_bundle="/path/to/ca_bundle.pem")
Updating CA Bundles
```````````````````


@ -122,7 +122,7 @@ Note how the module encapsulates all of the logic around finding the storage ser
]
colo = __pillar__.get("inventory", {}).get("colo", "Unknown")
return __pillar__.get("storage_servers", {}).get(colo, ["unknown",])
return __pillar__.get("storage_servers", {}).get(colo, ["unknown"])
def ip():


@ -8,7 +8,7 @@ from os.path import abspath, dirname, join
from shutil import copy
from subprocess import check_call
parser = argparse.ArgumentParser(description="Build salt rpms",)
parser = argparse.ArgumentParser(description="Build salt rpms")
parser.add_argument(
"buildid",
help="The build id to use i.e. the bit after the salt version in the package name",


@ -66,7 +66,8 @@ def beacon(config):
# NOTE: lookup current images
current_vms = __salt__["vmadm.list"](
keyed=True, order="uuid,state,alias,hostname,dns_domain",
keyed=True,
order="uuid,state,alias,hostname,dns_domain",
)
# NOTE: apply configuration


@ -19,7 +19,7 @@ except ImportError:
HAS_WATCHDOG = False
class FileSystemEventHandler:
""" A dummy class to make the import work """
"""A dummy class to make the import work"""
def __init__(self):
pass


@ -92,8 +92,7 @@ def __virtual__():
def _init_client():
"""Setup client and init datastore.
"""
"""Setup client and init datastore."""
global client, path_prefix
if client is not None:
return


@ -127,7 +127,8 @@ def _create_table():
# warning on CREATE TABLE
query = """SELECT COUNT(TABLE_NAME) FROM information_schema.tables
WHERE table_schema = '{}' AND table_name = '{}'""".format(
_mysql_kwargs["db"], _table_name,
_mysql_kwargs["db"],
_table_name,
)
cur, _ = run_query(client, query)
r = cur.fetchone()
@ -149,8 +150,7 @@ def _create_table():
def _init_client():
"""Initialize connection and create table if needed
"""
"""Initialize connection and create table if needed"""
if client is not None:
return


@ -29,7 +29,9 @@ class SPM(parsers.SPMParser):
self.config["spm_cache_dir"],
]
verify_env(
v_dirs, self.config["user"], root_dir=self.config["root_dir"],
v_dirs,
self.config["user"],
root_dir=self.config["root_dir"],
)
verify_log(self.config)
client = salt.spm.SPMClient(ui, self.config)


@ -409,7 +409,9 @@ class SyncClientMixin:
data["return"] = str(ex)
else:
data["return"] = "Exception occurred in {} {}: {}".format(
self.client, fun, traceback.format_exc(),
self.client,
fun,
traceback.format_exc(),
)
data["success"] = False


@ -1016,7 +1016,8 @@ class Single:
Deploy salt-thin
"""
self.shell.send(
self.thin, os.path.join(self.thin_dir, "salt-thin.tgz"),
self.thin,
os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
@ -1027,7 +1028,8 @@ class Single:
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"], os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
self.mods["file"],
os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True


@ -272,7 +272,9 @@ class Shell:
"""
try:
proc = salt.utils.nb_popen.NonBlockingPopen(
self._split_cmd(cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE,
self._split_cmd(cmd),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
while True:
time.sleep(0.1)


@ -47,7 +47,7 @@ ARGS = None
def get_system_encoding():
"""
Get system encoding. Most of this code is a part of salt/__init__.py
Get system encoding. Most of this code is a part of salt/__init__.py
"""
# This is the most trustworthy source of the system encoding, though, if
# salt is being imported after being daemonized, this information is lost
@ -215,7 +215,13 @@ def reset_time(path=".", amt=None):
fname = os.path.join(path, fname)
if os.path.isdir(fname):
reset_time(fname, amt=amt)
os.utime(fname, (amt, amt,))
os.utime(
fname,
(
amt,
amt,
),
)
def get_executable():


@ -246,7 +246,7 @@ def prep_trans_tar(
fn = filename[
len(file_client.get_cachedir(cache_dest)) :
].strip("/")
tgt = os.path.join(env_root, short, fn,)
tgt = os.path.join(env_root, short, fn)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)


@ -1975,7 +1975,7 @@ class Map(Cloud):
break
log.warning(
"%r already exists, removing from the create map.", name,
"%r already exists, removing from the create map.", name
)
if "existing" not in ret:


@ -104,70 +104,56 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
if self.selected_query_option is not None:
if self.selected_query_option == "list_providers":
# pylint: disable=broad-except
try:
ret = mapper.provider_list()
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error listing providers: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.selected_query_option == "list_profiles":
provider = self.options.list_profiles
# pylint: disable=broad-except
try:
ret = mapper.profile_list(provider)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error listing profiles: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.config.get("map", None):
log.info("Applying map from '%s'.", self.config["map"])
# pylint: disable=broad-except
try:
ret = mapper.interpolated_map(query=self.selected_query_option)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error with a custom map: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
else:
# pylint: disable=broad-except
try:
ret = mapper.map_providers_parallel(
query=self.selected_query_option
)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error with a map: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.list_locations is not None:
# pylint: disable=broad-except
try:
ret = mapper.location_list(self.options.list_locations)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error listing locations: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.list_images is not None:
# pylint: disable=broad-except
try:
ret = mapper.image_list(self.options.list_images)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error listing images: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.list_sizes is not None:
# pylint: disable=broad-except
try:
ret = mapper.size_list(self.options.list_sizes)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error listing sizes: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.destroy and (
self.config.get("names", None) or self.config.get("map", None)
@ -204,14 +190,12 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
for name in vms:
msg += " {}\n".format(name)
names.add(name)
# pylint: disable=broad-except
try:
if self.print_confirm(msg):
ret = mapper.destroy(names, cached=True)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error destroying machines: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.action and (
self.config.get("names", None) or self.config.get("map", None)
@ -242,14 +226,12 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
machines.append(name)
names = machines
# pylint: disable=broad-except
try:
if self.print_confirm(msg):
ret = mapper.do_action(names, kwargs)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error actioning machines: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.function:
kwargs = {}
@ -266,24 +248,20 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
"as kwargs. Ex: image=ami-54cf5c3d. Remaining "
"arguments: {}".format(args)
)
# pylint: disable=broad-except
try:
ret = mapper.do_function(
self.function_provider, self.function_name, kwargs
)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error running the function: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.profile and self.config.get("names", False):
# pylint: disable=broad-except
try:
ret = mapper.run_profile(self.options.profile, self.config.get("names"))
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was a profile error: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.set_password:
username = self.credential_username
@ -295,7 +273,6 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
if not mapper.rendered_map:
sys.stderr.write("No nodes defined in this map")
self.exit(salt.defaults.exitcodes.EX_GENERIC)
# pylint: disable=broad-except
try:
ret = {}
run_map = True
@ -354,10 +331,9 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
msg = "Already running."
ret[name] = {"Message": msg}
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was a query error: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
elif self.options.bootstrap:
host = self.options.bootstrap
@ -386,13 +362,11 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
)
)
# pylint: disable=broad-except
try:
ret = salt.utils.cloud.bootstrap(vm_, self.config)
except (SaltCloudException, Exception,) as exc:
except Exception as exc: # pylint: disable=broad-except
msg = "There was an error bootstrapping the minion: {0}"
self.handle_exception(msg, exc)
# pylint: enable=broad-except
else:
self.error("Nothing was done. Using the proper arguments?")


@ -364,7 +364,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -222,7 +222,9 @@ def get_configured_provider():
for combo in key_combos:
provider = config.is_provider_configured(
__opts__, _get_active_provider_name() or __virtualname__, combo,
__opts__,
_get_active_provider_name() or __virtualname__,
combo,
)
if provider:
@ -352,12 +354,15 @@ def avail_images(call=None):
data = {}
try:
offers = compconn.virtual_machine_images.list_offers(
location=region, publisher_name=publisher,
location=region,
publisher_name=publisher,
)
for offer_obj in offers:
offer = offer_obj.as_dict()
skus = compconn.virtual_machine_images.list_skus(
location=region, publisher_name=publisher, offer=offer["name"],
location=region,
publisher_name=publisher,
offer=offer["name"],
)
for sku_obj in skus:
sku = sku_obj.as_dict()
@ -370,7 +375,12 @@ def avail_images(call=None):
for version_obj in results:
version = version_obj.as_dict()
name = "|".join(
(publisher, offer["name"], sku["name"], version["name"],)
(
publisher,
offer["name"],
sku["name"],
version["name"],
)
)
data[name] = {
"publisher": publisher,
@ -593,14 +603,16 @@ def delete_interface(call=None, kwargs=None): # pylint: disable=unused-argument
ips = []
iface = netconn.network_interfaces.get(
kwargs["resource_group"], kwargs["iface_name"],
kwargs["resource_group"],
kwargs["iface_name"],
)
iface_name = iface.name
for ip_ in iface.ip_configurations:
ips.append(ip_.name)
poller = netconn.network_interfaces.delete(
kwargs["resource_group"], kwargs["iface_name"],
kwargs["resource_group"],
kwargs["iface_name"],
)
poller.wait()
@ -735,7 +747,9 @@ def create_network_interface(call=None, kwargs=None):
for pool in be_pools:
try:
lbbep_data = netconn.load_balancer_backend_address_pools.get(
kwargs["resource_group"], load_bal, pool,
kwargs["resource_group"],
load_bal,
pool,
)
pool_ids.append({"id": lbbep_data.as_dict()["id"]})
except CloudError as exc:
@ -768,7 +782,8 @@ def create_network_interface(call=None, kwargs=None):
while True:
try:
pub_ip_data = netconn.public_ip_addresses.get(
kwargs["resource_group"], pub_ip_name,
kwargs["resource_group"],
pub_ip_name,
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs["public_ip_address"] = PublicIPAddress(
@ -942,7 +957,9 @@ def request_instance(vm_):
key_data=ssh_publickeyfile_contents,
path="/home/{}/.ssh/authorized_keys".format(vm_username),
)
sshconfiguration = SshConfiguration(public_keys=[sshpublickey],)
sshconfiguration = SshConfiguration(
public_keys=[sshpublickey],
)
linuxconfiguration = LinuxConfiguration(
disable_password_authentication=disable_password_authentication,
ssh=sshconfiguration,
@ -1074,7 +1091,10 @@ def request_instance(vm_):
if "|" in vm_["image"]:
img_pub, img_off, img_sku, img_ver = vm_["image"].split("|")
img_ref = ImageReference(
publisher=img_pub, offer=img_off, sku=img_sku, version=img_ver,
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_["image"].startswith("/subscriptions"):
img_ref = ImageReference(id=vm_["image"])
@ -1088,7 +1108,9 @@ def request_instance(vm_):
name=disk_name,
vhd=VirtualHardDisk(
uri="https://{}.blob.{}/vhds/{}.vhd".format(
vm_["storage_account"], storage_endpoint_suffix, disk_name,
vm_["storage_account"],
storage_endpoint_suffix,
disk_name,
),
),
os_type=os_type,
@ -1105,7 +1127,10 @@ def request_instance(vm_):
if "|" in vm_["image"]:
img_pub, img_off, img_sku, img_ver = vm_["image"].split("|")
img_ref = ImageReference(
publisher=img_pub, offer=img_off, sku=img_sku, version=img_ver,
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_["image"].startswith("/subscriptions"):
img_ref = ImageReference(id=vm_["image"])
@ -1192,7 +1217,9 @@ def request_instance(vm_):
vm_size=getattr(VirtualMachineSizeTypes, vm_["size"].lower()),
),
storage_profile=StorageProfile(
os_disk=os_disk, data_disks=data_disks, image_reference=img_ref,
os_disk=os_disk,
data_disks=data_disks,
image_reference=img_ref,
),
os_profile=OSProfile(
admin_username=vm_username, computer_name=vm_["name"], **os_kwargs
@ -1298,7 +1325,10 @@ def create(vm_):
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(vm_["name"], vm_["bootstrap_interface"],),
update_args=(
vm_["name"],
vm_["bootstrap_interface"],
),
timeout=config.get_cloud_config_value(
"wait_for_ip_timeout", vm_, __opts__, default=10 * 60
),
@ -1648,7 +1678,9 @@ def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argum
compconn.disks.delete(kwargs["resource_group"], kwargs["blob"])
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error deleting managed disk %s - %s", kwargs.get("blob"), str(exc),
"Error deleting managed disk %s - %s",
kwargs.get("blob"),
str(exc),
)
return False
@ -1947,7 +1979,8 @@ def start(name, call=None):
ret = vm_result.as_dict()
except CloudError as exc:
salt.utils.azurearm.log_cloud_error(
"compute", "Error attempting to start {}: {}".format(name, exc.message),
"compute",
"Error attempting to start {}: {}".format(name, exc.message),
)
ret = {"error": exc.message}


@ -119,7 +119,12 @@ def get_configured_provider():
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("token", "token_pass", "user", "password",),
(
"token",
"token_pass",
"user",
"password",
),
)
@ -331,32 +336,68 @@ def create(vm_):
__opts__, _get_active_provider_name() or __virtualname__, ("token",)
)
group = config.get_cloud_config_value(
"group", vm_, __opts__, search_global=False, default=None,
"group",
vm_,
__opts__,
search_global=False,
default=None,
)
name = vm_["name"]
description = config.get_cloud_config_value(
"description", vm_, __opts__, search_global=False, default=None,
"description",
vm_,
__opts__,
search_global=False,
default=None,
)
ram = config.get_cloud_config_value(
"ram", vm_, __opts__, search_global=False, default=None,
"ram",
vm_,
__opts__,
search_global=False,
default=None,
)
backup_level = config.get_cloud_config_value(
"backup_level", vm_, __opts__, search_global=False, default=None,
"backup_level",
vm_,
__opts__,
search_global=False,
default=None,
)
template = config.get_cloud_config_value(
"template", vm_, __opts__, search_global=False, default=None,
"template",
vm_,
__opts__,
search_global=False,
default=None,
)
password = config.get_cloud_config_value(
"password", vm_, __opts__, search_global=False, default=None,
"password",
vm_,
__opts__,
search_global=False,
default=None,
)
cpu = config.get_cloud_config_value(
"cpu", vm_, __opts__, search_global=False, default=None,
"cpu",
vm_,
__opts__,
search_global=False,
default=None,
)
network = config.get_cloud_config_value(
"network", vm_, __opts__, search_global=False, default=None,
"network",
vm_,
__opts__,
search_global=False,
default=None,
)
location = config.get_cloud_config_value(
"location", vm_, __opts__, search_global=False, default=None,
"location",
vm_,
__opts__,
search_global=False,
default=None,
)
if len(name) > 6:
name = name[0:6]


@ -172,7 +172,7 @@ def get_location(conn, vm_):
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value("location", vm_, __opts__, default=2)
for location in locations:
if str(loc) in (str(location.id), str(location.name),):
if str(loc) in (str(location.id), str(location.name)):
return location
@ -270,7 +270,7 @@ def get_project(conn, vm_):
return False
for project in projects:
if str(projid) in (str(project.id), str(project.name),):
if str(projid) in (str(project.id), str(project.name)):
return project
log.warning("Couldn't find project %s in projects", projid)


@ -197,7 +197,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)
@ -353,7 +355,11 @@ def create(vm_):
)
private_networking = config.get_cloud_config_value(
"private_networking", vm_, __opts__, search_global=False, default=None,
"private_networking",
vm_,
__opts__,
search_global=False,
default=None,
)
if private_networking is not None:
@ -370,7 +376,11 @@ def create(vm_):
)
backups_enabled = config.get_cloud_config_value(
"backups_enabled", vm_, __opts__, search_global=False, default=None,
"backups_enabled",
vm_,
__opts__,
search_global=False,
default=None,
)
if backups_enabled is not None:
@ -379,7 +389,11 @@ def create(vm_):
kwargs["backups"] = backups_enabled
ipv6 = config.get_cloud_config_value(
"ipv6", vm_, __opts__, search_global=False, default=None,
"ipv6",
vm_,
__opts__,
search_global=False,
default=None,
)
if ipv6 is not None:
@ -388,7 +402,11 @@ def create(vm_):
kwargs["ipv6"] = ipv6
monitoring = config.get_cloud_config_value(
"monitoring", vm_, __opts__, search_global=False, default=None,
"monitoring",
vm_,
__opts__,
search_global=False,
default=None,
)
if monitoring is not None:
@ -413,7 +431,11 @@ def create(vm_):
log.exception("Failed to read userdata from %s: %s", userdata_file, exc)
create_dns_record = config.get_cloud_config_value(
"create_dns_record", vm_, __opts__, search_global=False, default=None,
"create_dns_record",
vm_,
__opts__,
search_global=False,
default=None,
)
if create_dns_record:
@ -701,7 +723,8 @@ def list_keypairs(call=None):
while fetch:
items = query(
method="account/keys", command="?page=" + str(page) + "&per_page=100",
method="account/keys",
command="?page=" + str(page) + "&per_page=100",
)
for key_pair in items["ssh_keys"]:
@ -1031,7 +1054,8 @@ def list_floating_ips(call=None):
while fetch:
items = query(
method="floating_ips", command="?page=" + str(page) + "&per_page=200",
method="floating_ips",
command="?page=" + str(page) + "&per_page=200",
)
for floating_ip in items["floating_ips"]:


@ -2335,7 +2335,11 @@ def query_instance(vm_=None, call=None):
def wait_for_instance(
vm_=None, data=None, ip_address=None, display_ssh_output=True, call=None,
vm_=None,
data=None,
ip_address=None,
display_ssh_output=True,
call=None,
):
"""
Wait for an instance upon creation from the EC2 API, to become available
@ -3707,7 +3711,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(get_location()), __opts__["query.selection"], call,
list_nodes_full(get_location()),
__opts__["query.selection"],
call,
)
@ -4813,7 +4819,11 @@ def describe_snapshots(kwargs=None, call=None):
def get_console_output(
name=None, location=None, instance_id=None, call=None, kwargs=None,
name=None,
location=None,
instance_id=None,
call=None,
kwargs=None,
):
"""
Show the console output from the instance.
@ -4862,7 +4872,10 @@ def get_console_output(
def get_password_data(
name=None, kwargs=None, instance_id=None, call=None,
name=None,
kwargs=None,
instance_id=None,
call=None,
):
"""
Return password data for a Windows instance.


@ -270,7 +270,9 @@ def list_nodes_select(call=None):
salt-cloud -S
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -59,7 +59,9 @@ def get_configured_provider():
Return the first configured instance.
"""
return config.is_provider_configured(
__opts__, __active_provider_name__ or __virtualname__, ("key",),
__opts__,
__active_provider_name__ or __virtualname__,
("key",),
)
@ -68,7 +70,8 @@ def get_dependencies():
Warn if dependencies aren't met.
"""
return config.check_driver_dependencies(
__active_provider_name__ or __virtualname__, {"hcloud": HAS_HCLOUD},
__active_provider_name__ or __virtualname__,
{"hcloud": HAS_HCLOUD},
)
@ -222,7 +225,9 @@ def show_instance(name, call=None):
node = {}
__utils__["cloud.cache_node"](
node, __active_provider_name__ or __virtualname__, __opts__,
node,
__active_provider_name__ or __virtualname__,
__opts__,
)
return node
@ -268,7 +273,9 @@ def create(vm_):
"starting create",
"salt/cloud/{}/creating".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"],
"creating",
vm_,
["name", "profile", "provider", "driver"],
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
@ -348,7 +355,9 @@ def create(vm_):
"created instance",
"salt/cloud/{}/created".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"],
"created",
vm_,
["name", "profile", "provider", "driver"],
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
@ -503,7 +512,9 @@ def destroy(name, call=None):
if __opts__.get("update_cachedir", False) is True:
__utils__["cloud.delete_minion_cachedir"](
name, __active_provider_name__.split(":")[0], __opts__,
name,
__active_provider_name__.split(":")[0],
__opts__,
)
return {"Destroyed": "{} was destroyed.".format(name)}


@ -864,7 +864,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)
@ -989,7 +991,10 @@ def show_key(kwargs=None, call=None):
log.error("A keyname is required.")
return False
rcode, data = query(command="my/keys/{}".format(kwargs["keyname"]), method="GET",)
rcode, data = query(
command="my/keys/{}".format(kwargs["keyname"]),
method="GET",
)
return {"keys": {data["name"]: data["key"]}}
@ -1028,7 +1033,11 @@ def import_key(kwargs=None, call=None):
send_data = {"name": kwargs["keyname"], "key": kwargs["key"]}
kwargs["data"] = salt.utils.json.dumps(send_data)
rcode, data = query(command="my/keys", method="POST", data=kwargs["data"],)
rcode, data = query(
command="my/keys",
method="POST",
data=kwargs["data"],
)
log.debug(pprint.pformat(data))
return {"keys": {data["name"]: data["key"]}}
@ -1055,7 +1064,8 @@ def delete_key(kwargs=None, call=None):
return False
rcode, data = query(
command="my/keys/{}".format(kwargs["keyname"]), method="DELETE",
command="my/keys/{}".format(kwargs["keyname"]),
method="DELETE",
)
return data


@ -233,7 +233,11 @@ def list_nodes_select(call=None):
raise SaltCloudSystemExit("query.selection not found in /etc/salt/cloud")
# TODO: somewhat doubt the implementation of cloud.list_nodes_select
return salt.utils.cloud.list_nodes_select(list_nodes_full(), selection, call,)
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
selection,
call,
)
def to_ip_addr_type(addr_type):


@ -179,7 +179,7 @@ def get_configured_provider():
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("apikey", "password",),
("apikey", "password"),
)
@ -400,82 +400,82 @@ def _warn_for_api_v3():
class LinodeAPI:
@abc.abstractmethod
def avail_images(self):
""" avail_images implementation """
"""avail_images implementation"""
@abc.abstractmethod
def avail_locations(self):
""" avail_locations implementation """
"""avail_locations implementation"""
@abc.abstractmethod
def avail_sizes(self):
""" avail_sizes implementation """
"""avail_sizes implementation"""
@abc.abstractmethod
def boot(self, name=None, kwargs=None):
""" boot implementation """
"""boot implementation"""
@abc.abstractmethod
def clone(self, kwargs=None):
""" clone implementation """
"""clone implementation"""
@abc.abstractmethod
def create_config(self, kwargs=None):
""" create_config implementation """
"""create_config implementation"""
@abc.abstractmethod
def create(self, vm_):
""" create implementation """
"""create implementation"""
@abc.abstractmethod
def destroy(self, name):
""" destroy implementation """
"""destroy implementation"""
@abc.abstractmethod
def get_config_id(self, kwargs=None):
""" get_config_id implementation """
"""get_config_id implementation"""
@abc.abstractmethod
def list_nodes(self):
""" list_nodes implementation """
"""list_nodes implementation"""
@abc.abstractmethod
def list_nodes_full(self):
""" list_nodes_full implementation """
"""list_nodes_full implementation"""
@abc.abstractmethod
def list_nodes_min(self):
""" list_nodes_min implementation """
"""list_nodes_min implementation"""
@abc.abstractmethod
def reboot(self, name):
""" reboot implementation """
"""reboot implementation"""
@abc.abstractmethod
def show_instance(self, name):
""" show_instance implementation """
"""show_instance implementation"""
@abc.abstractmethod
def show_pricing(self, kwargs=None):
""" show_pricing implementation """
"""show_pricing implementation"""
@abc.abstractmethod
def start(self, name):
""" start implementation """
"""start implementation"""
@abc.abstractmethod
def stop(self, name):
""" stop implementation """
"""stop implementation"""
@abc.abstractmethod
def _get_linode_by_name(self, name):
""" _get_linode_by_name implementation """
"""_get_linode_by_name implementation"""
@abc.abstractmethod
def _get_linode_by_id(self, linode_id):
""" _get_linode_by_id implementation """
"""_get_linode_by_id implementation"""
def get_plan_id(self, kwargs=None):
""" get_plan_id implementation """
"""get_plan_id implementation"""
raise SaltCloudSystemExit(
"The get_plan_id is not supported by this api_version."
)
@ -495,7 +495,9 @@ class LinodeAPI:
def list_nodes_select(self, call):
return __utils__["cloud.list_nodes_select"](
self.list_nodes_full(), __opts__["query.selection"], call,
self.list_nodes_full(),
__opts__["query.selection"],
call,
)
@ -1016,7 +1018,12 @@ class LinodeAPIv4(LinodeAPI):
return (public, private)
def _poll(
self, description, getter, condition, timeout=None, poll_interval=None,
self,
description,
getter,
condition,
timeout=None,
poll_interval=None,
):
"""
Return true in handler to signal complete.


@ -394,7 +394,9 @@ def list_nodes_select(conn=None, call=None):
conn = get_conn()
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, "function"), __opts__["query.selection"], call,
list_nodes_full(conn, "function"),
__opts__["query.selection"],
call,
)
@ -487,7 +489,10 @@ def create(vm_):
)
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name="SSH", protocol="TCP", port=ssh_port, local_port=22,
name="SSH",
protocol="TCP",
port=ssh_port,
local_port=22,
)
network_config = azure.servicemanagement.ConfigurationSet()
@ -506,7 +511,10 @@ def create(vm_):
smb_port = vm_["smb_port"]
smb_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name="SMB", protocol="TCP", port=smb_port, local_port=smb_port,
name="SMB",
protocol="TCP",
port=smb_port,
local_port=smb_port,
)
network_config.input_endpoints.input_endpoints.append(smb_endpoint)
@ -805,7 +813,9 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
# If attach is None then everything is fine
if attach:
msg = "{} attached to {} (aka {})".format(
volume_dict["volume_name"], kwargs["role_name"], name,
volume_dict["volume_name"],
kwargs["role_name"],
name,
)
log.info(msg)
ret.append(msg)
@ -1190,7 +1200,9 @@ def show_storage(kwargs=None, conn=None, call=None):
if "name" not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
data = conn.get_storage_account_properties(kwargs["name"],)
data = conn.get_storage_account_properties(
kwargs["name"],
)
return object_to_dict(data)
@ -1225,7 +1237,9 @@ def show_storage_keys(kwargs=None, conn=None, call=None):
raise SaltCloudSystemExit('A name must be specified as "name"')
try:
data = conn.get_storage_account_keys(kwargs["name"],)
data = conn.get_storage_account_keys(
kwargs["name"],
)
except AzureMissingResourceHttpError as exc:
storage_data = show_storage(kwargs={"name": kwargs["name"]}, call="function")
if storage_data["storage_service_properties"]["status"] == "Creating":
@ -1367,7 +1381,8 @@ def regenerate_storage_keys(kwargs=None, conn=None, call=None):
try:
data = conn.regenerate_storage_account_keys(
service_name=kwargs["name"], key_type=kwargs["key_type"],
service_name=kwargs["name"],
key_type=kwargs["key_type"],
)
return show_storage_keys(kwargs={"name": kwargs["name"]}, call="function")
except AzureConflictHttpError:
@ -1797,7 +1812,9 @@ def show_service_certificate(kwargs=None, conn=None, call=None):
raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"')
data = conn.get_service_certificate(
kwargs["name"], kwargs["thumbalgorithm"], kwargs["thumbprint"],
kwargs["name"],
kwargs["thumbalgorithm"],
kwargs["thumbprint"],
)
return object_to_dict(data)
@ -1896,7 +1913,9 @@ def delete_service_certificate(kwargs=None, conn=None, call=None):
try:
data = conn.delete_service_certificate(
kwargs["name"], kwargs["thumbalgorithm"], kwargs["thumbprint"],
kwargs["name"],
kwargs["thumbalgorithm"],
kwargs["thumbprint"],
)
return {"Success": "The service certificate was successfully deleted"}
except AzureMissingResourceHttpError as exc:
@ -2000,7 +2019,9 @@ def add_management_certificate(kwargs=None, conn=None, call=None):
try:
conn.add_management_certificate(
kwargs["name"], kwargs["thumbprint"], kwargs["data"],
kwargs["name"],
kwargs["thumbprint"],
kwargs["data"],
)
return {"Success": "The management certificate was successfully added"}
except AzureConflictHttpError:
@ -2093,7 +2114,8 @@ def list_input_endpoints(kwargs=None, conn=None, call=None):
raise SaltCloudSystemExit('A deployment name must be specified as "deployment"')
path = "services/hostedservices/{}/deployments/{}".format(
kwargs["service"], kwargs["deployment"],
kwargs["service"],
kwargs["deployment"],
)
data = query(path)
@ -2267,7 +2289,9 @@ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
)
path = "services/hostedservices/{}/deployments/{}/roles/{}".format(
kwargs["service"], kwargs["deployment"], kwargs["role"],
kwargs["service"],
kwargs["deployment"],
kwargs["role"],
)
query(
path=path,
@ -2296,7 +2320,10 @@ def add_input_endpoint(kwargs=None, conn=None, call=None):
timeout_for_tcp_idle_connection=4
"""
return update_input_endpoint(
kwargs=kwargs, conn=conn, call="function", activity="add",
kwargs=kwargs,
conn=conn,
call="function",
activity="add",
)
@ -2315,7 +2342,10 @@ def delete_input_endpoint(kwargs=None, conn=None, call=None):
deployment=mydeployment role=myrole name=HTTP
"""
return update_input_endpoint(
kwargs=kwargs, conn=conn, call="function", activity="delete",
kwargs=kwargs,
conn=conn,
call="function",
activity="delete",
)
@ -2351,7 +2381,8 @@ def show_deployment(kwargs=None, conn=None, call=None):
)
data = conn.get_deployment_by_name(
service_name=kwargs["service_name"], deployment_name=kwargs["deployment_name"],
service_name=kwargs["service_name"],
deployment_name=kwargs["deployment_name"],
)
return object_to_dict(data)
@ -2725,7 +2756,8 @@ def show_storage_container(kwargs=None, storage_conn=None, call=None):
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_properties(
container_name=kwargs["name"], x_ms_lease_id=kwargs.get("lease_id", None),
container_name=kwargs["name"],
x_ms_lease_id=kwargs.get("lease_id", None),
)
return data
@ -2769,7 +2801,8 @@ def show_storage_container_metadata(kwargs=None, storage_conn=None, call=None):
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_metadata(
container_name=kwargs["name"], x_ms_lease_id=kwargs.get("lease_id", None),
container_name=kwargs["name"],
x_ms_lease_id=kwargs.get("lease_id", None),
)
return data
@ -2866,7 +2899,8 @@ def show_storage_container_acl(kwargs=None, storage_conn=None, call=None):
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_acl(
container_name=kwargs["name"], x_ms_lease_id=kwargs.get("lease_id", None),
container_name=kwargs["name"],
x_ms_lease_id=kwargs.get("lease_id", None),
)
return data
@ -3484,7 +3518,9 @@ def query(path, method="GET", data=None, params=None, header_dict=None, decode=T
"backend", get_configured_provider(), __opts__, search_global=False
)
url = "https://{management_host}/{subscription_id}/{path}".format(
management_host=management_host, subscription_id=subscription_id, path=path,
management_host=management_host,
subscription_id=subscription_id,
path=path,
)
if header_dict is None:


@ -448,7 +448,9 @@ def list_nodes_select(conn=None, call=None):
conn = get_conn()
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, "function"), __opts__["query.selection"], call,
list_nodes_full(conn, "function"),
__opts__["query.selection"],
call,
)


@ -338,7 +338,9 @@ def list_nodes_select(call=None):
)
return __utils__["cloud.list_nodes_select"](
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -513,7 +513,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields.
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)


@ -66,7 +66,11 @@ def get_configured_provider():
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("user", "password", "url",),
(
"user",
"password",
"url",
),
)
@ -148,7 +152,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)


@ -520,7 +520,9 @@ def list_nodes_select(call=None):
salt-cloud -S my-proxmox-config
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)
@ -566,7 +568,9 @@ def _reconfigure_clone(vm_, vmid):
if re.match(r"^(ide|sata|scsi)(\d+)$", setting):
postParams = {setting: vm_[setting]}
query(
"post", "nodes/{}/qemu/{}/config".format(vm_["host"], vmid), postParams,
"post",
"nodes/{}/qemu/{}/config".format(vm_["host"], vmid),
postParams,
)
elif re.match(r"^net(\d+)$", setting):
@ -589,7 +593,9 @@ def _reconfigure_clone(vm_, vmid):
# Convert the dictionary back into a string list
postParams = {setting: _dictionary_to_stringlist(new_setting)}
query(
"post", "nodes/{}/qemu/{}/config".format(vm_["host"], vmid), postParams,
"post",
"nodes/{}/qemu/{}/config".format(vm_["host"], vmid),
postParams,
)
@ -702,7 +708,11 @@ def create(vm_):
ssh_username = config.get_cloud_config_value(
"ssh_username", vm_, __opts__, default="root"
)
ssh_password = config.get_cloud_config_value("password", vm_, __opts__,)
ssh_password = config.get_cloud_config_value(
"password",
vm_,
__opts__,
)
ret["ip_address"] = ip_address
ret["username"] = ssh_username


@ -48,7 +48,11 @@ def get_configured_provider():
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("username", "identity_url", "compute_region",),
(
"username",
"identity_url",
"compute_region",
),
)


@ -596,7 +596,9 @@ def list_nodes_select(call=None):
salt-cloud -S my-qingcloud
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -216,7 +216,9 @@ def list_nodes_select(call=None):
select fields.
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)
@ -434,7 +436,7 @@ def _verify(vm_):
def destroy(name, call=None):
""" Destroy a node.
"""Destroy a node.
.. versionadded:: 2018.3.0


@ -61,16 +61,14 @@ def _get_active_provider_name():
def get_configured_provider():
""" Return the first configured instance.
"""
"""Return the first configured instance."""
return config.is_provider_configured(
__opts__, _get_active_provider_name() or __virtualname__, ("token",)
)
def avail_images(call=None):
""" Return a list of the images that are on the provider.
"""
"""Return a list of the images that are on the provider."""
if call == "action":
raise SaltCloudSystemExit(
"The avail_images function must be called with "
@ -88,8 +86,7 @@ def avail_images(call=None):
def list_nodes(call=None):
""" Return a list of the BareMetal servers that are on the provider.
"""
"""Return a list of the BareMetal servers that are on the provider."""
if call == "action":
raise SaltCloudSystemExit(
"The list_nodes function must be called with -f or --function."
@ -124,8 +121,7 @@ def list_nodes(call=None):
def list_nodes_full(call=None):
""" Return a list of the BareMetal servers that are on the provider.
"""
"""Return a list of the BareMetal servers that are on the provider."""
if call == "action":
raise SaltCloudSystemExit(
"list_nodes_full must be called with -f or --function"
@ -144,17 +140,18 @@ def list_nodes_full(call=None):
def list_nodes_select(call=None):
""" Return a list of the BareMetal servers that are on the provider, with
"""Return a list of the BareMetal servers that are on the provider, with
select fields.
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)
def get_image(server_):
""" Return the image object to use.
"""
"""Return the image object to use."""
images = avail_images()
server_image = str(
config.get_cloud_config_value("image", server_, __opts__, search_global=False)
@ -168,8 +165,7 @@ def get_image(server_):
def create_node(args):
""" Create a node.
"""
"""Create a node."""
node = query(method="servers", args=args, http_method="POST")
action = query(
@ -269,8 +265,7 @@ def create(server_):
return False
def __query_node_data(server_name):
""" Called to check if the server has a public IP address.
"""
"""Called to check if the server has a public IP address."""
data = show_instance(server_name, "action")
if data and data.get("public_ip"):
return data
@ -332,8 +327,7 @@ def query(
http_method="GET",
root="api_root",
):
""" Make a call to the Scaleway API.
"""
"""Make a call to the Scaleway API."""
if root == "api_root":
default_url = "https://cp-par1.scaleway.com"
@ -344,7 +338,11 @@ def query(
base_path = str(
config.get_cloud_config_value(
root, vm_, __opts__, search_global=False, default=default_url,
root,
vm_,
__opts__,
search_global=False,
default=default_url,
)
)
@ -387,8 +385,7 @@ def query(
def script(server_):
""" Return the script deployment object.
"""
"""Return the script deployment object."""
return salt.utils.cloud.os_script(
config.get_cloud_config_value("script", server_, __opts__),
server_,
@ -400,8 +397,7 @@ def script(server_):
def show_instance(name, call=None):
""" Show the details from a Scaleway BareMetal server.
"""
"""Show the details from a Scaleway BareMetal server."""
if call != "action":
raise SaltCloudSystemExit(
"The show_instance action must be called with -a or --action."
@ -427,7 +423,7 @@ def _get_node(name):
def destroy(name, call=None):
""" Destroy a node. Will check termination protection and warn if enabled.
"""Destroy a node. Will check termination protection and warn if enabled.
CLI Example:


@ -582,7 +582,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)


@ -477,7 +477,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)


@ -379,7 +379,9 @@ def list_nodes_select(call=None):
salt-cloud -S
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -60,8 +60,8 @@ def avail_locations(call=None):
def avail_images(call=None):
"""This function returns a list of images available for this cloud provider.
vagrant will return a list of profiles.
salt-cloud --list-images my-cloud-provider
vagrant will return a list of profiles.
salt-cloud --list-images my-cloud-provider
"""
vm_ = get_configured_provider()
return {"Profiles": [profile for profile in vm_["profiles"]]}
@ -175,7 +175,9 @@ def list_nodes_select(call=None):
select fields.
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -328,7 +328,9 @@ def list_nodes(kwargs=None, call=None):
"public_ips",
]
return __utils__["cloud.list_nodes_select"](
list_nodes_full("function"), attributes, call,
list_nodes_full("function"),
attributes,
call,
)
@ -337,7 +339,9 @@ def list_nodes_select(call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return __utils__["cloud.list_nodes_select"](
list_nodes_full("function"), __opts__["query.selection"], call,
list_nodes_full("function"),
__opts__["query.selection"],
call,
)


@ -191,7 +191,11 @@ def get_configured_provider():
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("url", "user", "password",),
(
"url",
"user",
"password",
),
)
@ -432,8 +436,10 @@ def _edit_existing_network_adapter(
else:
# If switch type not specified or does not match, show error and return
if not switch_type:
err_msg = "The switch type to be used by '{}' has not been specified".format(
network_adapter.deviceInfo.label
err_msg = (
"The switch type to be used by '{}' has not been specified".format(
network_adapter.deviceInfo.label
)
)
else:
err_msg = "Cannot create '{}'. Invalid/unsupported switch type '{}'".format(
@ -519,8 +525,10 @@ def _add_new_network_adapter_helper(
else:
# If switch type not specified or does not match, show error and return
if not switch_type:
err_msg = "The switch type to be used by '{}' has not been specified".format(
network_adapter_label
err_msg = (
"The switch type to be used by '{}' has not been specified".format(
network_adapter_label
)
)
else:
err_msg = "Cannot create '{}'. Invalid/unsupported switch type '{}'".format(
@ -3151,8 +3159,10 @@ def create(vm_):
)
# get recommended datastores
recommended_datastores = si.content.storageResourceManager.RecommendDatastores(
storageSpec=storage_spec
recommended_datastores = (
si.content.storageResourceManager.RecommendDatastores(
storageSpec=storage_spec
)
)
# apply storage DRS recommendations
@ -4369,7 +4379,9 @@ def add_host(kwargs=None, call=None):
raise SaltCloudSystemExit("Specified datacenter does not exist.")
spec = vim.host.ConnectSpec(
hostName=host_name, userName=host_user, password=host_password,
hostName=host_name,
userName=host_user,
password=host_password,
)
if host_ssl_thumbprint:


@ -264,7 +264,9 @@ def list_nodes_select(conn=None, call=None):
Return a list of the VMs that are on the provider, with select fields
"""
return __utils__["cloud.list_nodes_select"](
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)
@ -342,7 +344,11 @@ def create(vm_):
vm_["driver"] = vm_["provider"]
private_networking = config.get_cloud_config_value(
"enable_private_network", vm_, __opts__, search_global=False, default=False,
"enable_private_network",
vm_,
__opts__,
search_global=False,
default=False,
)
ssh_key_ids = config.get_cloud_config_value(
@ -350,7 +356,11 @@ def create(vm_):
)
startup_script = config.get_cloud_config_value(
"startup_script_id", vm_, __opts__, search_global=False, default=None,
"startup_script_id",
vm_,
__opts__,
search_global=False,
default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
@ -361,7 +371,11 @@ def create(vm_):
return False
firewall_group_id = config.get_cloud_config_value(
"firewall_group_id", vm_, __opts__, search_global=False, default=None,
"firewall_group_id",
vm_,
__opts__,
search_global=False,
default=None,
)
if firewall_group_id and str(firewall_group_id) not in avail_firewall_groups():
@ -598,7 +612,10 @@ def _query(path, method="GET", data=None, params=None, header_dict=None, decode=
Perform a query directly against the Vultr REST API
"""
api_key = config.get_cloud_config_value(
"api_key", get_configured_provider(), __opts__, search_global=False,
"api_key",
get_configured_provider(),
__opts__,
search_global=False,
)
management_host = config.get_cloud_config_value(
"management_host",
@ -608,7 +625,9 @@ def _query(path, method="GET", data=None, params=None, header_dict=None, decode=
default="api.vultr.com",
)
url = "https://{management_host}/v1/{path}?api_key={api_key}".format(
management_host=management_host, path=path, api_key=api_key,
management_host=management_host,
path=path,
api_key=api_key,
)
if header_dict is None:


@ -342,7 +342,9 @@ def list_nodes_select(call=None):
"""
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__["query.selection"], call,
list_nodes_full(),
__opts__["query.selection"],
call,
)


@ -57,8 +57,10 @@ def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
if LIBCLOUD_VERSION_INFO >= reqver:
return libcloud.__version__
errormsg = "Your version of libcloud is {}. salt-cloud requires >= libcloud {}".format(
libcloud.__version__, ".".join([str(num) for num in reqver])
errormsg = (
"Your version of libcloud is {}. salt-cloud requires >= libcloud {}".format(
libcloud.__version__, ".".join([str(num) for num in reqver])
)
)
if why:
errormsg += " for {}".format(why)
@ -219,7 +221,10 @@ def get_size(conn, vm_):
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(size.id), str(size.name),):
if vm_size and str(vm_size) in (
str(size.id),
str(size.name),
):
return size
raise SaltCloudNotFound(
"The specified size, '{}', could not be found.".format(vm_size)
@ -409,7 +414,9 @@ def list_nodes_select(conn=None, call=None):
conn = get_conn() # pylint: disable=E0602
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, "function"), __opts__["query.selection"], call,
list_nodes_full(conn, "function"),
__opts__["query.selection"],
call,
)


@ -149,7 +149,11 @@ def clean_old_jobs(opts):
Clean out the old jobs from the job cache
"""
# TODO: better way to not require creating the masterminion every time?
mminion = salt.minion.MasterMinion(opts, states=False, rend=False,)
mminion = salt.minion.MasterMinion(
opts,
states=False,
rend=False,
)
# If the master job cache has a clean_old_jobs, call it
fstr = "{}.clean_old_jobs".format(opts["master_job_cache"])
if fstr in mminion.returners:
@ -1186,7 +1190,9 @@ class LocalFuncs:
except Exception as exc: # pylint: disable=broad-except
log.exception("Exception occurred while introspecting %s", fun)
data["return"] = "Exception occurred in wheel {}: {}: {}".format(
fun, exc.__class__.__name__, exc,
fun,
exc.__class__.__name__,
exc,
)
data["success"] = False
self.event.fire_event(data, salt.utils.event.tagify([jid, "ret"], "wheel"))

View file

@ -696,7 +696,9 @@ class SlackClient:
except (StopIteration, AttributeError):
outputter = None
return salt.output.string_format(
{x: y["return"] for x, y in data.items()}, out=outputter, opts=__opts__,
{x: y["return"] for x, y in data.items()},
out=outputter,
opts=__opts__,
)
except Exception as exc: # pylint: disable=broad-except
import pprint
@ -826,11 +828,13 @@ class SlackClient:
this_job = outstanding[jid]
channel = self.sc.server.channels.find(this_job["channel"])
return_text = self.format_return_text(result, function)
return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format(
this_job["user_name"],
this_job["cmdline"],
jid,
this_job["target"],
return_prefix = (
"@{}'s job `{}` (id: {}) (target: {}) returned".format(
this_job["user_name"],
this_job["cmdline"],
jid,
this_job["target"],
)
)
channel.send_message(return_prefix)
ts = time.time()
@ -901,7 +905,11 @@ class SlackClient:
# according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form
with salt.client.LocalClient() as local:
job_id = local.cmd_async(
str(target), cmd, arg=args, kwarg=kwargs, tgt_type=str(tgt_type),
str(target),
cmd,
arg=args,
kwarg=kwargs,
tgt_type=str(tgt_type),
)
log.info("ret from local.cmd_async is %s", job_id)
return job_id

View file

@ -287,7 +287,8 @@ class SaltRenderError(SaltException):
self.buffer, self.line_num, marker=marker
)
exc_str += "; line {}\n\n{}".format(
self.line_num, salt.utils.stringutils.to_unicode(self.context),
self.line_num,
salt.utils.stringutils.to_unicode(self.context),
)
super().__init__(exc_str)

View file

@ -54,7 +54,9 @@ def _zfs_pool_data():
# collect zpool data
zpool_list_cmd = __utils__["zfs.zpool_command"](
"list", flags=["-H"], opts={"-o": "name,size"},
"list",
flags=["-H"],
opts={"-o": "name,size"},
)
for zpool in __salt__["cmd.run"](zpool_list_cmd, ignore_retcode=True).splitlines():
if "zpool" not in grains:

View file

@ -78,7 +78,13 @@ def static_loader(
):
funcs = LazyLoader(
_module_dirs(
opts, ext_type, tag, int_type, ext_dirs, ext_type_dirs, base_path,
opts,
ext_type,
tag,
int_type,
ext_dirs,
ext_type_dirs,
base_path,
),
opts,
tag=tag,
@ -448,7 +454,10 @@ def tops(opts):
return {}
whitelist = list(opts["master_tops"].keys())
ret = LazyLoader(
_module_dirs(opts, "tops", "top"), opts, tag="top", whitelist=whitelist,
_module_dirs(opts, "tops", "top"),
opts,
tag="top",
whitelist=whitelist,
)
return FilterDictWrapper(ret, ".top")
@ -492,7 +501,11 @@ def serializers(opts):
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "serializers"), opts, tag="serializers",)
return LazyLoader(
_module_dirs(opts, "serializers"),
opts,
tag="serializers",
)
def eauth_tokens(opts):
@ -501,7 +514,11 @@ def eauth_tokens(opts):
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
"""
return LazyLoader(_module_dirs(opts, "tokens"), opts, tag="tokens",)
return LazyLoader(
_module_dirs(opts, "tokens"),
opts,
tag="tokens",
)
def auth(opts, whitelist=None):
@ -688,7 +705,12 @@ def render(opts, functions, states=None, proxy=None, context=None):
pack["__proxy__"] = proxy
ret = LazyLoader(
_module_dirs(opts, "renderers", "render", ext_type_dirs="render_dirs",),
_module_dirs(
opts,
"renderers",
"render",
ext_type_dirs="render_dirs",
),
opts,
tag="render",
pack=pack,
@ -722,7 +744,12 @@ def grain_funcs(opts, proxy=None, context=None):
_utils = utils(opts, proxy=proxy)
pack = {"__utils__": utils(opts, proxy=proxy), "__context__": context}
ret = LazyLoader(
_module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs",),
_module_dirs(
opts,
"grains",
"grain",
ext_type_dirs="grains_dirs",
),
opts,
tag="grains",
extra_module_dirs=_utils.module_dirs,
@ -1102,7 +1129,11 @@ def netapi(opts):
"""
Return the network api functions
"""
return LazyLoader(_module_dirs(opts, "netapi"), opts, tag="netapi",)
return LazyLoader(
_module_dirs(opts, "netapi"),
opts,
tag="netapi",
)
def executors(opts, functions=None, context=None, proxy=None):

View file

@ -266,7 +266,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.disabled = set(
self.opts.get(
"disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"), [],
"disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"),
[],
)
)

View file

@ -551,7 +551,10 @@ def setup_multiprocessing_logging_listener(opts, queue=None):
__MP_MAINPROCESS_ID = os.getpid()
__MP_LOGGING_QUEUE_PROCESS = multiprocessing.Process(
target=__process_multiprocessing_logging_queue,
args=(opts, queue or get_multiprocessing_logging_queue(),),
args=(
opts,
queue or get_multiprocessing_logging_queue(),
),
)
__MP_LOGGING_QUEUE_PROCESS.daemon = True
__MP_LOGGING_QUEUE_PROCESS.start()
@ -901,7 +904,9 @@ def __global_logging_exception_handler(
)
except Exception: # pylint: disable=broad-except
msg = "{}\n{}: {}\n(UNABLE TO FORMAT TRACEBACK)".format(
msg, exc_type.__name__, exc_value,
msg,
exc_type.__name__,
exc_value,
)
try:
_logger.error(msg)

View file

@ -376,7 +376,8 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"], log_queue=state["log_queue"],
state["opts"],
log_queue=state["log_queue"],
)
def __getstate__(self):
@ -508,7 +509,8 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update, args=(interval, self.buckets[interval]),
target=self.update,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
@ -1169,7 +1171,10 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
)
os.nice(self.opts["mworker_niceness"])
self.clear_funcs = ClearFuncs(self.opts, self.key,)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
@ -2140,7 +2145,9 @@ class ClearFuncs(TransportMethods):
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
data["return"] = "Exception occurred in wheel {}: {}: {}".format(
fun, exc.__class__.__name__, exc,
fun,
exc.__class__.__name__,
exc,
)
data["success"] = False
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))

View file

@ -1042,7 +1042,8 @@ class MinionManager(MinionBase):
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts, io_loop=self.io_loop,
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event(
"minion", opts=self.opts, io_loop=self.io_loop
@ -1550,7 +1551,11 @@ class Minion(MinionBase):
)
else:
functions = salt.loader.minion_mods(
opts, utils=self.utils, notify=notify, proxy=proxy, context=context,
opts,
utils=self.utils,
notify=notify,
proxy=proxy,
context=context,
)
returners = salt.loader.returners(opts, functions, proxy=proxy, context=context)
errors = {}
@ -3006,7 +3011,8 @@ class Minion(MinionBase):
if name in self.periodic_callbacks:
return False
self.periodic_callbacks[name] = salt.ext.tornado.ioloop.PeriodicCallback(
method, interval * 1000,
method,
interval * 1000,
)
self.periodic_callbacks[name].start()
return True
@ -3576,7 +3582,8 @@ class SyndicManager(MinionBase):
# forward events every syndic_event_forward_timeout
self.forward_events = salt.ext.tornado.ioloop.PeriodicCallback(
self._forward_events, self.opts["syndic_event_forward_timeout"] * 1000,
self._forward_events,
self.opts["syndic_event_forward_timeout"] * 1000,
)
self.forward_events.start()
@ -3841,7 +3848,10 @@ class SProxyMinion(SMinion):
self.matchers = salt.loader.matchers(self.opts)
self.functions["sys.reload_modules"] = self.gen_modules
self.executors = salt.loader.executors(
self.opts, functions=self.functions, proxy=self.proxy, context=context,
self.opts,
functions=self.functions,
proxy=self.proxy,
context=context,
)
fq_proxyname = self.opts["proxy"]["proxytype"]

View file

@ -40,29 +40,29 @@ def get_latest_snapshot(
use_literal_group_id=False,
):
"""
Gets latest snapshot of the given artifact
Gets latest snapshot of the given artifact
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: artifactory.get_latest_snapshot, artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
artifactory_url,
@ -122,31 +122,31 @@ def get_snapshot(
use_literal_group_id=False,
):
"""
Gets snapshot of the desired version of the artifact
Gets snapshot of the desired version of the artifact
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: artifactory.get_snapshot(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
artifactory_url,
@ -196,29 +196,29 @@ def get_latest_release(
use_literal_group_id=False,
):
"""
Gets the latest release of the artifact
Gets the latest release of the artifact
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: artifactory.get_latest_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
artifactory_url,
@ -273,31 +273,31 @@ def get_release(
use_literal_group_id=False,
):
"""
Gets the specified release of the artifact
Gets the specified release of the artifact
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
artifactory_url,

View file

@ -215,7 +215,12 @@ def atrm(*args):
ret = {"jobs": {"removed": opts, "tag": None}}
else:
opts = list(
list(map(str, [i["job"] for i in atq()["jobs"] if str(i["job"]) in args],))
list(
map(
str,
[i["job"] for i in atq()["jobs"] if str(i["job"]) in args],
)
)
)
ret = {"jobs": {"removed": opts, "tag": None}}

View file

@ -230,9 +230,10 @@ def execute(context=None, lens=None, commands=(), load_path=None):
# if command.split fails arg will not be set
if "arg" not in locals():
arg = command
ret["error"] = (
"Invalid formatted command, "
"see debug log for details: {}".format(arg)
ret[
"error"
] = "Invalid formatted command, " "see debug log for details: {}".format(
arg
)
return ret
@ -488,7 +489,7 @@ def ls(path, load_path=None): # pylint: disable=C0103
"""
def _match(path):
""" Internal match function """
"""Internal match function"""
try:
matches = aug.match(salt.utils.stringutils.to_str(path))
except RuntimeError:

View file

@ -179,7 +179,10 @@ def update_employee(emp_id, key=None, value=None, items=None):
xml_items = "<employee>{}</employee>".format(xml_items)
status, result = _query(
action="employees", command=emp_id, data=xml_items, method="POST",
action="employees",
command=emp_id,
data=xml_items,
method="POST",
)
return show_employee(emp_id, ",".join(items.keys()))

View file

@ -188,9 +188,10 @@ def add(name, beacon_data, **kwargs):
if not valid:
ret["result"] = False
ret["comment"] = (
"Beacon {} configuration invalid, "
"not adding.\n{}".format(name, vcomment)
ret[
"comment"
] = "Beacon {} configuration invalid, " "not adding.\n{}".format(
name, vcomment
)
return ret
except KeyError:
@ -286,9 +287,10 @@ def modify(name, beacon_data, **kwargs):
if not valid:
ret["result"] = False
ret["comment"] = (
"Beacon {} configuration invalid, "
"not modifying.\n{}".format(name, vcomment)
ret[
"comment"
] = "Beacon {} configuration invalid, " "not modifying.\n{}".format(
name, vcomment
)
return ret
@ -300,9 +302,10 @@ def modify(name, beacon_data, **kwargs):
if not valid:
ret["result"] = False
ret["comment"] = (
"Beacon {} configuration invalid, "
"not modifying.\n{}".format(name, vcomment)
ret[
"comment"
] = "Beacon {} configuration invalid, " "not modifying.\n{}".format(
name, vcomment
)
return ret

View file

@ -1601,8 +1601,10 @@ def create_virtual(
elif vlans["disabled"]:
payload["vlans-disabled"] = True
except Exception: # pylint: disable=broad-except
return "Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
vlans=vlans
return (
"Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
vlans=vlans
)
)
elif vlans == "none":
payload["vlans"] = "none"
@ -1888,8 +1890,10 @@ def modify_virtual(
elif vlans["disabled"]:
payload["vlans-disabled"] = True
except Exception: # pylint: disable=broad-except
return "Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
vlans=vlans
return (
"Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
vlans=vlans
)
)
elif vlans == "none":
payload["vlans"] = "none"
@ -1967,7 +1971,11 @@ def delete_virtual(hostname, username, password, name):
def list_monitor(
hostname, username, password, monitor_type, name=None,
hostname,
username,
password,
monitor_type,
name=None,
):
"""
A function to connect to a bigip device and list an existing monitor. If no name is provided than all
@ -2163,7 +2171,11 @@ def delete_monitor(hostname, username, password, monitor_type, name):
def list_profile(
hostname, username, password, profile_type, name=None,
hostname,
username,
password,
profile_type,
name=None,
):
"""
A function to connect to a bigip device and list an existing profile. If no name is provided than all

View file

@ -80,7 +80,12 @@ def __virtual__():
def _list_distributions(
conn, name=None, region=None, key=None, keyid=None, profile=None,
conn,
name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Private function that returns an iterator over all CloudFront distributions.
@ -185,7 +190,12 @@ def get_distribution(name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
for _, dist in _list_distributions(
conn, name=name, region=region, key=key, keyid=keyid, profile=profile,
conn,
name=name,
region=region,
key=key,
keyid=keyid,
profile=profile,
):
# _list_distributions should only return the one distribution
# that we want (with the given name).
@ -231,7 +241,11 @@ def export_distributions(region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
for name, distribution in _list_distributions(
conn, region=region, key=key, keyid=keyid, profile=profile,
conn,
region=region,
key=key,
keyid=keyid,
profile=profile,
):
config = distribution["distribution"]["DistributionConfig"]
tags = distribution["tags"]
@ -250,11 +264,21 @@ def export_distributions(region=None, key=None, keyid=None, profile=None):
log.trace("Boto client error: {}", exc)
dumper = __utils__["yaml.get_dumper"]("IndentedSafeOrderedDumper")
return __utils__["yaml.dump"](results, default_flow_style=False, Dumper=dumper,)
return __utils__["yaml.dump"](
results,
default_flow_style=False,
Dumper=dumper,
)
def create_distribution(
name, config, tags=None, region=None, key=None, keyid=None, profile=None,
name,
config,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Create a CloudFront distribution with the given name, config, and (optionally) tags.
@ -318,7 +342,13 @@ def create_distribution(
def update_distribution(
name, config, tags=None, region=None, key=None, keyid=None, profile=None,
name,
config,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Update the config (and optionally tags) for the CloudFront distribution with the given name.
@ -372,7 +402,9 @@ def update_distribution(
try:
if "old" in config_diff or "new" in config_diff:
conn.update_distribution(
DistributionConfig=config, Id=current_distribution["Id"], IfMatch=etag,
DistributionConfig=config,
Id=current_distribution["Id"],
IfMatch=etag,
)
if tags:
arn = current_distribution["ARN"]
@ -383,14 +415,16 @@ def update_distribution(
],
}
conn.tag_resource(
Resource=arn, Tags=tags_to_add,
Resource=arn,
Tags=tags_to_add,
)
if "old" in tags_diff:
tags_to_remove = {
"Items": list(tags_diff["old"].keys()),
}
conn.untag_resource(
Resource=arn, TagKeys=tags_to_remove,
Resource=arn,
TagKeys=tags_to_remove,
)
except botocore.exceptions.ClientError as err:
return {"error": __utils__["boto3.get_error"](err)}

View file

@ -67,7 +67,9 @@ def create_pipeline(
r = {}
try:
response = client.create_pipeline(
name=name, uniqueId=unique_id, description=description,
name=name,
uniqueId=unique_id,
description=description,
)
r["result"] = response["pipelineId"]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
@ -130,7 +132,8 @@ def get_pipeline_definition(
r = {}
try:
r["result"] = client.get_pipeline_definition(
pipelineId=pipeline_id, version=version,
pipelineId=pipeline_id,
version=version,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r["error"] = str(e)
@ -255,5 +258,7 @@ def _get_session(region, key, keyid, profile):
region = "us-east-1"
return boto3.session.Session(
region_name=region, aws_secret_access_key=key, aws_access_key_id=keyid,
region_name=region,
aws_secret_access_key=key,
aws_access_key_id=keyid,
)

View file

@ -252,7 +252,7 @@ def zone_exists(
error_retries=5,
):
"""
Check for the existence of a Route53 hosted zone.
Check for the existence of a Route53 hosted zone.
.. versionadded:: 2015.8.0
@ -262,7 +262,7 @@ def zone_exists(
salt myminion boto_route53.zone_exists example.org
retry_on_errors
retry_on_errors
Continue to query if the zone exists after an error is
raised. The previously used argument `retry_on_rate_limit`
was deprecated for this argument. Users can still use
@ -277,7 +277,6 @@ def zone_exists(
`rate_limit_retries` to ensure backwards compatibility,
but please migrate to using the favored `error_retries`
argument instead.
"""
if region is None:
region = "universal"
@ -546,7 +545,7 @@ def get_record(
error_retries=5,
):
"""
Get a record from a zone.
Get a record from a zone.
CLI Example:
@ -554,7 +553,7 @@ def get_record(
salt myminion boto_route53.get_record test.example.org example.org A
retry_on_errors
retry_on_errors
Continue to query if the zone exists after an error is
raised. The previously used argument `retry_on_rate_limit`
was deprecated for this argument. Users can still use
@ -657,7 +656,7 @@ def add_record(
error_retries=5,
):
"""
Add a record to a zone.
Add a record to a zone.
CLI Example:
@ -665,7 +664,7 @@ def add_record(
salt myminion boto_route53.add_record test.example.org 1.1.1.1 example.org A
retry_on_errors
retry_on_errors
Continue to query if the zone exists after an error is
raised. The previously used argument `retry_on_rate_limit`
was deprecated for this argument. Users can still use
@ -764,15 +763,15 @@ def update_record(
error_retries=5,
):
"""
Modify a record in a zone.
Modify a record in a zone.
CLI Example:
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt myminion boto_route53.modify_record test.example.org 1.1.1.1 example.org A
salt myminion boto_route53.modify_record test.example.org 1.1.1.1 example.org A
retry_on_errors
retry_on_errors
Continue to query if the zone exists after an error is
raised. The previously used argument `retry_on_rate_limit`
was deprecated for this argument. Users can still use
@ -853,15 +852,15 @@ def delete_record(
error_retries=5,
):
"""
Modify a record in a zone.
Modify a record in a zone.
CLI Example:
CLI Example:
.. code-block:: bash
.. code-block:: bash
salt myminion boto_route53.delete_record test.example.org example.org A
salt myminion boto_route53.delete_record test.example.org example.org A
retry_on_errors
retry_on_errors
Continue to query if the zone exists after an error is
raised. The previously used argument `retry_on_rate_limit`
was deprecated for this argument. Users can still use

View file

@ -85,7 +85,12 @@ def __init__(opts): # pylint: disable=unused-argument
def get_object_metadata(
name, extra_args=None, region=None, key=None, keyid=None, profile=None,
name,
extra_args=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Get metadata about an S3 object.
@ -121,7 +126,13 @@ def get_object_metadata(
def upload_file(
source, name, extra_args=None, region=None, key=None, keyid=None, profile=None,
source,
name,
extra_args=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Upload a local file as an S3 object.

View file

@ -118,7 +118,12 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
def create(
name, attributes=None, region=None, key=None, keyid=None, profile=None,
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Create an SQS queue.
@ -210,7 +215,12 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
def set_attributes(
name, attributes, region=None, key=None, keyid=None, profile=None,
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Set attributes on an SQS queue.

View file

@ -196,7 +196,12 @@ def __init__(opts):
def check_vpc(
vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Check whether a VPC with the given name or id exists.
@ -672,7 +677,13 @@ def _get_id(
def get_id(
name=None, cidr=None, tags=None, region=None, key=None, keyid=None, profile=None,
name=None,
cidr=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Given VPC properties, return the VPC id if a match is found.
@ -883,7 +894,12 @@ def delete(
def describe(
vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Describe a VPC's properties. If no VPC ID/Name is spcified then describe the default VPC.

View file

@ -37,7 +37,9 @@ def _jobs():
Return the currently configured jobs.
"""
response = salt.utils.http.query(
"{}/scheduler/jobs".format(_base_url()), decode_type="json", decode=True,
"{}/scheduler/jobs".format(_base_url()),
decode_type="json",
decode=True,
)
jobs = {}
for job in response["dict"]:
@ -127,6 +129,7 @@ def rm_job(name):
salt chronos-minion-id chronos.rm_job my-job
"""
response = salt.utils.http.query(
"{}/scheduler/job/{}".format(_base_url(), name), method="DELETE",
"{}/scheduler/job/{}".format(_base_url(), name),
method="DELETE",
)
return True

View file

@ -660,8 +660,10 @@ def set_hostname(hostname=None):
raise salt.exceptions.CommandExecutionError("Hostname option must be provided.")
dn = "sys/rack-unit-1/mgmt/if-1"
inconfig = """<mgmtIf dn="sys/rack-unit-1/mgmt/if-1" hostname="{}" ></mgmtIf>""".format(
hostname
inconfig = (
"""<mgmtIf dn="sys/rack-unit-1/mgmt/if-1" hostname="{}" ></mgmtIf>""".format(
hostname
)
)
ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False)

View file

@ -54,8 +54,7 @@ def __virtual__():
def _get_ccp(config=None, config_path=None, saltenv="base"):
"""
"""
""" """
if config_path:
config = __salt__["cp.get_file_str"](config_path, saltenv=saltenv)
if config is False:
@ -257,7 +256,7 @@ def find_lines_w_child(
salt '*' ciscoconfparse.find_lines_w_child config_path=https://bit.ly/2mAdq7z parent_line='line con' child_line='stopbits'
salt '*' ciscoconfparse.find_lines_w_child config_path=https://bit.ly/2uIRxau parent_regex='ge-(.*)' child_regex='unit \d+'
"""
"""
lines = find_objects_w_child(
config=config,
config_path=config_path,
@ -323,7 +322,7 @@ def find_objects_wo_child(
child_regex='stopbits')
for obj in objects:
print(obj.text)
"""
"""
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
lines = ccp.find_objects_wo_child(parent_regex, child_regex, ignore_ws=ignore_ws)
return lines

View file

@ -55,12 +55,12 @@ def _cron_id(cron):
def _cron_matched(cron, cmd, identifier=None):
"""Check if:
- we find a cron with same cmd, old state behavior
- but also be smart enough to remove states changed crons where we do
not removed priorly by a cron.absent by matching on the provided
identifier.
We assure retrocompatibility by only checking on identifier if
and only if an identifier was set on the serialized crontab
- we find a cron with same cmd, old state behavior
- but also be smart enough to remove states changed crons where we do
not removed priorly by a cron.absent by matching on the provided
identifier.
We assure retrocompatibility by only checking on identifier if
and only if an identifier was set on the serialized crontab
"""
ret, id_matched = False, None
cid = _cron_id(cron)

View file

@ -29,12 +29,16 @@ def __virtual__():
"""
Only work on Debian and when systemd isn't running
"""
if __grains__["os"] in (
"Debian",
"Raspbian",
"Devuan",
"NILinuxRT",
) and not salt.utils.systemd.booted(__context__):
if (
__grains__["os"]
in (
"Debian",
"Raspbian",
"Devuan",
"NILinuxRT",
)
and not salt.utils.systemd.booted(__context__)
):
return __virtualname__
else:
return (

View file

@ -226,7 +226,10 @@ def createsuperuser(
salt '*' django.createsuperuser <settings_module> user user@example.com
"""
args = ["noinput"]
kwargs = dict(email=email, username=username,)
kwargs = dict(
email=email,
username=username,
)
if database:
kwargs["database"] = database
return command(

View file

@ -810,7 +810,8 @@ def _error_detail(data, item):
)
except TypeError:
msg = "{}: {}".format(
item["errorDetail"]["code"], item["errorDetail"]["message"],
item["errorDetail"]["code"],
item["errorDetail"]["message"],
)
else:
msg = item["errorDetail"]["message"]
@ -902,7 +903,7 @@ def _get_create_kwargs(
client_args = get_client_args(["create_container", "host_config"])
except CommandExecutionError as exc:
log.error(
"docker.create: Error getting client args: '%s'", exc, exc_info=True,
"docker.create: Error getting client args: '%s'", exc, exc_info=True
)
raise CommandExecutionError("Failed to get client args: {}".format(exc))
@ -1472,7 +1473,9 @@ def login(*registries):
username,
)
login_cmd = __salt__["cmd.run_all"](
cmd, python_shell=False, output_loglevel="quiet",
cmd,
python_shell=False,
output_loglevel="quiet",
)
results[registry] = login_cmd["retcode"] == 0
if not results[registry]:
@ -1552,7 +1555,9 @@ def logout(*registries):
cmd.append(registry)
log.debug("Attempting to logout of docker registry '%s'", registry)
logout_cmd = __salt__["cmd.run_all"](
cmd, python_shell=False, output_loglevel="quiet",
cmd,
python_shell=False,
output_loglevel="quiet",
)
results[registry] = logout_cmd["retcode"] == 0
if not results[registry]:

View file

@ -174,7 +174,8 @@ def _chattr_version():
cmd = [tune2fs]
result = __salt__["cmd.run"](cmd, ignore_retcode=True, python_shell=False)
match = re.search(
r"tune2fs (?P<version>[0-9\.]+)", salt.utils.stringutils.to_str(result),
r"tune2fs (?P<version>[0-9\.]+)",
salt.utils.stringutils.to_str(result),
)
if match is None:
version = None
@ -587,7 +588,8 @@ def _cmp_attrs(path, attrs):
new.add("e")
return AttrChanges(
added="".join(new - old) or None, removed="".join(old - new) or None,
added="".join(new - old) or None,
removed="".join(old - new) or None,
)
@ -5072,11 +5074,15 @@ def check_perms(
else:
if diff_attrs.added:
chattr(
name, operator="add", attributes=diff_attrs.added,
name,
operator="add",
attributes=diff_attrs.added,
)
if diff_attrs.removed:
chattr(
name, operator="remove", attributes=diff_attrs.removed,
name,
operator="remove",
attributes=diff_attrs.removed,
)
cmp_attrs = _cmp_attrs(name, attrs)
if any(attr for attr in cmp_attrs):
@ -5397,10 +5403,14 @@ def check_managed_changes(
__clean_tmp(sfn)
return False, comments
if sfn and source and keep_mode:
if urllib.parse.urlparse(source).scheme in (
"salt",
"file",
) or source.startswith("/"):
if (
urllib.parse.urlparse(source).scheme
in (
"salt",
"file",
)
or source.startswith("/")
):
try:
mode = __salt__["cp.stat_file"](source, saltenv=saltenv, octal=True)
except Exception as exc: # pylint: disable=broad-except

View file

@ -117,8 +117,10 @@ def route_create(
packet to instance "instance-1"(if packet is intended to other network)
"""
credentials = oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name(
credential_file
credentials = (
oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name(
credential_file
)
)
service = googleapiclient.discovery.build("compute", "v1", credentials=credentials)
routes = service.routes()

View file

@ -180,7 +180,10 @@ def bootstrap(
if platform in ("rpm", "yum"):
_bootstrap_yum(
root, pkgs=pkgs, exclude_pkgs=exclude_pkgs, epel_url=epel_url,
root,
pkgs=pkgs,
exclude_pkgs=exclude_pkgs,
epel_url=epel_url,
)
elif platform == "deb":
_bootstrap_deb(
@ -194,7 +197,10 @@ def bootstrap(
)
elif platform == "pacman":
_bootstrap_pacman(
root, img_format=img_format, pkgs=pkgs, exclude_pkgs=exclude_pkgs,
root,
img_format=img_format,
pkgs=pkgs,
exclude_pkgs=exclude_pkgs,
)
if img_format != "dir":
@ -285,7 +291,11 @@ def _populate_cache(platform, pkg_cache, mount_dir):
def _bootstrap_yum(
root, pkg_confs="/etc/yum*", pkgs=None, exclude_pkgs=None, epel_url=EPEL_URL,
root,
pkg_confs="/etc/yum*",
pkgs=None,
exclude_pkgs=None,
epel_url=EPEL_URL,
):
"""
Bootstrap an image using the yum tools
@ -364,7 +374,13 @@ def _bootstrap_yum(
def _bootstrap_deb(
root, arch, flavor, repo_url=None, static_qemu=None, pkgs=None, exclude_pkgs=None,
root,
arch,
flavor,
repo_url=None,
static_qemu=None,
pkgs=None,
exclude_pkgs=None,
):
"""
Bootstrap an image using the Debian tools
@ -452,7 +468,11 @@ def _bootstrap_deb(
def _bootstrap_pacman(
root, pkg_confs="/etc/pacman*", img_format="dir", pkgs=None, exclude_pkgs=None,
root,
pkg_confs="/etc/pacman*",
img_format="dir",
pkgs=None,
exclude_pkgs=None,
):
"""
Bootstrap an image using the pacman tools
@ -618,7 +638,10 @@ def _tar(name, root, path=None, compress="bzip2"):
tarfile = "{}/{}.tar.{}".format(path, name, ext)
out = __salt__["archive.tar"](
options="{}pcf".format(compression), tarfile=tarfile, sources=".", dest=root,
options="{}pcf".format(compression),
tarfile=tarfile,
sources=".",
dest=root,
)
@ -642,7 +665,9 @@ def _untar(name, dest=None, path=None, compress="bz2"):
tarfile = "{}/{}.tar.{}".format(path, name, ext)
out = __salt__["archive.tar"](
options="{}xf".format(compression), tarfile=tarfile, dest=dest,
options="{}xf".format(compression),
tarfile=tarfile,
dest=dest,
)

View file

@ -68,7 +68,10 @@ def _gluster_xml(cmd):
# We will pass the command string as stdin to allow for much longer
# command strings. This is especially useful for creating large volumes
# where the list of bricks exceeds 128 characters.
if _get_version() < (3, 6,):
if _get_version() < (
3,
6,
):
result = __salt__["cmd.run"](
'script -q -c "gluster --xml --mode=script"', stdin="{}\n\004".format(cmd)
)
@ -767,7 +770,10 @@ def get_max_op_version():
salt '*' glusterfs.get_max_op_version
"""
if _get_version() < (3, 10,):
if _get_version() < (
3,
10,
):
return (
False,
"Glusterfs version must be 3.10+. Your version is {}.".format(

View file

@ -28,16 +28,16 @@ def __virtual__():
def _hadoop_cmd(module, command, *args):
"""
Hadoop/hdfs command wrapper
Hadoop/hdfs command wrapper
As Hadoop command has been deprecated this module will default
to use hdfs command and fall back to hadoop if it is not found
As Hadoop command has been deprecated this module will default
to use hdfs command and fall back to hadoop if it is not found
In order to prevent random execution the module name is checked
In order to prevent random execution the module name is checked
Follows hadoop command template:
hadoop module -command args
E.g.: hadoop dfs -ls /
Follows hadoop command template:
hadoop module -command args
E.g.: hadoop dfs -ls /
"""
tool = "hadoop"
if salt.utils.path.which("hdfs"):

View file

@ -58,7 +58,13 @@ def _render_tab(lst):
for pre in lst["pre"]:
ret.append("{}\n".format(pre))
for cron in lst["crons"]:
ret.append("{} {} {}\n".format(cron["path"], cron["mask"], cron["cmd"],))
ret.append(
"{} {} {}\n".format(
cron["path"],
cron["mask"],
cron["cmd"],
)
)
return ret

View file

@ -637,7 +637,7 @@ def create_continuous_query(
.. code-block:: bash
salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *' """
salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *'"""
client = _client(**client_args)
full_query = "CREATE CONTINUOUS QUERY {name} ON {database}"
if resample_time:

View file

@ -257,7 +257,10 @@ class Inspector(EnvLoader):
for p_type, p_list in (
("f", files),
("d", directories),
("l", links,),
(
"l",
links,
),
):
for p_obj in p_list:
stats = os.stat(p_obj)
@ -415,7 +418,12 @@ class Inspector(EnvLoader):
all_links.extend(e_links)
return self._get_unmanaged_files(
self._get_managed_files(), (all_files, all_dirs, all_links,)
self._get_managed_files(),
(
all_files,
all_dirs,
all_links,
),
)
def _prepare_full_scan(self, **kwargs):

View file

@ -52,7 +52,7 @@ def status(jboss_config, host=None, server_config=None):
salt '*' jboss7.status '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
"""
log.debug("======================== MODULE FUNCTION: jboss7.status")
if host is None and server_config is None:
operation = ":read-attribute(name=server-state)"
@ -85,7 +85,7 @@ def stop_server(jboss_config, host=None):
salt '*' jboss7.stop_server '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
"""
log.debug("======================== MODULE FUNCTION: jboss7.stop_server")
if host is None:
operation = ":shutdown"
@ -124,7 +124,7 @@ def reload_(jboss_config, host=None):
salt '*' jboss7.reload '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
"""
log.debug("======================== MODULE FUNCTION: jboss7.reload")
if host is None:
operation = ":reload"
@ -306,8 +306,10 @@ def __get_datasource_resource_description(jboss_config, name, profile=None):
profile,
)
operation = '/subsystem=datasources/data-source="{name}":read-resource-description'.format(
name=name
operation = (
'/subsystem=datasources/data-source="{name}":read-resource-description'.format(
name=name
)
)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation
@ -332,7 +334,7 @@ def read_datasource(jboss_config, name, profile=None):
.. code-block:: bash
salt '*' jboss7.read_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.read_datasource, name=%s",
name,
@ -394,7 +396,7 @@ def update_simple_binding(jboss_config, binding_name, value, profile=None):
.. code-block:: bash
salt '*' jboss7.update_simple_binding '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_binding_name my_binding_value
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.update_simple_binding, binding_name=%s, value=%s, profile=%s",
binding_name,
@ -425,7 +427,7 @@ def read_simple_binding(jboss_config, binding_name, profile=None):
.. code-block:: bash
salt '*' jboss7.read_simple_binding '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_binding_name
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.read_simple_binding, %s",
binding_name,
@ -502,7 +504,7 @@ def remove_datasource(jboss_config, name, profile=None):
.. code-block:: bash
salt '*' jboss7.remove_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_datasource_name
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.remove_datasource, name=%s, profile=%s",
name,
@ -532,7 +534,7 @@ def deploy(jboss_config, source_file):
.. code-block:: bash
salt '*' jboss7.deploy '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' /opt/deploy_files/my_deploy
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.deploy, source_file=%s",
source_file,
@ -556,7 +558,7 @@ def list_deployments(jboss_config):
salt '*' jboss7.list_deployments '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}'
"""
"""
log.debug("======================== MODULE FUNCTION: jboss7.list_deployments")
command_result = __salt__["jboss7_cli.run_command"](jboss_config, "deploy")
deployments = []
@ -580,7 +582,7 @@ def undeploy(jboss_config, deployment):
.. code-block:: bash
salt '*' jboss7.undeploy '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_deployment
"""
"""
log.debug(
"======================== MODULE FUNCTION: jboss7.undeploy, deployment=%s",
deployment,

View file

@ -1909,9 +1909,10 @@ def get_table(
ret["table"][table]["args"] = args
ret["table"][table]["command"] = data.GET_CMD
except ConnectClosedError:
ret["message"] = (
"Got ConnectClosedError exception. Connection lost "
"with {}".format(str(conn))
ret[
"message"
] = "Got ConnectClosedError exception. Connection lost " "with {}".format(
str(conn)
)
ret["out"] = False
_restart_connection()

View file

@ -63,7 +63,7 @@ def _guess_apiserver(apiserver_url=None):
def _kpost(url, data):
""" create any object in kubernetes based on URL """
"""create any object in kubernetes based on URL"""
# Prepare headers
headers = {"Content-Type": "application/json"}
@ -80,7 +80,7 @@ def _kpost(url, data):
def _kput(url, data):
""" put any object in kubernetes based on URL """
"""put any object in kubernetes based on URL"""
# Prepare headers
headers = {"Content-Type": "application/json"}
@ -96,7 +96,7 @@ def _kput(url, data):
def _kpatch(url, data):
""" patch any object in kubernetes based on URL """
"""patch any object in kubernetes based on URL"""
# Prepare headers
headers = {"Content-Type": "application/json-patch+json"}
@ -126,8 +126,8 @@ def _kname(obj):
def _is_dns_subdomain(name):
""" Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123
labels separated by '.' with a maximum length of 253 characters """
"""Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123
labels separated by '.' with a maximum length of 253 characters"""
dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""")
if dns_subdomain.match(name):
@ -139,10 +139,10 @@ def _is_dns_subdomain(name):
def _is_port_name(name):
""" Check that name is IANA service: An alphanumeric (a-z, and 0-9) string,
"""Check that name is IANA service: An alphanumeric (a-z, and 0-9) string,
with a maximum length of 15 characters, with the '-' character allowed
anywhere except the first or the last character or adjacent to another '-'
character, it must contain at least a (a-z) character """
character, it must contain at least a (a-z) character"""
port_name = re.compile("""^[a-z0-9]{1,15}$""")
if port_name.match(name):
@ -152,10 +152,10 @@ def _is_port_name(name):
def _is_dns_label(name):
""" Check that name is DNS label: An alphanumeric (a-z, and 0-9) string,
"""Check that name is DNS label: An alphanumeric (a-z, and 0-9) string,
with a maximum length of 63 characters, with the '-' character allowed
anywhere except the first or last character, suitable for use as a hostname
or segment in a domain name """
or segment in a domain name"""
dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""")
if dns_label.match(name):
@ -396,7 +396,7 @@ def _get_namespaces(apiserver_url, name=""):
def _create_namespace(namespace, apiserver_url):
""" create namespace on the defined k8s cluster """
"""create namespace on the defined k8s cluster"""
# Prepare URL
url = "{}/api/v1/namespaces".format(apiserver_url)
# Prepare data
@ -507,7 +507,7 @@ def _update_secret(namespace, name, data, apiserver_url):
def _create_secret(namespace, name, data, apiserver_url):
""" create namespace on the defined k8s cluster """
"""create namespace on the defined k8s cluster"""
# Prepare URL
url = "{}/api/v1/namespaces/{}/secrets".format(apiserver_url, namespace)
# Prepare data

View file

@ -170,7 +170,10 @@ def _hex_to_octets(addr):
Convert hex fields from /proc/net/route to octects
"""
return "{}:{}:{}:{}".format(
int(addr[6:8], 16), int(addr[4:6], 16), int(addr[2:4], 16), int(addr[0:2], 16),
int(addr[6:8], 16),
int(addr[4:6], 16),
int(addr[2:4], 16),
int(addr[0:2], 16),
)

View file

@ -295,7 +295,10 @@ def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
command = "logadm -f {}".format(conf_file)
for arg, val in kwargs.items():
if arg in option_toggles.values() and val:
command = "{} {}".format(command, _arg2opt(arg),)
command = "{} {}".format(
command,
_arg2opt(arg),
)
elif arg in option_flags.values():
command = "{} {} {}".format(command, _arg2opt(arg), _quote_args(str(val)))
elif arg != "log_file":

View file

@ -1630,17 +1630,20 @@ def init(
run(name, "rm -f '{}'".format(SEED_MARKER), path=path, python_shell=False)
gid = "/.lxc.initial_seed"
gids = [gid, "/lxc.initial_seed"]
if any(
retcode(
name,
"test -e {}".format(x),
path=path,
chroot_fallback=True,
ignore_retcode=True,
if (
any(
retcode(
name,
"test -e {}".format(x),
path=path,
chroot_fallback=True,
ignore_retcode=True,
)
== 0
for x in gids
)
== 0
for x in gids
) or not ret.get("result", True):
or not ret.get("result", True)
):
pass
elif seed or seed_cmd:
if seed:
@ -4373,7 +4376,10 @@ def write_conf(conf_file, conf):
elif isinstance(line, dict):
for key in list(line.keys()):
out_line = None
if isinstance(line[key], (str, (str,), (int,), float),):
if isinstance(
line[key],
(str, (str,), (int,), float),
):
out_line = " = ".join((key, "{}".format(line[key])))
elif isinstance(line[key], dict):
out_line = " = ".join((key, line[key]["value"]))

File diff suppressed because it is too large

View file

@ -64,15 +64,12 @@ def install(app_id, enable=True):
)
client_type = _client_type(app_id)
enable_str = "1" if enable else "0"
cmd = (
'sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
"\"INSERT or REPLACE INTO access VALUES('kTCCServiceAccessibility','{}',{},{},1,NULL{}{})\"".format(
app_id,
client_type,
enable_str,
",NULL" if ge_el_capitan else "",
",NULL,NULL,NULL,NULL,''" if ge_mojave else "",
)
cmd = 'sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" ' "\"INSERT or REPLACE INTO access VALUES('kTCCServiceAccessibility','{}',{},{},1,NULL{}{})\"".format(
app_id,
client_type,
enable_str,
",NULL" if ge_el_capitan else "",
",NULL,NULL,NULL,NULL,''" if ge_mojave else "",
)
call = __salt__["cmd.run_all"](cmd, output_loglevel="debug", python_shell=False)

View file

@ -120,7 +120,12 @@ def set_sleep(minutes):
state = []
for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep):
state.append(salt.utils.mac_utils.confirm_updated(value, check,))
state.append(
salt.utils.mac_utils.confirm_updated(
value,
check,
)
)
return all(state)
@ -163,7 +168,10 @@ def set_computer_sleep(minutes):
cmd = "systemsetup -setcomputersleep {}".format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(str(value), get_computer_sleep,)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_computer_sleep,
)
def get_display_sleep():
@ -205,7 +213,10 @@ def set_display_sleep(minutes):
cmd = "systemsetup -setdisplaysleep {}".format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(str(value), get_display_sleep,)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_display_sleep,
)
def get_harddisk_sleep():
@ -247,7 +258,10 @@ def set_harddisk_sleep(minutes):
cmd = "systemsetup -setharddisksleep {}".format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(str(value), get_harddisk_sleep,)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_harddisk_sleep,
)
def get_wake_on_modem():
@ -292,7 +306,10 @@ def set_wake_on_modem(enabled):
cmd = "systemsetup -setwakeonmodem {}".format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(state, get_wake_on_modem,)
return salt.utils.mac_utils.confirm_updated(
state,
get_wake_on_modem,
)
def get_wake_on_network():
@ -339,7 +356,10 @@ def set_wake_on_network(enabled):
cmd = "systemsetup -setwakeonnetworkaccess {}".format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(state, get_wake_on_network,)
return salt.utils.mac_utils.confirm_updated(
state,
get_wake_on_network,
)
def get_restart_power_failure():
@ -386,7 +406,10 @@ def set_restart_power_failure(enabled):
cmd = "systemsetup -setrestartpowerfailure {}".format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(state, get_restart_power_failure,)
return salt.utils.mac_utils.confirm_updated(
state,
get_restart_power_failure,
)
def get_restart_freeze():
@ -482,4 +505,7 @@ def set_sleep_on_power_button(enabled):
cmd = "systemsetup -setallowpowerbuttontosleepcomputer {}".format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(state, get_sleep_on_power_button,)
return salt.utils.mac_utils.confirm_updated(
state,
get_sleep_on_power_button,
)

Some files were not shown because too many files have changed in this diff