x86_64: salt-{release}-py3-x86_64.pkg | md5
-""".format(release=stripped_release) +""".format( + release=stripped_release +) # A shortcut for linking to tickets on the GitHub issue tracker extlinks = { - 'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % repo_primary_branch, None), - 'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue #'), - 'pull': ('https://github.com/saltstack/salt/pull/%s', 'PR #'), - 'formula_url': ('https://github.com/saltstack-formulas/%s', ''), + "blob": ( + "https://github.com/saltstack/salt/blob/%s/%%s" % repo_primary_branch, + None, + ), + "issue": ("https://github.com/saltstack/salt/issues/%s", "issue #"), + "pull": ("https://github.com/saltstack/salt/pull/%s", "PR #"), + "formula_url": ("https://github.com/saltstack-formulas/%s", ""), } # ----- Localization --------------------------------------------------------> -locale_dirs = ['locale/'] +locale_dirs = ["locale/"] gettext_compact = False # <---- Localization --------------------------------------------------------- ### HTML options # set 'HTML_THEME=saltstack' to use previous theme -html_theme = os.environ.get('HTML_THEME', 'saltstack2') -html_theme_path = ['_themes'] -html_title = u'' -html_short_title = 'Salt' +html_theme = os.environ.get("HTML_THEME", "saltstack2") +html_theme_path = ["_themes"] +html_title = u"" +html_short_title = "Salt" -html_static_path = ['_static'] -html_logo = None # specified in the theme layout.html -html_favicon = 'favicon.ico' +html_static_path = ["_static"] +html_logo = None # specified in the theme layout.html +html_favicon = "favicon.ico" smartquotes = False # Use Google customized search or use Sphinx built-in JavaScript search if on_saltstack: - html_search_template = 'googlesearch.html' + html_search_template = "googlesearch.html" else: - html_search_template = 'searchbox.html' + html_search_template = "searchbox.html" html_additional_pages = { - '404': '404.html', + "404": "404.html", } html_default_sidebars = [ html_search_template, - 'version.html', - 'localtoc.html', - 'relations.html', - 'sourcelink.html', - 'saltstack.html', + "version.html", + "localtoc.html", + "relations.html", + "sourcelink.html", + "saltstack.html", ] html_sidebars = { - 'ref/**/all/salt.*': [ + "ref/**/all/salt.*": [ html_search_template, - 'version.html', - 'modules-sidebar.html', - 'localtoc.html', - 'relations.html', - 'sourcelink.html', - 'saltstack.html', - ], - 'ref/formula/all/*': [ + "version.html", + "modules-sidebar.html", + "localtoc.html", + "relations.html", + "sourcelink.html", + "saltstack.html", ], + "ref/formula/all/*": [], } html_context = { - 'on_saltstack': on_saltstack, - 'html_default_sidebars': html_default_sidebars, - 'github_base': 'https://github.com/saltstack/salt', - 'github_issues': 'https://github.com/saltstack/salt/issues', - 'github_downloads': 'https://github.com/saltstack/salt/downloads', - 'latest_release': latest_release, - 'previous_release': previous_release, - 'previous_release_dir': previous_release_dir, - 'next_release': next_release, - 'next_release_dir': next_release_dir, - 'search_cx': search_cx, - 'build_type': build_type, - 'today': today, - 'copyright': copyright, - 'repo_primary_branch': repo_primary_branch + "on_saltstack": on_saltstack, + "html_default_sidebars": html_default_sidebars, + "github_base": "https://github.com/saltstack/salt", + "github_issues": "https://github.com/saltstack/salt/issues", + "github_downloads": "https://github.com/saltstack/salt/downloads", + "latest_release": latest_release, + "previous_release": previous_release, + 
"previous_release_dir": previous_release_dir, + "next_release": next_release, + "next_release_dir": next_release_dir, + "search_cx": search_cx, + "build_type": build_type, + "today": today, + "copyright": copyright, + "repo_primary_branch": repo_primary_branch, } html_use_index = True -html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = "%b %d, %Y" html_show_sourcelink = False html_show_sphinx = True html_show_copyright = True @@ -462,20 +472,20 @@ html_show_copyright = True ### Latex options latex_documents = [ - ('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'), + ("contents", "Salt.tex", "Salt Documentation", "SaltStack, Inc.", "manual"), ] -latex_logo = '_static/salt-logo.png' +latex_logo = "_static/salt-logo.png" latex_elements = { - 'inputenc': '', # use XeTeX instead of the inputenc LaTeX package. - 'utf8extra': '', - 'preamble': r''' + "inputenc": "", # use XeTeX instead of the inputenc LaTeX package. + "utf8extra": "", + "preamble": r""" \usepackage{fontspec} \setsansfont{Linux Biolinum O} \setromanfont{Linux Libertine O} \setmonofont{Source Code Pro} -''', +""", } ### Linux Biolinum, Linux Libertine: http://www.linuxlibertine.org/ ### Source Code Pro: https://github.com/adobe-fonts/source-code-pro/releases @@ -483,34 +493,34 @@ latex_elements = { ### Linkcheck options linkcheck_ignore = [ - r'http://127.0.0.1', - r'http://salt:\d+', - r'http://local:\d+', - r'https://console.aws.amazon.com', - r'http://192.168.33.10', - r'http://domain:\d+', - r'http://123.456.789.012:\d+', - r'http://localhost', - r'https://groups.google.com/forum/#!forum/salt-users', - r'http://logstash.net/docs/latest/inputs/udp', - r'http://logstash.net/docs/latest/inputs/zeromq', - r'http://www.youtube.com/saltstack', - r'https://raven.readthedocs.io', - r'https://getsentry.com', - r'https://salt-cloud.readthedocs.io', - r'https://salt.readthedocs.io', - r'http://www.pip-installer.org/', - r'http://www.windowsazure.com/', - r'https://github.com/watching', - r'dash-feed://', - r'https://github.com/saltstack/salt/', - r'http://bootstrap.saltstack.org', - r'https://bootstrap.saltstack.com', - r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh', - r'media.readthedocs.org/dash/salt/latest/salt.xml', - r'https://portal.aws.amazon.com/gp/aws/securityCredentials', - r'https://help.github.com/articles/fork-a-repo', - r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml', + r"http://127.0.0.1", + r"http://salt:\d+", + r"http://local:\d+", + r"https://console.aws.amazon.com", + r"http://192.168.33.10", + r"http://domain:\d+", + r"http://123.456.789.012:\d+", + r"http://localhost", + r"https://groups.google.com/forum/#!forum/salt-users", + r"http://logstash.net/docs/latest/inputs/udp", + r"http://logstash.net/docs/latest/inputs/zeromq", + r"http://www.youtube.com/saltstack", + r"https://raven.readthedocs.io", + r"https://getsentry.com", + r"https://salt-cloud.readthedocs.io", + r"https://salt.readthedocs.io", + r"http://www.pip-installer.org/", + r"http://www.windowsazure.com/", + r"https://github.com/watching", + r"dash-feed://", + r"https://github.com/saltstack/salt/", + r"http://bootstrap.saltstack.org", + r"https://bootstrap.saltstack.com", + r"https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh", + r"media.readthedocs.org/dash/salt/latest/salt.xml", + r"https://portal.aws.amazon.com/gp/aws/securityCredentials", + r"https://help.github.com/articles/fork-a-repo", + 
r"dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml", ] linkcheck_anchors = False @@ -519,53 +529,53 @@ linkcheck_anchors = False # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). authors = [ - 'Thomas S. Hatch[^ ]+)(?: (?P.*))?$', line)
- source, code, line = search.group('source'), search.group('code'), search.group('line')
+ log.debug("Received: %s", line)
+ search = re.match(
+ "^(?:(?P:[^ ]+) )?(?P[^ ]+)(?: (?P.*))?$", line
+ )
+ source, code, line = (
+ search.group("source"),
+ search.group("code"),
+ search.group("line"),
+ )
return Event(source, code, line)
def _allow_host(self, host):
@@ -132,82 +153,113 @@ class IRCClient(object):
return any([re.match(match, nick) for match in self.allow_nicks])
def _privmsg(self, event):
- search = re.match('^:(?P<nick>[^!]+)!(?P<user>[^@]+)@(?P<host>.*)$', event.source)
- nick, user, host = search.group('nick'), search.group('user'), search.group('host')
- search = re.match('^(?P<channel>[^ ]+) :(?:{0}(?P<command>[^ ]+)(?: (?P<line>.*))?)?$'.format(self.char), event.line)
+ search = re.match(
+ "^:(?P[^!]+)!(?P[^@]+)@(?P.*)$", event.source
+ )
+ nick, user, host = (
+ search.group("nick"),
+ search.group("user"),
+ search.group("host"),
+ )
+ search = re.match(
+ "^(?P[^ ]+) :(?:{0}(?P[^ ]+)(?: (?P.*))?)?$".format(
+ self.char
+ ),
+ event.line,
+ )
if search:
- channel, command, line = search.group('channel'), search.group('command'), search.group('line')
- if self.disable_query is True and not channel.startswith('#'):
+ channel, command, line = (
+ search.group("channel"),
+ search.group("command"),
+ search.group("line"),
+ )
+ if self.disable_query is True and not channel.startswith("#"):
return
if channel == self.nick:
channel = nick
- privevent = PrivEvent(event.source, nick, user, host, event.code, channel, command, line)
- if (self._allow_nick(nick) or self._allow_host(host)) and hasattr(self, '_command_{0}'.format(command)):
- getattr(self, '_command_{0}'.format(command))(privevent)
+ privevent = PrivEvent(
+ event.source, nick, user, host, event.code, channel, command, line
+ )
+ if (self._allow_nick(nick) or self._allow_host(host)) and hasattr(
+ self, "_command_{0}".format(command)
+ ):
+ getattr(self, "_command_{0}".format(command))(privevent)
def _command_echo(self, event):
- message = 'PRIVMSG {0} :{1}'.format(event.channel, event.line)
+ message = "PRIVMSG {0} :{1}".format(event.channel, event.line)
self.send_message(message)
def _command_ping(self, event):
- message = 'PRIVMSG {0} :{1}: pong'.format(event.channel, event.nick)
+ message = "PRIVMSG {0} :{1}: pong".format(event.channel, event.nick)
self.send_message(message)
def _command_event(self, event):
- if __opts__.get('__role') == 'master':
- fire_master = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir']).fire_event
+ if __opts__.get("__role") == "master":
+ fire_master = salt.utils.event.get_master_event(
+ __opts__, __opts__["sock_dir"]
+ ).fire_event
else:
fire_master = None
def fire(tag, msg):
- '''
+ """
How to fire the event
- '''
+ """
if fire_master:
fire_master(msg, tag)
else:
- __salt__['event.send'](tag, msg)
+ __salt__["event.send"](tag, msg)
- args = event.line.split(' ')
+ args = event.line.split(" ")
tag = args[0]
if len(args) > 1:
- payload = {'data': args[1:]}
+ payload = {"data": args[1:]}
else:
- payload = {'data': []}
+ payload = {"data": []}
- fire('salt/engines/ircbot/' + tag, payload)
- message = 'PRIVMSG {0} :{1}: TaDa!'.format(event.channel, event.nick)
+ fire("salt/engines/ircbot/" + tag, payload)
+ message = "PRIVMSG {0} :{1}: TaDa!".format(event.channel, event.nick)
self.send_message(message)
def _message(self, raw):
- raw = raw.rstrip(b'\r\n').decode('utf-8')
+ raw = raw.rstrip(b"\r\n").decode("utf-8")
event = self._event(raw)
if event.code == "PING":
- salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(self.send_message, "PONG {0}".format(event.line))
- elif event.code == 'PRIVMSG':
- salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(self._privmsg, event)
+ salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
+ self.send_message, "PONG {0}".format(event.line)
+ )
+ elif event.code == "PRIVMSG":
+ salt.ext.tornado.ioloop.IOLoop.current().spawn_callback(
+ self._privmsg, event
+ )
self.read_messages()
def join_channel(self, channel):
- if not channel.startswith('#'):
- channel = '#' + channel
- self.send_message('JOIN {0}'.format(channel))
+ if not channel.startswith("#"):
+ channel = "#" + channel
+ self.send_message("JOIN {0}".format(channel))
def on_connect(self):
logging.info("on_connect")
if self.sasl is True:
- self.send_message('CAP REQ :sasl')
- self.send_message('NICK {0}'.format(self.nick))
- self.send_message('USER saltstack 0 * :saltstack')
+ self.send_message("CAP REQ :sasl")
+ self.send_message("NICK {0}".format(self.nick))
+ self.send_message("USER saltstack 0 * :saltstack")
if self.password:
if self.sasl is True:
- authstring = base64.b64encode("{0}\x00{0}\x00{1}".format(self.username, self.password).encode())
- self.send_message('AUTHENTICATE PLAIN')
- self.send_message('AUTHENTICATE {0}'.format(authstring))
- self.send_message('CAP END')
+ authstring = base64.b64encode(
+ "{0}\x00{0}\x00{1}".format(self.username, self.password).encode()
+ )
+ self.send_message("AUTHENTICATE PLAIN")
+ self.send_message("AUTHENTICATE {0}".format(authstring))
+ self.send_message("CAP END")
else:
- self.send_message('PRIVMSG NickServ :IDENTIFY {0} {1}'.format(self.username, self.password))
+ self.send_message(
+ "PRIVMSG NickServ :IDENTIFY {0} {1}".format(
+ self.username, self.password
+ )
+ )
for channel in self.channels:
self.join_channel(channel)
self.read_messages()
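Reviewer aside (not part of the patch): on_connect above builds the SASL PLAIN blob as base64("authzid\x00authcid\x00password"), reusing the configured username for both identities. A minimal sketch of that construction, with invented credentials:

    import base64

    # Invented credentials, for illustration only.
    username, password = "saltbot", "hunter2"

    # SASL PLAIN payload: authorization-id NUL authentication-id NUL password.
    authstring = base64.b64encode("{0}\x00{0}\x00{1}".format(username, password).encode())
    print("AUTHENTICATE {0}".format(authstring.decode()))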
@@ -217,14 +269,26 @@ class IRCClient(object):
def send_message(self, line):
if isinstance(line, six.string_types):
- line = line.encode('utf-8')
+ line = line.encode("utf-8")
log.debug("Sending: %s", line)
- self._stream.write(line + b'\r\n')
+ self._stream.write(line + b"\r\n")
-def start(nick, host, port=6667, username=None, password=None, channels=None, use_ssl=False, use_sasl=False,
- char='!', allow_hosts=False, allow_nicks=False, disable_query=True):
- '''
+def start(
+ nick,
+ host,
+ port=6667,
+ username=None,
+ password=None,
+ channels=None,
+ use_ssl=False,
+ use_sasl=False,
+ char="!",
+ allow_hosts=False,
+ allow_nicks=False,
+ disable_query=True,
+):
+ """
IRC Bot for interacting with salt.
nick
@@ -279,7 +343,19 @@ def start(nick, host, port=6667, username=None, password=None, channels=None, us
/mode +r # do not allow unauthenticated users into the channel
It would also be possible to add a password to the irc channel, or only allow invited users to join.
- '''
- client = IRCClient(nick, host, port, username, password, channels or [], use_ssl, use_sasl, char,
- allow_hosts, allow_nicks, disable_query)
+ """
+ client = IRCClient(
+ nick,
+ host,
+ port,
+ username,
+ password,
+ channels or [],
+ use_ssl,
+ use_sasl,
+ char,
+ allow_hosts,
+ allow_nicks,
+ disable_query,
+ )
client.io_loop.start()
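Reviewer aside (not part of the patch): a minimal sketch of how the named groups in _event's pattern split a raw IRC line; the sample line is invented.

    import re

    EVENT_RE = re.compile(r"^(?:(?P<source>:[^ ]+) )?(?P<code>[^ ]+)(?: (?P<line>.*))?$")

    raw = ":nick!user@example.org PRIVMSG #salt :!ping"  # invented sample
    m = EVENT_RE.match(raw)
    print(m.group("source"), m.group("code"), m.group("line"))
    # -> :nick!user@example.org PRIVMSG #salt :!ping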
diff --git a/salt/engines/junos_syslog.py b/salt/engines/junos_syslog.py
index 8de346c6409..a55b2b8dac7 100644
--- a/salt/engines/junos_syslog.py
+++ b/salt/engines/junos_syslog.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Junos Syslog Engine
==========================
@@ -84,18 +84,36 @@ Below is a sample syslog event which is received from the junos device:
The source for parsing the syslog messages is taken from:
https://gist.github.com/leandrosilva/3651640#file-xlog-py
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-import re
import logging
+import re
import time
+import salt.utils.event as event
+
+# Import 3rd-party libs
+from salt.ext import six
+from salt.ext.six.moves import range # pylint: disable=redefined-builtin
+
try:
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor, threads
- from pyparsing import Word, alphas, Suppress, Combine, nums, string, \
- Optional, Regex, LineEnd, StringEnd, delimitedList
+ from pyparsing import (
+ Word,
+ alphas,
+ Suppress,
+ Combine,
+ nums,
+ string,
+ Optional,
+ Regex,
+ LineEnd,
+ StringEnd,
+ delimitedList,
+ )
+
HAS_TWISTED_AND_PYPARSING = True
except ImportError:
HAS_TWISTED_AND_PYPARSING = False
@@ -104,41 +122,33 @@ except ImportError:
class DatagramProtocol(object):
pass
-import salt.utils.event as event
-
-# Import 3rd-party libs
-from salt.ext import six
-from salt.ext.six.moves import range # pylint: disable=redefined-builtin
# logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
-__virtualname__ = 'junos_syslog'
+__virtualname__ = "junos_syslog"
def __virtual__():
- '''
+ """
Load only if twisted and pyparsing libs are present.
- '''
+ """
if not HAS_TWISTED_AND_PYPARSING:
- return (False, 'junos_syslog could not be loaded.'
- ' Make sure you have twisted and pyparsing python libraries.')
+ return (
+ False,
+ "junos_syslog could not be loaded."
+ " Make sure you have twisted and pyparsing python libraries.",
+ )
return True
class _Parser(object):
-
def __init__(self):
ints = Word(nums)
EOL = LineEnd().suppress()
# ip address of device
- ipAddress = Optional(
- delimitedList(
- ints,
- ".",
- combine=True) + Suppress(
- ":"))
+ ipAddress = Optional(delimitedList(ints, ".", combine=True) + Suppress(":"))
# priority
priority = Suppress("<") + ints + Suppress(">")
@@ -154,18 +164,24 @@ class _Parser(object):
hostname = Word(alphas + nums + "_" + "-" + ".")
# daemon
- daemon = Word(alphas + nums + "/" + "-" + "_" + ".") + Optional(
- Suppress("[") + ints + Suppress("]")) + Suppress(":")
+ daemon = (
+ Word(alphas + nums + "/" + "-" + "_" + ".")
+ + Optional(Suppress("[") + ints + Suppress("]"))
+ + Suppress(":")
+ )
# message
message = Regex(".*")
# pattern build
- self.__pattern = ipAddress + priority + timestamp + \
- hostname + daemon + message + StringEnd() | EOL
+ self.__pattern = (
+ ipAddress + priority + timestamp + hostname + daemon + message + StringEnd()
+ | EOL
+ )
- self.__pattern_without_daemon = ipAddress + priority + \
- timestamp + hostname + message + StringEnd() | EOL
+ self.__pattern_without_daemon = (
+ ipAddress + priority + timestamp + hostname + message + StringEnd() | EOL
+ )
def parse(self, line):
try:
@@ -182,10 +198,10 @@ class _Parser(object):
payload["facility"] = payload["priority"] >> 3
payload["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
payload["hostname"] = parsed[4]
- payload["daemon"] = 'unknown'
+ payload["daemon"] = "unknown"
payload["message"] = parsed[5]
- payload["event"] = 'SYSTEM'
- payload['raw'] = line
+ payload["event"] = "SYSTEM"
+ payload["raw"] = line
return payload
elif len(parsed) == 7:
payload = {}
@@ -196,8 +212,8 @@ class _Parser(object):
payload["hostname"] = parsed[4]
payload["daemon"] = parsed[5]
payload["message"] = parsed[6]
- payload["event"] = 'SYSTEM'
- obj = re.match(r'(\w+): (.*)', payload["message"])
+ payload["event"] = "SYSTEM"
+ obj = re.match(r"(\w+): (.*)", payload["message"])
if obj:
payload["message"] = obj.group(2)
payload["raw"] = line
@@ -212,8 +228,8 @@ class _Parser(object):
payload["daemon"] = parsed[5]
payload["pid"] = parsed[6]
payload["message"] = parsed[7]
- payload["event"] = 'SYSTEM'
- obj = re.match(r'(\w+): (.*)', payload["message"])
+ payload["event"] = "SYSTEM"
+ obj = re.match(r"(\w+): (.*)", payload["message"])
if obj:
payload["event"] = obj.group(1)
payload["message"] = obj.group(2)
@@ -230,8 +246,8 @@ class _Parser(object):
payload["daemon"] = parsed[6]
payload["pid"] = parsed[7]
payload["message"] = parsed[8]
- payload["event"] = 'SYSTEM'
- obj = re.match(r'(\w+): (.*)', payload["message"])
+ payload["event"] = "SYSTEM"
+ obj = re.match(r"(\w+): (.*)", payload["message"])
if obj:
payload["event"] = obj.group(1)
payload["message"] = obj.group(2)
@@ -240,7 +256,6 @@ class _Parser(object):
class _SyslogServerFactory(DatagramProtocol):
-
def __init__(self, options):
self.options = options
self.obj = _Parser()
@@ -254,40 +269,43 @@ class _SyslogServerFactory(DatagramProtocol):
"daemon",
"pid",
"message",
- "event"]
- if 'topic' in self.options:
+ "event",
+ ]
+ if "topic" in self.options:
# self.title = 'jnpr/syslog'
# To remove the stray '/', if not removed splitting the topic
# won't work properly. Eg: '/jnpr/syslog/event' won't be split
# properly if the starting '/' is not stripped
- self.options['topic'] = options['topic'].strip('/')
- topics = options['topic'].split("/")
+ self.options["topic"] = options["topic"].strip("/")
+ topics = options["topic"].split("/")
self.title = topics
- if len(topics) < 2 or topics[0] != 'jnpr' or topics[1] != 'syslog':
+ if len(topics) < 2 or topics[0] != "jnpr" or topics[1] != "syslog":
log.debug(
'The topic specified in configuration should start with \
- "jnpr/syslog". Using the default topic.')
- self.title = ['jnpr', 'syslog', 'hostname', 'event']
+ "jnpr/syslog". Using the default topic.'
+ )
+ self.title = ["jnpr", "syslog", "hostname", "event"]
else:
for i in range(2, len(topics)):
if topics[i] not in data:
log.debug(
- 'Please check the topic specified. \
+ "Please check the topic specified. \
Only the following keywords can be specified \
in the topic: hostip, priority, severity, \
facility, timestamp, hostname, daemon, pid, \
- message, event. Using the default topic.')
- self.title = ['jnpr', 'syslog', 'hostname', 'event']
+ message, event. Using the default topic."
+ )
+ self.title = ["jnpr", "syslog", "hostname", "event"]
break
# We are done processing the topic. All other arguments are the
# filters given by the user. While processing the filters we don't
# explicitly ignore the 'topic', but delete it here itself.
- del self.options['topic']
+ del self.options["topic"]
else:
- self.title = ['jnpr', 'syslog', 'hostname', 'event']
+ self.title = ["jnpr", "syslog", "hostname", "event"]
def parseData(self, data, host, port, options):
- '''
+ """
This function will parse the raw syslog data, dynamically create the
topic according to the topic specified by the user (if specified) and
decide whether to send the syslog data as an event on the master bus,
@@ -300,12 +318,11 @@ class _SyslogServerFactory(DatagramProtocol):
:return: The result dictionary which contains the data and the topic,
if the event is to be sent on the bus.
- '''
+ """
data = self.obj.parse(data)
- data['hostip'] = host
+ data["hostip"] = host
log.debug(
- 'Junos Syslog - received %s from %s, sent from port %s',
- data, host, port
+ "Junos Syslog - received %s from %s, sent from port %s", data, host, port
)
send_this_event = True
@@ -323,73 +340,68 @@ class _SyslogServerFactory(DatagramProtocol):
send_this_event = False
break
else:
- raise Exception(
- 'Arguments in config not specified properly')
+ raise Exception("Arguments in config not specified properly")
else:
raise Exception(
- 'Please check the arguments given to junos engine in the\
- configuration file')
+ "Please check the arguments given to junos engine in the\
+ configuration file"
+ )
if send_this_event:
- if 'event' in data:
- topic = 'jnpr/syslog'
+ if "event" in data:
+ topic = "jnpr/syslog"
for i in range(2, len(self.title)):
- topic += '/' + six.text_type(data[self.title[i]])
+ topic += "/" + six.text_type(data[self.title[i]])
log.debug(
- 'Junos Syslog - sending this event on the bus: %s from %s',
- data, host
+ "Junos Syslog - sending this event on the bus: %s from %s",
+ data,
+ host,
)
- result = {'send': True, 'data': data, 'topic': topic}
+ result = {"send": True, "data": data, "topic": topic}
return result
else:
- raise Exception(
- 'The incoming event data could not be parsed properly.')
+ raise Exception("The incoming event data could not be parsed properly.")
else:
- result = {'send': False}
+ result = {"send": False}
return result
def send_event_to_salt(self, result):
- '''
+ """
This function identifies whether the engine is running on the master
or the minion and sends the data to the master event bus accordingly.
:param result: It's a dictionary which has the final data and topic.
- '''
- if result['send']:
- data = result['data']
- topic = result['topic']
+ """
+ if result["send"]:
+ data = result["data"]
+ topic = result["topic"]
# If the engine is run on master, get the event bus and send the
# parsed event.
- if __opts__['__role'] == 'master':
- event.get_master_event(__opts__,
- __opts__['sock_dir']
- ).fire_event(data, topic)
+ if __opts__["__role"] == "master":
+ event.get_master_event(__opts__, __opts__["sock_dir"]).fire_event(
+ data, topic
+ )
# If the engine is run on minion, use the fire_master execution
# module to send event on the master bus.
else:
- __salt__['event.fire_master'](data=data, tag=topic)
+ __salt__["event.fire_master"](data=data, tag=topic)
def handle_error(self, err_msg):
- '''
+ """
Log the error messages.
- '''
+ """
log.error(err_msg.getErrorMessage)
def datagramReceived(self, data, connection_details):
(host, port) = connection_details
- d = threads.deferToThread(
- self.parseData,
- data,
- host,
- port,
- self.options)
+ d = threads.deferToThread(self.parseData, data, host, port, self.options)
d.addCallbacks(self.send_event_to_salt, self.handle_error)
def start(port=516, **kwargs):
- log.info('Starting junos syslog engine (port %s)', port)
+ log.info("Starting junos syslog engine (port %s)", port)
reactor.listenUDP(port, _SyslogServerFactory(kwargs))
reactor.run()
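Reviewer aside (not part of the patch): _Parser.parse above splits the syslog priority into severity (pri & 0x07) and facility (pri >> 3). A quick check with an invented priority value:

    pri = 30  # "<30>" in a syslog header
    print(pri >> 3, pri & 0x07)  # -> 3 6 (facility: daemon, severity: informational)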
diff --git a/salt/engines/libvirt_events.py b/salt/engines/libvirt_events.py
index cdb5d1dfe84..c2594e821ae 100644
--- a/salt/engines/libvirt_events.py
+++ b/salt/engines/libvirt_events.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that listens for libvirt events and resends them to the salt event bus.
The minimal configuration is the following and will listen to all events on the
@@ -63,9 +63,10 @@ A polkit rule like the following one will allow `salt` user to connect to libvir
:depends: libvirt 1.0.0+ python binding
.. versionadded:: 2019.2.0
-'''
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
-from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs
@@ -73,6 +74,7 @@ import salt.utils.event
# pylint: disable=no-name-in-module,import-error
from salt.ext.six.moves.urllib.parse import urlparse
+
# pylint: enable=no-name-in-module,import-error
log = logging.getLogger(__name__)
@@ -85,112 +87,119 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if libvirt python binding is present
- '''
+ """
if libvirt is None:
- msg = 'libvirt module not found'
+ msg = "libvirt module not found"
elif libvirt.getVersion() < 1000000:
- msg = 'libvirt >= 1.0.0 required'
+ msg = "libvirt >= 1.0.0 required"
else:
- msg = ''
+ msg = ""
return not bool(msg), msg
REGISTER_FUNCTIONS = {
- 'domain': 'domainEventRegisterAny',
- 'network': 'networkEventRegisterAny',
- 'pool': 'storagePoolEventRegisterAny',
- 'nodedev': 'nodeDeviceEventRegisterAny',
- 'secret': 'secretEventRegisterAny'
+ "domain": "domainEventRegisterAny",
+ "network": "networkEventRegisterAny",
+ "pool": "storagePoolEventRegisterAny",
+ "nodedev": "nodeDeviceEventRegisterAny",
+ "secret": "secretEventRegisterAny",
}
# Handle either BLOCK_JOB or BLOCK_JOB_2, but prefer the latter
-if hasattr(libvirt, 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2'):
- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2'
+if hasattr(libvirt, "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2"):
+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2"
else:
- BLOCK_JOB_ID = 'VIR_DOMAIN_EVENT_ID_BLOCK_JOB'
+ BLOCK_JOB_ID = "VIR_DOMAIN_EVENT_ID_BLOCK_JOB"
CALLBACK_DEFS = {
- 'domain': (('lifecycle', None),
- ('reboot', None),
- ('rtc_change', None),
- ('watchdog', None),
- ('graphics', None),
- ('io_error', 'VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON'),
- ('control_error', None),
- ('disk_change', None),
- ('tray_change', None),
- ('pmwakeup', None),
- ('pmsuspend', None),
- ('balloon_change', None),
- ('pmsuspend_disk', None),
- ('device_removed', None),
- ('block_job', BLOCK_JOB_ID),
- ('tunable', None),
- ('agent_lifecycle', None),
- ('device_added', None),
- ('migration_iteration', None),
- ('job_completed', None),
- ('device_removal_failed', None),
- ('metadata_change', None),
- ('block_threshold', None)),
- 'network': (('lifecycle', None),),
- 'pool': (('lifecycle', None),
- ('refresh', None)),
- 'nodedev': (('lifecycle', None),
- ('update', None)),
- 'secret': (('lifecycle', None),
- ('value_changed', None))
+ "domain": (
+ ("lifecycle", None),
+ ("reboot", None),
+ ("rtc_change", None),
+ ("watchdog", None),
+ ("graphics", None),
+ ("io_error", "VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON"),
+ ("control_error", None),
+ ("disk_change", None),
+ ("tray_change", None),
+ ("pmwakeup", None),
+ ("pmsuspend", None),
+ ("balloon_change", None),
+ ("pmsuspend_disk", None),
+ ("device_removed", None),
+ ("block_job", BLOCK_JOB_ID),
+ ("tunable", None),
+ ("agent_lifecycle", None),
+ ("device_added", None),
+ ("migration_iteration", None),
+ ("job_completed", None),
+ ("device_removal_failed", None),
+ ("metadata_change", None),
+ ("block_threshold", None),
+ ),
+ "network": (("lifecycle", None),),
+ "pool": (("lifecycle", None), ("refresh", None)),
+ "nodedev": (("lifecycle", None), ("update", None)),
+ "secret": (("lifecycle", None), ("value_changed", None)),
}
def _compute_subprefix(attr):
- '''
+ """
Get the part before the first '_' or the end of attr including
the potential '_'
- '''
- return ''.join((attr.split('_')[0], '_' if len(attr.split('_')) > 1 else ''))
+ """
+ return "".join((attr.split("_")[0], "_" if len(attr.split("_")) > 1 else ""))
def _get_libvirt_enum_string(prefix, value):
- '''
+ """
Convert the libvirt enum integer value into a human readable string.
:param prefix: start of the libvirt attribute to look for.
:param value: integer to convert to string
- '''
- attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)]
+ """
+ attributes = [
+ attr[len(prefix) :] for attr in libvirt.__dict__ if attr.startswith(prefix)
+ ]
# Filter out the values starting with a common base as they match another enum
prefixes = [_compute_subprefix(p) for p in attributes]
counts = {p: prefixes.count(p) for p in prefixes}
- sub_prefixes = [p for p, count in counts.items() if count > 1 or (p.endswith('_') and p[:-1] in prefixes)]
- filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes]
+ sub_prefixes = [
+ p
+ for p, count in counts.items()
+ if count > 1 or (p.endswith("_") and p[:-1] in prefixes)
+ ]
+ filtered = [
+ attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes
+ ]
for candidate in filtered:
- if value == getattr(libvirt, ''.join((prefix, candidate))):
- name = candidate.lower().replace('_', ' ')
+ if value == getattr(libvirt, "".join((prefix, candidate))):
+ name = candidate.lower().replace("_", " ")
return name
- return 'unknown'
+ return "unknown"
def _get_domain_event_detail(event, detail):
- '''
+ """
Convert event and detail numeric values into a tuple of human readable strings
- '''
- event_name = _get_libvirt_enum_string('VIR_DOMAIN_EVENT_', event)
- if event_name == 'unknown':
- return event_name, 'unknown'
+ """
+ event_name = _get_libvirt_enum_string("VIR_DOMAIN_EVENT_", event)
+ if event_name == "unknown":
+ return event_name, "unknown"
- prefix = 'VIR_DOMAIN_EVENT_{0}_'.format(event_name.upper())
+ prefix = "VIR_DOMAIN_EVENT_{0}_".format(event_name.upper())
detail_name = _get_libvirt_enum_string(prefix, detail)
return event_name, detail_name
def _salt_send_event(opaque, conn, data):
- '''
+ """
Convenience function adding common data to the event and sending it
on the salt event bus.
@@ -198,10 +207,10 @@ def _salt_send_event(opaque, conn, data):
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param data: additional event data dict to send
- '''
- tag_prefix = opaque['prefix']
- object_type = opaque['object']
- event_type = opaque['event']
+ """
+ tag_prefix = opaque["prefix"]
+ object_type = opaque["object"]
+ event_type = opaque["event"]
# Prepare the connection URI to fit in the tag
# qemu+ssh://user@host:1234/system -> qemu+ssh/user@host:1234/system
@@ -209,30 +218,28 @@ def _salt_send_event(opaque, conn, data):
uri_tag = [uri.scheme]
if uri.netloc:
uri_tag.append(uri.netloc)
- path = uri.path.strip('/')
+ path = uri.path.strip("/")
if path:
uri_tag.append(path)
uri_str = "/".join(uri_tag)
# Append some common data
- all_data = {
- 'uri': conn.getURI()
- }
+ all_data = {"uri": conn.getURI()}
all_data.update(data)
- tag = '/'.join((tag_prefix, uri_str, object_type, event_type))
+ tag = "/".join((tag_prefix, uri_str, object_type, event_type))
# Actually send the event in salt
- if __opts__.get('__role') == 'master':
- salt.utils.event.get_master_event(
- __opts__,
- __opts__['sock_dir']).fire_event(all_data, tag)
+ if __opts__.get("__role") == "master":
+ salt.utils.event.get_master_event(__opts__, __opts__["sock_dir"]).fire_event(
+ all_data, tag
+ )
else:
- __salt__['event.send'](tag, all_data)
+ __salt__["event.send"](tag, all_data)
def _salt_send_domain_event(opaque, conn, domain, event, event_data):
- '''
+ """
Helper function send a salt event for a libvirt domain.
:param opaque: the opaque data that is passed to the callback.
@@ -241,375 +248,428 @@ def _salt_send_domain_event(opaque, conn, domain, event, event_data):
:param domain: name of the domain related to the event
:param event: name of the event
:param event_data: additional event data dict to send
- '''
+ """
data = {
- 'domain': {
- 'name': domain.name(),
- 'id': domain.ID(),
- 'uuid': domain.UUIDString()
+ "domain": {
+ "name": domain.name(),
+ "id": domain.ID(),
+ "uuid": domain.UUIDString(),
},
- 'event': event
+ "event": event,
}
data.update(event_data)
_salt_send_event(opaque, conn, data)
def _domain_event_lifecycle_cb(conn, domain, event, detail, opaque):
- '''
+ """
Domain lifecycle events handler
- '''
+ """
event_str, detail_str = _get_domain_event_detail(event, detail)
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'event': event_str,
- 'detail': detail_str
- })
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {"event": event_str, "detail": detail_str},
+ )
def _domain_event_reboot_cb(conn, domain, opaque):
- '''
+ """
Domain reboot events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {})
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {})
def _domain_event_rtc_change_cb(conn, domain, utcoffset, opaque):
- '''
+ """
Domain RTC change events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'utcoffset': utcoffset
- })
+ """
+ _salt_send_domain_event(
+ opaque, conn, domain, opaque["event"], {"utcoffset": utcoffset}
+ )
def _domain_event_watchdog_cb(conn, domain, action, opaque):
- '''
+ """
Domain watchdog events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action)
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {"action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_WATCHDOG_", action)},
+ )
def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque):
- '''
+ """
Domain I/O Error events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'srcPath': srcpath,
- 'dev': devalias,
- 'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action),
- 'reason': reason
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "srcPath": srcpath,
+ "dev": devalias,
+ "action": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_IO_ERROR_", action),
+ "reason": reason,
+ },
+ )
-def _domain_event_graphics_cb(conn, domain, phase, local, remote, auth, subject, opaque):
- '''
+def _domain_event_graphics_cb(
+ conn, domain, phase, local, remote, auth, subject, opaque
+):
+ """
Domain graphics events handler
- '''
- prefix = 'VIR_DOMAIN_EVENT_GRAPHICS_'
+ """
+ prefix = "VIR_DOMAIN_EVENT_GRAPHICS_"
def get_address(addr):
- '''
+ """
transform address structure into event data piece
- '''
- return {'family': _get_libvirt_enum_string('{0}_ADDRESS_'.format(prefix), addr['family']),
- 'node': addr['node'],
- 'service': addr['service']}
+ """
+ return {
+ "family": _get_libvirt_enum_string(
+ "{0}_ADDRESS_".format(prefix), addr["family"]
+ ),
+ "node": addr["node"],
+ "service": addr["service"],
+ }
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'phase': _get_libvirt_enum_string(prefix, phase),
- 'local': get_address(local),
- 'remote': get_address(remote),
- 'authScheme': auth,
- 'subject': [{'type': item[0], 'name': item[1]} for item in subject]
- })
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "phase": _get_libvirt_enum_string(prefix, phase),
+ "local": get_address(local),
+ "remote": get_address(remote),
+ "authScheme": auth,
+ "subject": [{"type": item[0], "name": item[1]} for item in subject],
+ },
+ )
def _domain_event_control_error_cb(conn, domain, opaque):
- '''
+ """
Domain control error events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {})
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {})
def _domain_event_disk_change_cb(conn, domain, old_src, new_src, dev, reason, opaque):
- '''
+ """
Domain disk change events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'oldSrcPath': old_src,
- 'newSrcPath': new_src,
- 'dev': dev,
- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_DISK_', reason)
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "oldSrcPath": old_src,
+ "newSrcPath": new_src,
+ "dev": dev,
+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_DISK_", reason),
+ },
+ )
def _domain_event_tray_change_cb(conn, domain, dev, reason, opaque):
- '''
+ """
Domain tray change events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'dev': dev,
- 'reason': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_TRAY_CHANGE_', reason)
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "dev": dev,
+ "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_TRAY_CHANGE_", reason),
+ },
+ )
def _domain_event_pmwakeup_cb(conn, domain, reason, opaque):
- '''
+ """
Domain wakeup events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'reason': 'unknown' # currently unused
- })
+ """
+ _salt_send_domain_event(
+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
+ )
def _domain_event_pmsuspend_cb(conn, domain, reason, opaque):
- '''
+ """
Domain suspend events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'reason': 'unknown' # currently unused
- })
+ """
+ _salt_send_domain_event(
+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
+ )
def _domain_event_balloon_change_cb(conn, domain, actual, opaque):
- '''
+ """
Domain balloon change events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'actual': actual
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"actual": actual})
def _domain_event_pmsuspend_disk_cb(conn, domain, reason, opaque):
- '''
+ """
Domain disk suspend events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'reason': 'unknown' # currently unused
- })
+ """
+ _salt_send_domain_event(
+ opaque, conn, domain, opaque["event"], {"reason": "unknown"} # currently unused
+ )
def _domain_event_block_job_cb(conn, domain, disk, job_type, status, opaque):
- '''
+ """
Domain block job events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'disk': disk,
- 'type': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_TYPE_', job_type),
- 'status': _get_libvirt_enum_string('VIR_DOMAIN_BLOCK_JOB_', status)
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "disk": disk,
+ "type": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_TYPE_", job_type),
+ "status": _get_libvirt_enum_string("VIR_DOMAIN_BLOCK_JOB_", status),
+ },
+ )
def _domain_event_device_removed_cb(conn, domain, dev, opaque):
- '''
+ """
Domain device removal events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'dev': dev
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
def _domain_event_tunable_cb(conn, domain, params, opaque):
- '''
+ """
Domain tunable events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'params': params
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params})
# pylint: disable=invalid-name
def _domain_event_agent_lifecycle_cb(conn, domain, state, reason, opaque):
- '''
+ """
Domain agent lifecycle events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'state': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_', state),
- 'reason': _get_libvirt_enum_string('VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_', reason)
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "state": _get_libvirt_enum_string(
+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_", state
+ ),
+ "reason": _get_libvirt_enum_string(
+ "VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_", reason
+ ),
+ },
+ )
def _domain_event_device_added_cb(conn, domain, dev, opaque):
- '''
+ """
Domain device addition events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'dev': dev
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
# pylint: disable=invalid-name
def _domain_event_migration_iteration_cb(conn, domain, iteration, opaque):
- '''
+ """
Domain migration iteration events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'iteration': iteration
- })
+ """
+ _salt_send_domain_event(
+ opaque, conn, domain, opaque["event"], {"iteration": iteration}
+ )
def _domain_event_job_completed_cb(conn, domain, params, opaque):
- '''
+ """
Domain job completion events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'params': params
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"params": params})
def _domain_event_device_removal_failed_cb(conn, domain, dev, opaque):
- '''
+ """
Domain device removal failure events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'dev': dev
- })
+ """
+ _salt_send_domain_event(opaque, conn, domain, opaque["event"], {"dev": dev})
def _domain_event_metadata_change_cb(conn, domain, mtype, nsuri, opaque):
- '''
+ """
Domain metadata change events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'type': _get_libvirt_enum_string('VIR_DOMAIN_METADATA_', mtype),
- 'nsuri': nsuri
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {
+ "type": _get_libvirt_enum_string("VIR_DOMAIN_METADATA_", mtype),
+ "nsuri": nsuri,
+ },
+ )
-def _domain_event_block_threshold_cb(conn, domain, dev, path, threshold, excess, opaque):
- '''
+def _domain_event_block_threshold_cb(
+ conn, domain, dev, path, threshold, excess, opaque
+):
+ """
Domain block threshold events handler
- '''
- _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
- 'dev': dev,
- 'path': path,
- 'threshold': threshold,
- 'excess': excess
- })
+ """
+ _salt_send_domain_event(
+ opaque,
+ conn,
+ domain,
+ opaque["event"],
+ {"dev": dev, "path": path, "threshold": threshold, "excess": excess},
+ )
def _network_event_lifecycle_cb(conn, net, event, detail, opaque):
- '''
+ """
Network lifecycle events handler
- '''
+ """
- _salt_send_event(opaque, conn, {
- 'network': {
- 'name': net.name(),
- 'uuid': net.UUIDString()
+ _salt_send_event(
+ opaque,
+ conn,
+ {
+ "network": {"name": net.name(), "uuid": net.UUIDString()},
+ "event": _get_libvirt_enum_string("VIR_NETWORK_EVENT_", event),
+ "detail": "unknown", # currently unused
},
- 'event': _get_libvirt_enum_string('VIR_NETWORK_EVENT_', event),
- 'detail': 'unknown' # currently unused
- })
+ )
def _pool_event_lifecycle_cb(conn, pool, event, detail, opaque):
- '''
+ """
Storage pool lifecycle events handler
- '''
- _salt_send_event(opaque, conn, {
- 'pool': {
- 'name': pool.name(),
- 'uuid': pool.UUIDString()
+ """
+ _salt_send_event(
+ opaque,
+ conn,
+ {
+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()},
+ "event": _get_libvirt_enum_string("VIR_STORAGE_POOL_EVENT_", event),
+ "detail": "unknown", # currently unused
},
- 'event': _get_libvirt_enum_string('VIR_STORAGE_POOL_EVENT_', event),
- 'detail': 'unknown' # currently unused
- })
+ )
def _pool_event_refresh_cb(conn, pool, opaque):
- '''
+ """
Storage pool refresh events handler
- '''
- _salt_send_event(opaque, conn, {
- 'pool': {
- 'name': pool.name(),
- 'uuid': pool.UUIDString()
+ """
+ _salt_send_event(
+ opaque,
+ conn,
+ {
+ "pool": {"name": pool.name(), "uuid": pool.UUIDString()},
+ "event": opaque["event"],
},
- 'event': opaque['event']
- })
+ )
def _nodedev_event_lifecycle_cb(conn, dev, event, detail, opaque):
- '''
+ """
Node device lifecycle events handler
- '''
- _salt_send_event(opaque, conn, {
- 'nodedev': {
- 'name': dev.name()
+ """
+ _salt_send_event(
+ opaque,
+ conn,
+ {
+ "nodedev": {"name": dev.name()},
+ "event": _get_libvirt_enum_string("VIR_NODE_DEVICE_EVENT_", event),
+ "detail": "unknown", # currently unused
},
- 'event': _get_libvirt_enum_string('VIR_NODE_DEVICE_EVENT_', event),
- 'detail': 'unknown' # currently unused
- })
+ )
def _nodedev_event_update_cb(conn, dev, opaque):
- '''
+ """
Node device update events handler
- '''
- _salt_send_event(opaque, conn, {
- 'nodedev': {
- 'name': dev.name()
- },
- 'event': opaque['event']
- })
+ """
+ _salt_send_event(
+ opaque, conn, {"nodedev": {"name": dev.name()}, "event": opaque["event"]}
+ )
def _secret_event_lifecycle_cb(conn, secret, event, detail, opaque):
- '''
+ """
Secret lifecycle events handler
- '''
- _salt_send_event(opaque, conn, {
- 'secret': {
- 'uuid': secret.UUIDString()
+ """
+ _salt_send_event(
+ opaque,
+ conn,
+ {
+ "secret": {"uuid": secret.UUIDString()},
+ "event": _get_libvirt_enum_string("VIR_SECRET_EVENT_", event),
+ "detail": "unknown", # currently unused
},
- 'event': _get_libvirt_enum_string('VIR_SECRET_EVENT_', event),
- 'detail': 'unknown' # currently unused
- })
+ )
def _secret_event_value_changed_cb(conn, secret, opaque):
- '''
+ """
Secret value change events handler
- '''
- _salt_send_event(opaque, conn, {
- 'secret': {
- 'uuid': secret.UUIDString()
- },
- 'event': opaque['event']
- })
+ """
+ _salt_send_event(
+ opaque,
+ conn,
+ {"secret": {"uuid": secret.UUIDString()}, "event": opaque["event"]},
+ )
def _cleanup(cnx):
- '''
+ """
Close the libvirt connection
:param cnx: libvirt connection
- '''
- log.debug('Closing libvirt connection: %s', cnx.getURI())
+ """
+ log.debug("Closing libvirt connection: %s", cnx.getURI())
cnx.close()
def _callbacks_cleanup(cnx, callback_ids):
- '''
+ """
Unregister all the registered callbacks
:param cnx: libvirt connection
:param callback_ids: dictionary mapping a libvirt object type to an ID list
of callbacks to deregister
- '''
+ """
for obj, ids in callback_ids.items():
register_name = REGISTER_FUNCTIONS[obj]
- deregister_name = register_name.replace('Reg', 'Dereg')
+ deregister_name = register_name.replace("Reg", "Dereg")
deregister = getattr(cnx, deregister_name)
for callback_id in ids:
deregister(callback_id)
def _register_callback(cnx, tag_prefix, obj, event, real_id):
- '''
+ """
Helper function registering a callback
:param cnx: libvirt connection
@@ -620,10 +680,10 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
:param real_id: the libvirt name of an alternative event id to use or None
:rtype integer value needed to deregister the callback
- '''
+ """
libvirt_name = real_id
if real_id is None:
- libvirt_name = 'VIR_{0}_EVENT_ID_{1}'.format(obj, event).upper()
+ libvirt_name = "VIR_{0}_EVENT_ID_{1}".format(obj, event).upper()
if not hasattr(libvirt, libvirt_name):
log.warning('Skipping "%s/%s" events: libvirt too old', obj, event)
@@ -633,34 +693,34 @@ def _register_callback(cnx, tag_prefix, obj, event, real_id):
callback_name = "_{0}_event_{1}_cb".format(obj, event)
callback = globals().get(callback_name, None)
if callback is None:
- log.error('Missing function %s in engine', callback_name)
+ log.error("Missing function %s in engine", callback_name)
return None
register = getattr(cnx, REGISTER_FUNCTIONS[obj])
- return register(None, libvirt_id, callback,
- {'prefix': tag_prefix,
- 'object': obj,
- 'event': event})
+ return register(
+ None,
+ libvirt_id,
+ callback,
+ {"prefix": tag_prefix, "object": obj, "event": event},
+ )
def _append_callback_id(ids, obj, callback_id):
- '''
+ """
Helper function adding a callback ID to the IDs dict.
The callback ids dict maps an object to event callback ids.
:param ids: dict of callback IDs to update
:param obj: one of the keys of REGISTER_FUNCTIONS
:param callback_id: the result of _register_callback
- '''
+ """
if obj not in ids:
ids[obj] = []
ids[obj].append(callback_id)
-def start(uri=None,
- tag_prefix='salt/engines/libvirt_events',
- filters=None):
- '''
+def start(uri=None, tag_prefix="salt/engines/libvirt_events", filters=None):
+ """
Listen to libvirt events and forward them to salt.
:param uri: libvirt URI to listen on.
@@ -668,14 +728,14 @@ def start(uri=None,
:param tag_prefix: the beginning of the salt event tag to use.
Defaults to 'salt/engines/libvirt_events'
:param filters: the list of events to listen on. Defaults to 'all'
- '''
+ """
if filters is None:
- filters = ['all']
+ filters = ["all"]
try:
libvirt.virEventRegisterDefaultImpl()
cnx = libvirt.openReadOnly(uri)
- log.debug('Opened libvirt uri: %s', cnx.getURI())
+ log.debug("Opened libvirt uri: %s", cnx.getURI())
callback_ids = {}
all_filters = "all" in filters
@@ -683,17 +743,20 @@ def start(uri=None,
for obj, event_defs in CALLBACK_DEFS.items():
for event, real_id in event_defs:
event_filter = "/".join((obj, event))
- if event_filter not in filters and obj not in filters and not all_filters:
+ if (
+ event_filter not in filters
+ and obj not in filters
+ and not all_filters
+ ):
continue
- registered_id = _register_callback(cnx, tag_prefix,
- obj, event, real_id)
+ registered_id = _register_callback(cnx, tag_prefix, obj, event, real_id)
if registered_id:
_append_callback_id(callback_ids, obj, registered_id)
exit_loop = False
while not exit_loop:
exit_loop = libvirt.virEventRunDefaultImpl() < 0
- log.debug('=== in the loop exit_loop %s ===', exit_loop)
+ log.debug("=== in the loop exit_loop %s ===", exit_loop)
except Exception as err: # pylint: disable=broad-except
log.exception(err)
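Reviewer aside (not part of the patch): _compute_subprefix keeps everything up to and including the first underscore, which is how _get_libvirt_enum_string discards enum names that only share a common base. A standalone check:

    def _compute_subprefix(attr):
        # Text before the first "_", plus the "_" itself when one is present.
        return "".join((attr.split("_")[0], "_" if len(attr.split("_")) > 1 else ""))

    print(_compute_subprefix("defined_added"))  # -> defined_
    print(_compute_subprefix("started"))        # -> started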
diff --git a/salt/engines/logentries.py b/salt/engines/logentries.py
index 65e7ad0a567..83f622eac4d 100644
--- a/salt/engines/logentries.py
+++ b/salt/engines/logentries.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that sends events to the Logentries logging service.
:maintainer: Jimmy Tang (jimmy_tang@rapid7.com)
@@ -41,8 +41,17 @@ To test this engine
salt '*' test.ping cmd.run uptime
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import random
+
+# Import Python libs
+import socket
+import time
+import uuid
+
# Import Salt libs
import salt.utils.event
import salt.utils.json
@@ -50,6 +59,7 @@ import salt.utils.json
# Import third party libs
try:
import certifi
+
HAS_CERTIFI = True
except ImportError:
HAS_CERTIFI = False
@@ -58,16 +68,11 @@ except ImportError:
# encrypted tcp connection
try:
import ssl
+
HAS_SSL = True
except ImportError: # for systems without TLS support.
HAS_SSL = False
-# Import Python libs
-import socket
-import random
-import time
-import uuid
-import logging
log = logging.getLogger(__name__)
@@ -77,11 +82,9 @@ def __virtual__():
class PlainTextSocketAppender(object):
- def __init__(self,
- verbose=True,
- LE_API='data.logentries.com',
- LE_PORT=80,
- LE_TLS_PORT=443):
+ def __init__(
+ self, verbose=True, LE_API="data.logentries.com", LE_PORT=80, LE_TLS_PORT=443
+ ):
self.LE_API = LE_API
self.LE_PORT = LE_PORT
@@ -89,10 +92,12 @@ class PlainTextSocketAppender(object):
self.MIN_DELAY = 0.1
self.MAX_DELAY = 10
# Error message displayed when an incorrect Token has been detected
- self.INVALID_TOKEN = ("\n\nIt appears the LOGENTRIES_TOKEN "
- "parameter you entered is incorrect!\n\n")
+ self.INVALID_TOKEN = (
+ "\n\nIt appears the LOGENTRIES_TOKEN "
+ "parameter you entered is incorrect!\n\n"
+ )
# Encoded unicode line separator
- self.LINE_SEP = salt.utils.stringutils.to_str('\u2028')
+ self.LINE_SEP = salt.utils.stringutils.to_str("\u2028")
self.verbose = verbose
self._conn = None
@@ -111,7 +116,7 @@ class PlainTextSocketAppender(object):
return
except Exception: # pylint: disable=broad-except
if self.verbose:
- log.warning('Unable to connect to Logentries')
+ log.warning("Unable to connect to Logentries")
root_delay *= 2
if root_delay > self.MAX_DELAY:
@@ -130,7 +135,9 @@ class PlainTextSocketAppender(object):
def put(self, data):
# Replace newlines with Unicode line separator for multi-line events
- multiline = data.replace('\n', self.LINE_SEP) + str('\n') # future lint: disable=blacklisted-function
+ multiline = data.replace("\n", self.LINE_SEP) + str(
+ "\n"
+ ) # future lint: disable=blacklisted-function
# Send data, reconnect if needed
while True:
try:
@@ -145,6 +152,7 @@ class PlainTextSocketAppender(object):
try:
import ssl
+
HAS_SSL = True
except ImportError: # for systems without TLS support.
SocketAppender = PlainTextSocketAppender
@@ -160,11 +168,11 @@ else:
certfile=None,
server_side=False,
cert_reqs=ssl.CERT_REQUIRED,
- ssl_version=getattr(
- ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
+ ssl_version=getattr(ssl, "PROTOCOL_TLSv1_2", ssl.PROTOCOL_TLSv1),
ca_certs=certifi.where(),
do_handshake_on_connect=True,
- suppress_ragged_eofs=True, )
+ suppress_ragged_eofs=True,
+ )
sock.connect((self.LE_API, self.LE_TLS_PORT))
self._conn = sock
@@ -172,34 +180,36 @@ else:
def event_bus_context(opts):
- if opts.get('id').endswith('_master'):
+ if opts.get("id").endswith("_master"):
event_bus = salt.utils.event.get_master_event(
- opts,
- opts['sock_dir'],
- listen=True)
+ opts, opts["sock_dir"], listen=True
+ )
else:
event_bus = salt.utils.event.get_event(
- 'minion',
- transport=opts['transport'],
+ "minion",
+ transport=opts["transport"],
opts=opts,
- sock_dir=opts['sock_dir'],
- listen=True)
+ sock_dir=opts["sock_dir"],
+ listen=True,
+ )
return event_bus
-def start(endpoint='data.logentries.com',
- port=10000,
- token=None,
- tag='salt/engines/logentries'):
- '''
+def start(
+ endpoint="data.logentries.com",
+ port=10000,
+ token=None,
+ tag="salt/engines/logentries",
+):
+ """
Listen to salt events and forward them to Logentries
- '''
+ """
with event_bus_context(__opts__) as event_bus:
- log.debug('Logentries engine started')
+ log.debug("Logentries engine started")
try:
val = uuid.UUID(token)
except ValueError:
- log.warning('Not a valid logentries token')
+ log.warning("Not a valid logentries token")
appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port)
appender.reopen_connection()
@@ -208,11 +218,13 @@ def start(endpoint='data.logentries.com',
event = event_bus.get_event()
if event:
# future lint: disable=blacklisted-function
- msg = str(' ').join((
- salt.utils.stringutils.to_str(token),
- salt.utils.stringutils.to_str(tag),
- salt.utils.json.dumps(event)
- ))
+ msg = str(" ").join(
+ (
+ salt.utils.stringutils.to_str(token),
+ salt.utils.stringutils.to_str(tag),
+ salt.utils.json.dumps(event),
+ )
+ )
# future lint: enable=blacklisted-function
appender.put(msg)
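Reviewer aside (not part of the patch): PlainTextSocketAppender.put collapses embedded newlines to U+2028 so a multi-line event still arrives as a single Logentries record. The transformation on an invented two-line payload:

    LINE_SEP = "\u2028"  # Unicode line separator

    data = "first line\nsecond line"  # invented event text
    multiline = data.replace("\n", LINE_SEP) + "\n"
    print(repr(multiline))  # -> 'first line\u2028second line\n'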
diff --git a/salt/engines/logstash_engine.py b/salt/engines/logstash_engine.py
index 78a7c21d539..c699e0dfc4a 100644
--- a/salt/engines/logstash_engine.py
+++ b/salt/engines/logstash_engine.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that reads messages from the salt event bus and pushes
them onto a logstash endpoint.
@@ -18,10 +18,11 @@ them onto a logstash endpoint.
proto: tcp
:depends: logstash
-'''
+"""
# Import python libraries
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
@@ -35,47 +36,49 @@ except ImportError:
log = logging.getLogger(__name__)
-__virtualname__ = 'logstash'
+__virtualname__ = "logstash"
def __virtual__():
- return __virtualname__ \
- if logstash is not None \
- else (False, 'python-logstash not installed')
+ return (
+ __virtualname__
+ if logstash is not None
+ else (False, "python-logstash not installed")
+ )
def event_bus_context(opts):
- if opts.get('id').endswith('_master'):
+ if opts.get("id").endswith("_master"):
event_bus = salt.utils.event.get_master_event(
- opts,
- opts['sock_dir'],
- listen=True)
+ opts, opts["sock_dir"], listen=True
+ )
else:
event_bus = salt.utils.event.get_event(
- 'minion',
- transport=opts['transport'],
+ "minion",
+ transport=opts["transport"],
opts=opts,
- sock_dir=opts['sock_dir'],
- listen=True)
+ sock_dir=opts["sock_dir"],
+ listen=True,
+ )
return event_bus
-def start(host, port=5959, tag='salt/engine/logstash', proto='udp'):
- '''
+def start(host, port=5959, tag="salt/engine/logstash", proto="udp"):
+ """
Listen to salt events and forward them to logstash
- '''
+ """
- if proto == 'tcp':
+ if proto == "tcp":
logstashHandler = logstash.TCPLogstashHandler
- elif proto == 'udp':
+ elif proto == "udp":
logstashHandler = logstash.UDPLogstashHandler
- logstash_logger = logging.getLogger('python-logstash-logger')
+ logstash_logger = logging.getLogger("python-logstash-logger")
logstash_logger.setLevel(logging.INFO)
logstash_logger.addHandler(logstashHandler(host, port, version=1))
with event_bus_context(__opts__) as event_bus:
- log.debug('Logstash engine started')
+ log.debug("Logstash engine started")
while True:
event = event_bus.get_event()
if event:
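
One detail worth noting in the hunk above: if ``proto`` is neither ``tcp`` nor ``udp``, ``logstashHandler`` is never bound and the subsequent call raises ``NameError``; a dict dispatch avoids that. A sketch under that assumption, using stdlib socket handlers as stand-ins so it runs without ``python-logstash``:

    import logging
    import logging.handlers

    # Stand-ins for logstash.TCPLogstashHandler / logstash.UDPLogstashHandler
    HANDLER_MAP = {
        "tcp": logging.handlers.SocketHandler,
        "udp": logging.handlers.DatagramHandler,
    }

    def make_logstash_logger(host, port=5959, proto="udp"):
        try:
            handler_cls = HANDLER_MAP[proto]
        except KeyError:
            raise ValueError("proto must be 'tcp' or 'udp', got {!r}".format(proto))
        logger = logging.getLogger("python-logstash-logger")
        logger.setLevel(logging.INFO)
        logger.addHandler(handler_cls(host, port))
        return logger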
diff --git a/salt/engines/napalm_syslog.py b/salt/engines/napalm_syslog.py
index f0cc02b7b98..550e84aa0a9 100644
--- a/salt/engines/napalm_syslog.py
+++ b/salt/engines/napalm_syslog.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
NAPALM syslog engine
====================
@@ -167,12 +167,17 @@ by the user in their environment and the complete OpenConfig object under
the variable name ``openconfig_structure``. Inside the Jinja template, the user
can process the object from ``openconfig_structure`` and define the business
logic as required.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import logging
+# Import salt libs
+import salt.utils.event as event
+import salt.utils.network
+import salt.utils.stringutils
+
# Import third party libraries
from salt.utils.zeromq import zmq
@@ -180,22 +185,19 @@ try:
# pylint: disable=W0611
import napalm_logs
import napalm_logs.utils
+
# pylint: enable=W0611
HAS_NAPALM_LOGS = True
except ImportError:
HAS_NAPALM_LOGS = False
-# Import salt libs
-import salt.utils.event as event
-import salt.utils.network
-import salt.utils.stringutils
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
log = logging.getLogger(__name__)
-__virtualname__ = 'napalm_syslog'
+__virtualname__ = "napalm_syslog"
# ----------------------------------------------------------------------------------------------------------------------
# helpers
@@ -203,12 +205,15 @@ __virtualname__ = 'napalm_syslog'
def __virtual__():
- '''
+ """
Load only if napalm-logs is installed.
- '''
+ """
if not HAS_NAPALM_LOGS or not zmq:
- return (False, 'napalm_syslog could not be loaded. \
- Please install napalm-logs library amd ZeroMQ.')
+ return (
+ False,
+ "napalm_syslog could not be loaded. \
+            Please install the napalm-logs library and ZeroMQ.",
+ )
return True
@@ -217,48 +222,41 @@ def _zmq(address, port, **kwargs):
socket = context.socket(zmq.SUB)
if salt.utils.network.is_ipv6(address):
socket.ipv6 = True
- socket.connect('tcp://{addr}:{port}'.format(
- addr=address,
- port=port)
- )
- socket.setsockopt(zmq.SUBSCRIBE, b'')
+ socket.connect("tcp://{addr}:{port}".format(addr=address, port=port))
+ socket.setsockopt(zmq.SUBSCRIBE, b"")
return socket.recv
-def _get_transport_recv(name='zmq',
- address='0.0.0.0',
- port=49017,
- **kwargs):
+def _get_transport_recv(name="zmq", address="0.0.0.0", port=49017, **kwargs):
if name not in TRANSPORT_FUN_MAP:
- log.error('Invalid transport: %s. Falling back to ZeroMQ.', name)
- name = 'zmq'
+ log.error("Invalid transport: %s. Falling back to ZeroMQ.", name)
+ name = "zmq"
return TRANSPORT_FUN_MAP[name](address, port, **kwargs)
-TRANSPORT_FUN_MAP = {
- 'zmq': _zmq,
- 'zeromq': _zmq
-}
+TRANSPORT_FUN_MAP = {"zmq": _zmq, "zeromq": _zmq}
# ----------------------------------------------------------------------------------------------------------------------
# main
# ----------------------------------------------------------------------------------------------------------------------
-def start(transport='zmq',
- address='0.0.0.0',
- port=49017,
- auth_address='0.0.0.0',
- auth_port=49018,
- disable_security=False,
- certificate=None,
- os_whitelist=None,
- os_blacklist=None,
- error_whitelist=None,
- error_blacklist=None,
- host_whitelist=None,
- host_blacklist=None):
- '''
+def start(
+ transport="zmq",
+ address="0.0.0.0",
+ port=49017,
+ auth_address="0.0.0.0",
+ auth_port=49018,
+ disable_security=False,
+ certificate=None,
+ os_whitelist=None,
+ os_blacklist=None,
+ error_whitelist=None,
+ error_blacklist=None,
+ host_whitelist=None,
+ host_blacklist=None,
+):
+ """
Listen to napalm-logs and publish events into the Salt event bus.
transport: ``zmq``
@@ -304,75 +302,73 @@ def start(transport='zmq',
host_blacklist: ``None``
        List of hosts or IPs to be ignored.
- '''
+ """
if not disable_security:
if not certificate:
- log.critical('Please use a certificate, or disable the security.')
+ log.critical("Please use a certificate, or disable the security.")
return
- auth = napalm_logs.utils.ClientAuth(certificate,
- address=auth_address,
- port=auth_port)
+ auth = napalm_logs.utils.ClientAuth(
+ certificate, address=auth_address, port=auth_port
+ )
- transport_recv_fun = _get_transport_recv(name=transport,
- address=address,
- port=port)
+ transport_recv_fun = _get_transport_recv(name=transport, address=address, port=port)
if not transport_recv_fun:
- log.critical('Unable to start the engine', exc_info=True)
+ log.critical("Unable to start the engine", exc_info=True)
return
master = False
- if __opts__['__role'] == 'master':
+ if __opts__["__role"] == "master":
master = True
while True:
- log.debug('Waiting for napalm-logs to send anything...')
+ log.debug("Waiting for napalm-logs to send anything...")
raw_object = transport_recv_fun()
- log.debug('Received from napalm-logs:')
+ log.debug("Received from napalm-logs:")
log.debug(raw_object)
if not disable_security:
dict_object = auth.decrypt(raw_object)
else:
dict_object = napalm_logs.utils.unserialize(raw_object)
try:
- event_os = dict_object['os']
+ event_os = dict_object["os"]
if os_blacklist or os_whitelist:
valid_os = salt.utils.stringutils.check_whitelist_blacklist(
- event_os,
- whitelist=os_whitelist,
- blacklist=os_blacklist)
+ event_os, whitelist=os_whitelist, blacklist=os_blacklist
+ )
if not valid_os:
- log.info('Ignoring NOS %s as per whitelist/blacklist', event_os)
+ log.info("Ignoring NOS %s as per whitelist/blacklist", event_os)
continue
- event_error = dict_object['error']
+ event_error = dict_object["error"]
if error_blacklist or error_whitelist:
valid_error = salt.utils.stringutils.check_whitelist_blacklist(
- event_error,
- whitelist=error_whitelist,
- blacklist=error_blacklist)
+ event_error, whitelist=error_whitelist, blacklist=error_blacklist
+ )
if not valid_error:
- log.info('Ignoring error %s as per whitelist/blacklist', event_error)
+ log.info(
+ "Ignoring error %s as per whitelist/blacklist", event_error
+ )
continue
- event_host = dict_object.get('host') or dict_object.get('ip')
+ event_host = dict_object.get("host") or dict_object.get("ip")
if host_blacklist or host_whitelist:
valid_host = salt.utils.stringutils.check_whitelist_blacklist(
- event_host,
- whitelist=host_whitelist,
- blacklist=host_blacklist)
+ event_host, whitelist=host_whitelist, blacklist=host_blacklist
+ )
if not valid_host:
- log.info('Ignoring messages from %s as per whitelist/blacklist', event_host)
+ log.info(
+ "Ignoring messages from %s as per whitelist/blacklist",
+ event_host,
+ )
continue
- tag = 'napalm/syslog/{os}/{error}/{host}'.format(
- os=event_os,
- error=event_error,
- host=event_host
+ tag = "napalm/syslog/{os}/{error}/{host}".format(
+ os=event_os, error=event_error, host=event_host
)
except KeyError as kerr:
- log.warning('Missing keys from the napalm-logs object:', exc_info=True)
+ log.warning("Missing keys from the napalm-logs object:", exc_info=True)
log.warning(dict_object)
continue # jump to the next object in the queue
- log.debug('Sending event %s', tag)
+ log.debug("Sending event %s", tag)
log.debug(raw_object)
if master:
- event.get_master_event(__opts__,
- __opts__['sock_dir']
- ).fire_event(dict_object, tag)
+ event.get_master_event(__opts__, __opts__["sock_dir"]).fire_event(
+ dict_object, tag
+ )
else:
- __salt__['event.send'](tag, dict_object)
+ __salt__["event.send"](tag, dict_object)
diff --git a/salt/engines/reactor.py b/salt/engines/reactor.py
index af3c90f64ee..9f8c71884e1 100644
--- a/salt/engines/reactor.py
+++ b/salt/engines/reactor.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Setup Reactor
Example Config in Master or Minion config
@@ -16,21 +16,20 @@ Example Config in Master or Minion config
- 'salt/cloud/*/destroyed':
- /srv/reactor/destroy/*.sls
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-
# Import salt libs
import salt.utils.reactor
def start(refresh_interval=None, worker_threads=None, worker_hwm=None):
if refresh_interval is not None:
- __opts__['reactor_refresh_interval'] = refresh_interval
+ __opts__["reactor_refresh_interval"] = refresh_interval
if worker_threads is not None:
- __opts__['reactor_worker_threads'] = worker_threads
+ __opts__["reactor_worker_threads"] = worker_threads
if worker_hwm is not None:
- __opts__['reactor_worker_hwm'] = worker_hwm
+ __opts__["reactor_worker_hwm"] = worker_hwm
salt.utils.reactor.Reactor(__opts__).run()
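
The reactor engine's ``start`` above is just an opts-override shim: only explicitly passed arguments replace the corresponding ``reactor_*`` options before the Reactor runs. A standalone sketch of that pattern:

    def apply_overrides(opts, **overrides):
        # Only non-None overrides replace existing option values.
        for key, value in overrides.items():
            if value is not None:
                opts[key] = value
        return opts

    opts = {"reactor_refresh_interval": 60, "reactor_worker_threads": 10}
    apply_overrides(opts, reactor_refresh_interval=120, reactor_worker_hwm=None)
    # opts is now {'reactor_refresh_interval': 120, 'reactor_worker_threads': 10}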
diff --git a/salt/engines/redis_sentinel.py b/salt/engines/redis_sentinel.py
index ec1c276c141..a89464b2690 100644
--- a/salt/engines/redis_sentinel.py
+++ b/salt/engines/redis_sentinel.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that reads messages from the redis sentinel pubsub and sends reactor
events based on the channels they are subscribed to.
@@ -23,10 +23,11 @@ events based on the channels they are subscribed to.
- '-odown'
:depends: redis
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
@@ -42,64 +43,77 @@ except ImportError:
log = logging.getLogger(__name__)
-__virtualname__ = 'redis'
+__virtualname__ = "redis"
log = logging.getLogger(__name__)
def __virtual__():
- return __virtualname__ \
- if redis is not None \
- else (False, 'redis python module is not installed')
+ return (
+ __virtualname__
+ if redis is not None
+ else (False, "redis python module is not installed")
+ )
class Listener(object):
def __init__(self, host=None, port=None, channels=None, tag=None):
if host is None:
- host = 'localhost'
+ host = "localhost"
if port is None:
port = 26379
if channels is None:
- channels = ['*']
+ channels = ["*"]
if tag is None:
- tag = 'salt/engine/redis_sentinel'
+ tag = "salt/engine/redis_sentinel"
super(Listener, self).__init__()
self.tag = tag
self.redis = redis.StrictRedis(host=host, port=port, decode_responses=True)
self.pubsub = self.redis.pubsub()
self.pubsub.psubscribe(channels)
- self.fire_master = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir']).fire_event
+ self.fire_master = salt.utils.event.get_master_event(
+ __opts__, __opts__["sock_dir"]
+ ).fire_event
def work(self, item):
- ret = {'channel': item['channel']}
- if isinstance(item['data'], six.integer_types):
- ret['code'] = item['data']
- elif item['channel'] == '+switch-master':
- ret.update(dict(list(zip(
- ('master', 'old_host', 'old_port', 'new_host', 'new_port'), item['data'].split(' ')
- ))))
- elif item['channel'] in ('+odown', '-odown'):
- ret.update(dict(list(zip(
- ('master', 'host', 'port'), item['data'].split(' ')[1:]
- ))))
+ ret = {"channel": item["channel"]}
+ if isinstance(item["data"], six.integer_types):
+ ret["code"] = item["data"]
+ elif item["channel"] == "+switch-master":
+ ret.update(
+ dict(
+ list(
+ zip(
+ ("master", "old_host", "old_port", "new_host", "new_port"),
+ item["data"].split(" "),
+ )
+ )
+ )
+ )
+ elif item["channel"] in ("+odown", "-odown"):
+ ret.update(
+ dict(list(zip(("master", "host", "port"), item["data"].split(" ")[1:])))
+ )
else:
ret = {
- 'channel': item['channel'],
- 'data': item['data'],
+ "channel": item["channel"],
+ "data": item["data"],
}
- self.fire_master(ret, '{0}/{1}'.format(self.tag, item['channel']))
+ self.fire_master(ret, "{0}/{1}".format(self.tag, item["channel"]))
def run(self):
- log.debug('Start Listener')
+ log.debug("Start Listener")
for item in self.pubsub.listen():
- log.debug('Item: %s', item)
+ log.debug("Item: %s", item)
self.work(item)
def start(hosts, channels, tag=None):
if tag is None:
- tag = 'salt/engine/redis_sentinel'
+ tag = "salt/engine/redis_sentinel"
local = salt.client.LocalClient()
- ips = local.cmd(hosts['matching'], 'network.ip_addrs', [hosts['interface']]).values()
- client = Listener(host=ips.pop()[0], port=hosts['port'], channels=channels, tag=tag)
+ ips = local.cmd(
+ hosts["matching"], "network.ip_addrs", [hosts["interface"]]
+ ).values()
+ client = Listener(host=ips.pop()[0], port=hosts["port"], channels=channels, tag=tag)
client.run()
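
The nested ``dict(list(zip(...)))`` construction above unpacks sentinel's space-delimited pubsub payloads into named fields. A standalone sketch of the ``+switch-master`` case (the payload value is illustrative; ``list()`` is only needed for the py2/py3 lint the codebase enforces):

    item = {
        "channel": "+switch-master",
        "data": "mymaster 10.0.0.1 6379 10.0.0.2 6379",
    }
    ret = {"channel": item["channel"]}
    ret.update(
        dict(
            zip(
                ("master", "old_host", "old_port", "new_host", "new_port"),
                item["data"].split(" "),
            )
        )
    )
    # {'channel': '+switch-master', 'master': 'mymaster',
    #  'old_host': '10.0.0.1', 'old_port': '6379', ...}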
diff --git a/salt/engines/script.py b/salt/engines/script.py
index aa61210e949..b46772cd3b2 100644
--- a/salt/engines/script.py
+++ b/salt/engines/script.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Send events based on a script's stdout
Example Config
@@ -18,48 +18,50 @@ Script engine configs:
output: Any available saltstack deserializer
interval: How often in seconds to execute the command
-'''
+"""
from __future__ import absolute_import, print_function
+
import logging
import shlex
-import time
import subprocess
+import time
+
+import salt.loader
# import salt libs
import salt.utils.event
import salt.utils.process
-import salt.loader
from salt.exceptions import CommandExecutionError
-
from salt.ext import six
-
log = logging.getLogger(__name__)
def _read_stdout(proc):
- '''
+ """
Generator that returns stdout
- '''
+ """
for line in iter(proc.stdout.readline, ""):
yield line
def _get_serializer(output):
- '''
+ """
    Helper to return a known serializer based on
    the passed output argument
- '''
+ """
serializers = salt.loader.serializers(__opts__)
try:
return getattr(serializers, output)
except AttributeError:
- raise CommandExecutionError('Unknown serializer `{}` found for output option'.format(output))
+ raise CommandExecutionError(
+ "Unknown serializer `{}` found for output option".format(output)
+ )
-def start(cmd, output='json', interval=1):
- '''
+def start(cmd, output="json", interval=1):
+ """
Parse stdout of a command and generate an event
    The script engine will scrape stdout of the
@@ -73,17 +75,19 @@ def start(cmd, output='json', interval=1):
Given the following json output from a script:
- { "tag" : "lots/of/tacos",
- "data" : { "toppings" : "cilantro" }
- }
+ .. code-block:: json
+
+ { "tag" : "lots/of/tacos",
+ "data" : { "toppings" : "cilantro" }
+ }
This will fire the event 'lots/of/tacos'
on the event bus with the data obj as is.
:param cmd: The command to execute
:param output: How to deserialize stdout of the script
- :param interval: How often to execute the script.
- '''
+ :param interval: How often to execute the script
+ """
try:
cmd = shlex.split(cmd)
except AttributeError:
@@ -92,19 +96,19 @@ def start(cmd, output='json', interval=1):
serializer = _get_serializer(output)
- if __opts__.get('__role') == 'master':
+ if __opts__.get("__role") == "master":
fire_master = salt.utils.event.get_master_event(
- __opts__,
- __opts__['sock_dir']).fire_event
+ __opts__, __opts__["sock_dir"]
+ ).fire_event
else:
- fire_master = __salt__['event.send']
+ fire_master = __salt__["event.send"]
while True:
try:
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ proc = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ )
log.debug("Starting script with pid %d", proc.pid)
@@ -112,11 +116,11 @@ def start(cmd, output='json', interval=1):
log.debug(raw_event)
event = serializer.deserialize(raw_event)
- tag = event.get('tag', None)
- data = event.get('data', {})
+ tag = event.get("tag", None)
+ data = event.get("data", {})
- if data and 'id' not in data:
- data['id'] = __opts__['id']
+ if data and "id" not in data:
+ data["id"] = __opts__["id"]
if tag:
log.info("script engine firing event with tag %s", tag)
diff --git a/salt/engines/slack.py b/salt/engines/slack.py
index e0094b1a7a3..46256b99296 100644
--- a/salt/engines/slack.py
+++ b/salt/engines/slack.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that reads messages from Slack and can act on them
    .. versionadded:: 2016.3.0
@@ -144,30 +144,24 @@ must be quoted, or else PyYAML will fail to load the configuration.
commands:
- '*'
-'''
+"""
# Import python libraries
from __future__ import absolute_import, print_function, unicode_literals
+
import ast
import datetime
import itertools
import logging
-import time
import re
+import time
import traceback
-log = logging.getLogger(__name__)
-
-try:
- import slackclient
- HAS_SLACKCLIENT = True
-except ImportError:
- HAS_SLACKCLIENT = False
-
# Import salt libs
import salt.client
import salt.loader
import salt.minion
+import salt.output
import salt.runner
import salt.utils.args
import salt.utils.event
@@ -175,15 +169,24 @@ import salt.utils.http
import salt.utils.json
import salt.utils.slack
import salt.utils.yaml
-import salt.output
from salt.ext import six
-__virtualname__ = 'slack'
+log = logging.getLogger(__name__)
+
+try:
+ import slackclient
+
+ HAS_SLACKCLIENT = True
+except ImportError:
+ HAS_SLACKCLIENT = False
+
+
+__virtualname__ = "slack"
def __virtual__():
if not HAS_SLACKCLIENT:
- return (False, 'The \'slackclient\' Python module could not be loaded')
+ return (False, "The 'slackclient' Python module could not be loaded")
return __virtualname__
@@ -195,56 +198,52 @@ class SlackClient(object):
self.slack_connect = self.sc.rtm_connect()
def get_slack_users(self, token):
- '''
+ """
Get all users from Slack
- '''
+ """
- ret = salt.utils.slack.query(function='users',
- api_key=token,
- opts=__opts__)
+ ret = salt.utils.slack.query(function="users", api_key=token, opts=__opts__)
users = {}
- if 'message' in ret:
- for item in ret['message']:
- if 'is_bot' in item:
- if not item['is_bot']:
- users[item['name']] = item['id']
- users[item['id']] = item['name']
+ if "message" in ret:
+ for item in ret["message"]:
+ if "is_bot" in item:
+ if not item["is_bot"]:
+ users[item["name"]] = item["id"]
+ users[item["id"]] = item["name"]
return users
def get_slack_channels(self, token):
- '''
+ """
Get all channel names from Slack
- '''
+ """
ret = salt.utils.slack.query(
- function='rooms',
+ function="rooms",
api_key=token,
# These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged
- opts={
- 'exclude_archived': True,
- 'exclude_members': True
- })
+ opts={"exclude_archived": True, "exclude_members": True},
+ )
channels = {}
- if 'message' in ret:
- for item in ret['message']:
- channels[item['id']] = item['name']
+ if "message" in ret:
+ for item in ret["message"]:
+ channels[item["id"]] = item["name"]
return channels
def get_config_groups(self, groups_conf, groups_pillar_name):
- '''
+ """
get info from groups in config, and from the named pillar
todo: add specification for the minion to use to recover pillar
- '''
+ """
# Get groups
# Default to returning something that'll never match
ret_groups = {
- 'default': {
- 'users': set(),
- 'commands': set(),
- 'aliases': {},
- 'default_target': {},
- 'targets': {}
+ "default": {
+ "users": set(),
+ "commands": set(),
+ "aliases": {},
+ "default_target": {},
+ "targets": {},
}
}
@@ -257,36 +256,50 @@ class SlackClient(object):
# First obtain group lists from pillars, then in case there is any overlap, iterate over the groups
# that come from pillars. The configuration in files on disk/from startup
# will override any configs from pillars. They are meant to be complementary not to provide overrides.
- log.debug('use_groups %s', use_groups)
+ log.debug("use_groups %s", use_groups)
try:
- groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items())
+ groups_gen = itertools.chain(
+ self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()
+ )
except AttributeError:
- log.warning('Failed to get groups from %s: %s or from config: %s',
+ log.warning(
+ "Failed to get groups from %s: %s or from config: %s",
groups_pillar_name,
self._groups_from_pillar(groups_pillar_name),
- use_groups
+ use_groups,
)
groups_gen = []
for name, config in groups_gen:
- log.info('Trying to get %s and %s to be useful', name, config)
- ret_groups.setdefault(name, {
- 'users': set(), 'commands': set(), 'aliases': {},
- 'default_target': {}, 'targets': {}
- })
+ log.info("Trying to get %s and %s to be useful", name, config)
+ ret_groups.setdefault(
+ name,
+ {
+ "users": set(),
+ "commands": set(),
+ "aliases": {},
+ "default_target": {},
+ "targets": {},
+ },
+ )
try:
- ret_groups[name]['users'].update(set(config.get('users', [])))
- ret_groups[name]['commands'].update(set(config.get('commands', [])))
- ret_groups[name]['aliases'].update(config.get('aliases', {}))
- ret_groups[name]['default_target'].update(config.get('default_target', {}))
- ret_groups[name]['targets'].update(config.get('targets', {}))
+ ret_groups[name]["users"].update(set(config.get("users", [])))
+ ret_groups[name]["commands"].update(set(config.get("commands", [])))
+ ret_groups[name]["aliases"].update(config.get("aliases", {}))
+ ret_groups[name]["default_target"].update(
+ config.get("default_target", {})
+ )
+ ret_groups[name]["targets"].update(config.get("targets", {}))
except (IndexError, AttributeError):
- log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name)
+ log.warning(
+ "Couldn't use group %s. Check that targets is a dictionary and not a list",
+ name,
+ )
- log.debug('Got the groups: %s', ret_groups)
+ log.debug("Got the groups: %s", ret_groups)
return ret_groups
def _groups_from_pillar(self, pillar_name):
- '''
+ """
pillar_prefix is the pillar.get syntax for the pillar to be queried.
    Group name is obtained via the equivalent of using
``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))``
@@ -295,36 +308,36 @@ class SlackClient(object):
returns a dictionary (unless the pillar is mis-formatted)
    XXX: instead of using Caller, make the minion used configurable so there could be some
restrictions placed on what pillars can be used.
- '''
- if pillar_name and __opts__['__role'] == 'minion':
- pillar_groups = __salt__['pillar.get'](pillar_name, {})
- log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name)
- log.debug('pillar groups is %s', pillar_groups)
- log.debug('pillar groups type is %s', type(pillar_groups))
+ """
+ if pillar_name and __opts__["__role"] == "minion":
+ pillar_groups = __salt__["pillar.get"](pillar_name, {})
+ log.debug("Got pillar groups %s from pillar %s", pillar_groups, pillar_name)
+ log.debug("pillar groups is %s", pillar_groups)
+ log.debug("pillar groups type is %s", type(pillar_groups))
else:
pillar_groups = {}
return pillar_groups
def fire(self, tag, msg):
- '''
+ """
This replaces a function in main called 'fire'
It fires an event into the salt bus.
- '''
- if __opts__.get('__role') == 'master':
+ """
+ if __opts__.get("__role") == "master":
fire_master = salt.utils.event.get_master_event(
- __opts__,
- __opts__['sock_dir']).fire_master
+ __opts__, __opts__["sock_dir"]
+ ).fire_master
else:
fire_master = None
if fire_master:
fire_master(msg, tag)
else:
- __salt__['event.send'](tag, msg)
+ __salt__["event.send"](tag, msg)
def can_user_run(self, user, command, groups):
- '''
+ """
Break out the permissions into the following:
Check whether a user is in any group, including whether a group has the '*' membership
@@ -345,42 +358,51 @@ class SlackClient(object):
On failure it returns an empty tuple
- '''
- log.info('%s wants to run %s with groups %s', user, command, groups)
+ """
+ log.info("%s wants to run %s with groups %s", user, command, groups)
for key, val in groups.items():
- if user not in val['users']:
- if '*' not in val['users']:
+ if user not in val["users"]:
+ if "*" not in val["users"]:
continue # this doesn't grant permissions, pass
- if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()):
- if '*' not in val['commands']:
+ if (command not in val["commands"]) and (
+ command not in val.get("aliases", {}).keys()
+ ):
+ if "*" not in val["commands"]:
continue # again, pass
- log.info('Slack user %s permitted to run %s', user, command)
- return (key, val,) # matched this group, return the group
- log.info('Slack user %s denied trying to run %s', user, command)
+ log.info("Slack user %s permitted to run %s", user, command)
+ return (
+ key,
+ val,
+ ) # matched this group, return the group
+ log.info("Slack user %s denied trying to run %s", user, command)
return ()
def commandline_to_list(self, cmdline_str, trigger_string):
- '''
+ """
cmdline_str is the string of the command line
trigger_string is the trigger string, to be removed
- '''
- cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):])
+ """
+ cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string) :])
# Remove slack url parsing
# Translate target=
# to target=host.domain.net
cmdlist = []
for cmditem in cmdline:
- pattern = r'(?P.*)(<.*\|)(?P.*)(>)(?P.*)'
+ pattern = r"(?P.*)(<.*\|)(?P.*)(>)(?P.*)"
mtch = re.match(pattern, cmditem)
if mtch:
- origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder')
+ origtext = (
+ mtch.group("begin") + mtch.group("url") + mtch.group("remainder")
+ )
cmdlist.append(origtext)
else:
cmdlist.append(cmditem)
return cmdlist
- def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string):
- '''Returns a tuple of (target, cmdline,) for the response
+ def control_message_target(
+ self, slack_user_name, text, loaded_groups, trigger_string
+ ):
+ """Returns a tuple of (target, cmdline,) for the response
Raises IndexError if a user can't be looked up from all_slack_users
@@ -393,13 +415,17 @@ class SlackClient(object):
The cmdline that is returned is the actual list that should be
processed by salt, and not the alias.
- '''
+ """
# Trim the trigger string from the front
# cmdline = _text[1:].split(' ', 1)
cmdline = self.commandline_to_list(text, trigger_string)
permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups)
- log.debug('slack_user_name is %s and the permitted group is %s', slack_user_name, permitted_group)
+ log.debug(
+ "slack_user_name is %s and the permitted group is %s",
+ slack_user_name,
+ permitted_group,
+ )
if not permitted_group:
return (False, None, cmdline[0])
@@ -407,8 +433,10 @@ class SlackClient(object):
return (False, None, cmdline[0])
# maybe there are aliases, so check on that
- if cmdline[0] in permitted_group[1].get('aliases', {}).keys():
- use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]].get('cmd', ''), '')
+ if cmdline[0] in permitted_group[1].get("aliases", {}).keys():
+ use_cmdline = self.commandline_to_list(
+ permitted_group[1]["aliases"][cmdline[0]].get("cmd", ""), ""
+ )
# Include any additional elements from cmdline
use_cmdline.extend(cmdline[1:])
else:
@@ -417,36 +445,40 @@ class SlackClient(object):
# Remove target and tgt_type from commandline
# that is sent along to Salt
- use_cmdline = [item for item
- in use_cmdline
- if all(not item.startswith(x) for x in ('target', 'tgt_type'))]
+ use_cmdline = [
+ item
+ for item in use_cmdline
+ if all(not item.startswith(x) for x in ("target", "tgt_type"))
+ ]
return (True, target, use_cmdline)
def message_text(self, m_data):
- '''
+ """
Raises ValueError if a value doesn't work out, and TypeError if
this isn't a message type
- '''
- if m_data.get('type') != 'message':
- raise TypeError('This is not a message')
+ """
+ if m_data.get("type") != "message":
+ raise TypeError("This is not a message")
# Edited messages have text in message
- _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None)
+ _text = m_data.get("text", None) or m_data.get("message", {}).get("text", None)
try:
- log.info('Message is %s', _text) # this can violate the ascii codec
+ log.info("Message is %s", _text) # this can violate the ascii codec
except UnicodeEncodeError as uee:
- log.warning('Got a message that I could not log. The reason is: %s', uee)
+ log.warning("Got a message that I could not log. The reason is: %s", uee)
# Convert UTF to string
_text = salt.utils.json.dumps(_text)
_text = salt.utils.yaml.safe_load(_text)
if not _text:
- raise ValueError('_text has no value')
+ raise ValueError("_text has no value")
return _text
- def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name):
- '''
+ def generate_triggered_messages(
+ self, token, trigger_string, groups, groups_pillar_name
+ ):
+ """
slack_token = string
trigger_string = string
input_valid_users = set
@@ -472,44 +504,50 @@ class SlackClient(object):
its own processing and check back for more data later.
This relies on the caller sleeping between checks, otherwise this could flood
- '''
- all_slack_users = self.get_slack_users(token) # re-checks this if we have an negative lookup result
- all_slack_channels = self.get_slack_channels(token) # re-checks this if we have an negative lookup result
+ """
+ all_slack_users = self.get_slack_users(
+ token
+        )  # re-checks this if we have a negative lookup result
+ all_slack_channels = self.get_slack_channels(
+ token
+        )  # re-checks this if we have a negative lookup result
def just_data(m_data):
- '''Always try to return the user and channel anyway'''
- if 'user' not in m_data:
- if 'message' in m_data and 'user' in m_data['message']:
- log.debug('Message was edited, '
- 'so we look for user in '
- 'the original message.')
- user_id = m_data['message']['user']
- elif 'comment' in m_data and 'user' in m_data['comment']:
- log.debug('Comment was added, '
- 'so we look for user in '
- 'the comment.')
- user_id = m_data['comment']['user']
+ """Always try to return the user and channel anyway"""
+ if "user" not in m_data:
+ if "message" in m_data and "user" in m_data["message"]:
+ log.debug(
+ "Message was edited, "
+ "so we look for user in "
+ "the original message."
+ )
+ user_id = m_data["message"]["user"]
+ elif "comment" in m_data and "user" in m_data["comment"]:
+ log.debug(
+ "Comment was added, " "so we look for user in " "the comment."
+ )
+ user_id = m_data["comment"]["user"]
else:
- user_id = m_data.get('user')
- channel_id = m_data.get('channel')
- if channel_id.startswith('D'): # private chate with bot user
- channel_name = 'private chat'
+ user_id = m_data.get("user")
+ channel_id = m_data.get("channel")
+ if channel_id.startswith("D"): # private chate with bot user
+ channel_name = "private chat"
else:
channel_name = all_slack_channels.get(channel_id)
data = {
- 'message_data': m_data,
- 'user_id': user_id,
- 'user_name': all_slack_users.get(user_id),
- 'channel_name': channel_name
+ "message_data": m_data,
+ "user_id": user_id,
+ "user_name": all_slack_users.get(user_id),
+ "channel_name": channel_name,
}
- if not data['user_name']:
+ if not data["user_name"]:
all_slack_users.clear()
all_slack_users.update(self.get_slack_users(token))
- data['user_name'] = all_slack_users.get(user_id)
- if not data['channel_name']:
+ data["user_name"] = all_slack_users.get(user_id)
+ if not data["channel_name"]:
all_slack_channels.clear()
all_slack_channels.update(self.get_slack_channels(token))
- data['channel_name'] = all_slack_channels.get(channel_id)
+ data["channel_name"] = all_slack_channels.get(channel_id)
return data
for sleeps in (5, 10, 30, 60):
@@ -517,59 +555,78 @@ class SlackClient(object):
break
else:
# see https://api.slack.com/docs/rate-limits
- log.warning('Slack connection is invalid. Server: %s, sleeping %s', self.sc.server, sleeps)
- time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection
+ log.warning(
+ "Slack connection is invalid. Server: %s, sleeping %s",
+ self.sc.server,
+ sleeps,
+ )
+ time.sleep(
+ sleeps
+ ) # respawning too fast makes the slack API unhappy about the next reconnection
else:
- raise UserWarning('Connection to slack is still invalid, giving up: {}'.format(self.slack_connect)) # Boom!
+ raise UserWarning(
+ "Connection to slack is still invalid, giving up: {}".format(
+ self.slack_connect
+ )
+ ) # Boom!
while True:
msg = self.sc.rtm_read()
for m_data in msg:
try:
msg_text = self.message_text(m_data)
except (ValueError, TypeError) as msg_err:
- log.debug('Got an error from trying to get the message text %s', msg_err)
- yield {'message_data': m_data} # Not a message type from the API?
+ log.debug(
+ "Got an error from trying to get the message text %s", msg_err
+ )
+ yield {"message_data": m_data} # Not a message type from the API?
continue
# Find the channel object from the channel name
- channel = self.sc.server.channels.find(m_data['channel'])
+ channel = self.sc.server.channels.find(m_data["channel"])
data = just_data(m_data)
if msg_text.startswith(trigger_string):
loaded_groups = self.get_config_groups(groups, groups_pillar_name)
- if not data.get('user_name'):
+ if not data.get("user_name"):
log.error(
- 'The user %s can not be looked up via slack. What has happened here?',
- m_data.get('user')
+ "The user %s can not be looked up via slack. What has happened here?",
+ m_data.get("user"),
)
- channel.send_message('The user {} can not be looked up via slack. Not running {}'.format(
- data['user_id'], msg_text))
- yield {'message_data': m_data}
+ channel.send_message(
+ "The user {} can not be looked up via slack. Not running {}".format(
+ data["user_id"], msg_text
+ )
+ )
+ yield {"message_data": m_data}
continue
(allowed, target, cmdline) = self.control_message_target(
- data['user_name'], msg_text, loaded_groups, trigger_string)
- log.debug('Got target: %s, cmdline: %s', target, cmdline)
+ data["user_name"], msg_text, loaded_groups, trigger_string
+ )
+ log.debug("Got target: %s, cmdline: %s", target, cmdline)
if allowed:
yield {
- 'message_data': m_data,
- 'channel': m_data['channel'],
- 'user': data['user_id'],
- 'user_name': data['user_name'],
- 'cmdline': cmdline,
- 'target': target
+ "message_data": m_data,
+ "channel": m_data["channel"],
+ "user": data["user_id"],
+ "user_name": data["user_name"],
+ "cmdline": cmdline,
+ "target": target,
}
continue
else:
- channel.send_message('{0} is not allowed to use command {1}.'.format(
- data['user_name'], cmdline))
+ channel.send_message(
+ "{0} is not allowed to use command {1}.".format(
+ data["user_name"], cmdline
+ )
+ )
yield data
continue
else:
yield data
continue
- yield {'done': True}
+ yield {"done": True}
def get_target(self, permitted_group, cmdline, alias_cmdline):
- '''
+ """
When we are permitted to run a command on a target, look to see
what the default targeting is for that group, and for that specific
command (if provided).
@@ -595,86 +652,91 @@ class SlackClient(object):
Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target
- '''
+ """
# Default to targeting all minions with a type of glob
- null_target = {'target': '*', 'tgt_type': 'glob'}
+ null_target = {"target": "*", "tgt_type": "glob"}
def check_cmd_against_group(cmd):
- '''
+ """
Validate cmd against the group to return the target, or a null target
- '''
+ """
name, group_config = permitted_group
- target = group_config.get('default_target')
+ target = group_config.get("default_target")
if not target: # Empty, None, or False
target = null_target
- if group_config.get('targets'):
- if group_config['targets'].get(cmd):
- target = group_config['targets'][cmd]
- if not target.get('target'):
- log.debug('Group %s is not configured to have a target for cmd %s.', name, cmd)
+ if group_config.get("targets"):
+ if group_config["targets"].get(cmd):
+ target = group_config["targets"][cmd]
+ if not target.get("target"):
+ log.debug(
+ "Group %s is not configured to have a target for cmd %s.", name, cmd
+ )
return target
for this_cl in cmdline, alias_cmdline:
_, kwargs = self.parse_args_and_kwargs(this_cl)
- if 'target' in kwargs:
- log.debug('target is in kwargs %s.', kwargs)
- if 'tgt_type' in kwargs:
- log.debug('tgt_type is in kwargs %s.', kwargs)
- return {'target': kwargs['target'], 'tgt_type': kwargs['tgt_type']}
- return {'target': kwargs['target'], 'tgt_type': 'glob'}
+ if "target" in kwargs:
+ log.debug("target is in kwargs %s.", kwargs)
+ if "tgt_type" in kwargs:
+ log.debug("tgt_type is in kwargs %s.", kwargs)
+ return {"target": kwargs["target"], "tgt_type": kwargs["tgt_type"]}
+ return {"target": kwargs["target"], "tgt_type": "glob"}
for this_cl in cmdline, alias_cmdline:
checked = check_cmd_against_group(this_cl[0])
- log.debug('this cmdline has target %s.', this_cl)
- if checked.get('target'):
+ log.debug("this cmdline has target %s.", this_cl)
+ if checked.get("target"):
return checked
return null_target
- def format_return_text(self, data, function, **kwargs): # pylint: disable=unused-argument
- '''
+ def format_return_text(
+ self, data, function, **kwargs
+ ): # pylint: disable=unused-argument
+ """
Print out YAML using the block mode
- '''
+ """
# emulate the yaml_out output formatter. It relies on a global __opts__ object which
# we can't obviously pass in
try:
try:
- outputter = data[next(iter(data))].get('out')
+ outputter = data[next(iter(data))].get("out")
except (StopIteration, AttributeError):
outputter = None
return salt.output.string_format(
- {x: y['return'] for x, y in six.iteritems(data)},
+ {x: y["return"] for x, y in six.iteritems(data)},
out=outputter,
opts=__opts__,
)
except Exception as exc: # pylint: disable=broad-except
import pprint
+
log.exception(
- 'Exception encountered when trying to serialize %s',
- pprint.pformat(data)
+ "Exception encountered when trying to serialize %s",
+ pprint.pformat(data),
)
- return 'Got an error trying to serialze/clean up the response'
+ return "Got an error trying to serialze/clean up the response"
def parse_args_and_kwargs(self, cmdline):
- '''
+ """
cmdline: list
returns tuple of: args (list), kwargs (dict)
- '''
+ """
# Parse args and kwargs
args = []
kwargs = {}
if len(cmdline) > 1:
for item in cmdline[1:]:
- if '=' in item:
- (key, value) = item.split('=', 1)
+ if "=" in item:
+ (key, value) = item.split("=", 1)
kwargs[key] = value
else:
args.append(item)
return (args, kwargs)
def get_jobs_from_runner(self, outstanding_jids):
- '''
+ """
Given a list of job_ids, return a dictionary of those job_ids that have
completed and their results.
@@ -683,39 +745,41 @@ class SlackClient(object):
completed.
returns a dictionary of job id: result
- '''
+ """
# Can't use the runner because of https://github.com/saltstack/salt/issues/40671
runner = salt.runner.RunnerClient(__opts__)
- source = __opts__.get('ext_job_cache')
+ source = __opts__.get("ext_job_cache")
if not source:
- source = __opts__.get('master_job_cache')
+ source = __opts__.get("master_job_cache")
results = {}
for jid in outstanding_jids:
# results[jid] = runner.cmd('jobs.lookup_jid', [jid])
- if self.master_minion.returners['{}.get_jid'.format(source)](jid):
- job_result = runner.cmd('jobs.list_job', [jid])
- jid_result = job_result.get('Result', {})
- jid_function = job_result.get('Function', {})
+ if self.master_minion.returners["{}.get_jid".format(source)](jid):
+ job_result = runner.cmd("jobs.list_job", [jid])
+ jid_result = job_result.get("Result", {})
+ jid_function = job_result.get("Function", {})
# emulate lookup_jid's return, which is just minion:return
results[jid] = {
- 'data': salt.utils.json.loads(salt.utils.json.dumps(jid_result)),
- 'function': jid_function
+ "data": salt.utils.json.loads(salt.utils.json.dumps(jid_result)),
+ "function": jid_function,
}
return results
- def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1):
- '''
+ def run_commands_from_slack_async(
+ self, message_generator, fire_all, tag, control, interval=1
+ ):
+ """
Pull any pending messages from the message_generator, sending each
one to either the event bus, the command_async or both, depending on
the values of fire_all and command
- '''
+ """
outstanding = {} # set of job_id that we need to check for
while True:
- log.trace('Sleeping for interval of %s', interval)
+ log.trace("Sleeping for interval of %s", interval)
time.sleep(interval)
# Drain the slack messages, up to 10 messages at a clip
count = 0
@@ -723,59 +787,84 @@ class SlackClient(object):
# The message_generator yields dicts. Leave this loop
# on a dict that looks like {'done': True} or when we've done it
# 10 times without taking a break.
- log.trace('Got a message from the generator: %s', msg.keys())
+ log.trace("Got a message from the generator: %s", msg.keys())
if count > 10:
- log.warning('Breaking in getting messages because count is exceeded')
+ log.warning(
+ "Breaking in getting messages because count is exceeded"
+ )
break
if not msg:
count += 1
- log.warning('Skipping an empty message.')
+ log.warning("Skipping an empty message.")
continue # This one is a dud, get the next message
- if msg.get('done'):
- log.trace('msg is done')
+ if msg.get("done"):
+ log.trace("msg is done")
break
if fire_all:
- log.debug('Firing message to the bus with tag: %s', tag)
- log.debug('%s %s', tag, msg)
- self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg)
- if control and (len(msg) > 1) and msg.get('cmdline'):
- channel = self.sc.server.channels.find(msg['channel'])
+ log.debug("Firing message to the bus with tag: %s", tag)
+ log.debug("%s %s", tag, msg)
+ self.fire(
+ "{0}/{1}".format(tag, msg["message_data"].get("type")), msg
+ )
+ if control and (len(msg) > 1) and msg.get("cmdline"):
+ channel = self.sc.server.channels.find(msg["channel"])
jid = self.run_command_async(msg)
- log.debug('Submitted a job and got jid: %s', jid)
- outstanding[jid] = msg # record so we can return messages to the caller
- channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid))
+ log.debug("Submitted a job and got jid: %s", jid)
+ outstanding[
+ jid
+ ] = msg # record so we can return messages to the caller
+ channel.send_message(
+ "@{}'s job is submitted as salt jid {}".format(
+ msg["user_name"], jid
+ )
+ )
count += 1
start_time = time.time()
- job_status = self.get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned
- log.trace('Getting %s jobs status took %s seconds', len(job_status), time.time() - start_time)
+ job_status = self.get_jobs_from_runner(
+ outstanding.keys()
+ ) # dict of job_ids:results are returned
+ log.trace(
+ "Getting %s jobs status took %s seconds",
+ len(job_status),
+ time.time() - start_time,
+ )
for jid in job_status:
- result = job_status[jid]['data']
- function = job_status[jid]['function']
+ result = job_status[jid]["data"]
+ function = job_status[jid]["function"]
if result:
- log.debug('ret to send back is %s', result)
+ log.debug("ret to send back is %s", result)
# formatting function?
this_job = outstanding[jid]
- channel = self.sc.server.channels.find(this_job['channel'])
+ channel = self.sc.server.channels.find(this_job["channel"])
return_text = self.format_return_text(result, function)
return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format(
- this_job['user_name'], this_job['cmdline'], jid, this_job['target'])
+ this_job["user_name"],
+ this_job["cmdline"],
+ jid,
+ this_job["target"],
+ )
channel.send_message(return_prefix)
ts = time.time()
- st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f')
- filename = 'salt-results-{0}.yaml'.format(st)
+ st = datetime.datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S%f")
+ filename = "salt-results-{0}.yaml".format(st)
r = self.sc.api_call(
- 'files.upload', channels=channel.id, filename=filename,
- content=return_text)
+ "files.upload",
+ channels=channel.id,
+ filename=filename,
+ content=return_text,
+ )
# Handle unicode return
- log.debug('Got back %s via the slack client', r)
+ log.debug("Got back %s via the slack client", r)
resp = salt.utils.yaml.safe_load(salt.utils.json.dumps(r))
- if 'ok' in resp and resp['ok'] is False:
- this_job['channel'].send_message('Error: {0}'.format(resp['error']))
+ if "ok" in resp and resp["ok"] is False:
+ this_job["channel"].send_message(
+ "Error: {0}".format(resp["error"])
+ )
del outstanding[jid]
def run_command_async(self, msg):
- '''
+ """
:type message_generator: generator of dict
:param message_generator: Generates messages from slack that should be run
@@ -788,62 +877,74 @@ class SlackClient(object):
:type interval: int
:param interval: time to wait between ending a loop and beginning the next
- '''
- log.debug('Going to run a command asynchronous')
+ """
+ log.debug("Going to run a command asynchronous")
runner_functions = sorted(salt.runner.Runner(__opts__).functions)
# Parse args and kwargs
- cmd = msg['cmdline'][0]
+ cmd = msg["cmdline"][0]
- args, kwargs = self.parse_args_and_kwargs(msg['cmdline'])
+ args, kwargs = self.parse_args_and_kwargs(msg["cmdline"])
# Check for pillar string representation of dict and convert it to dict
- if 'pillar' in kwargs:
- kwargs.update(pillar=ast.literal_eval(kwargs['pillar']))
+ if "pillar" in kwargs:
+ kwargs.update(pillar=ast.literal_eval(kwargs["pillar"]))
# Check for target. Otherwise assume None
- target = msg['target']['target']
+ target = msg["target"]["target"]
# Check for tgt_type. Otherwise assume glob
- tgt_type = msg['target']['tgt_type']
- log.debug('target_type is: %s', tgt_type)
+ tgt_type = msg["target"]["tgt_type"]
+ log.debug("target_type is: %s", tgt_type)
if cmd in runner_functions:
runner = salt.runner.RunnerClient(__opts__)
- log.debug('Command %s will run via runner_functions', cmd)
+ log.debug("Command %s will run via runner_functions", cmd)
# pylint is tripping
# pylint: disable=missing-whitespace-after-comma
- job_id_dict = runner.asynchronous(cmd, {'args': args, 'kwargs': kwargs})
- job_id = job_id_dict['jid']
+ job_id_dict = runner.asynchronous(cmd, {"args": args, "kwargs": kwargs})
+ job_id = job_id_dict["jid"]
# Default to trying to run as a client module.
else:
local = salt.client.LocalClient()
- log.debug('Command %s will run via local.cmd_async, targeting %s', cmd, target)
- log.debug('Running %s, %s, %s, %s, %s', target, cmd, args, kwargs, tgt_type)
+ log.debug(
+ "Command %s will run via local.cmd_async, targeting %s", cmd, target
+ )
+ log.debug("Running %s, %s, %s, %s, %s", target, cmd, args, kwargs, tgt_type)
# according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form
- job_id = local.cmd_async(six.text_type(target), cmd, arg=args, kwarg=kwargs, tgt_type=six.text_type(tgt_type))
- log.info('ret from local.cmd_async is %s', job_id)
+ job_id = local.cmd_async(
+ six.text_type(target),
+ cmd,
+ arg=args,
+ kwarg=kwargs,
+ tgt_type=six.text_type(tgt_type),
+ )
+ log.info("ret from local.cmd_async is %s", job_id)
return job_id
-def start(token,
- control=False,
- trigger='!',
- groups=None,
- groups_pillar_name=None,
- fire_all=False,
- tag='salt/engines/slack'):
- '''
+def start(
+ token,
+ control=False,
+ trigger="!",
+ groups=None,
+ groups_pillar_name=None,
+ fire_all=False,
+ tag="salt/engines/slack",
+):
+ """
Listen to slack events and forward them to salt, new version
- '''
+ """
- if (not token) or (not token.startswith('xoxb')):
+ if (not token) or (not token.startswith("xoxb")):
time.sleep(2) # don't respawn too quickly
- log.error('Slack bot token not found, bailing...')
- raise UserWarning('Slack Engine bot token not configured')
+ log.error("Slack bot token not found, bailing...")
+ raise UserWarning("Slack Engine bot token not configured")
try:
client = SlackClient(token=token)
- message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name)
+ message_generator = client.generate_triggered_messages(
+ token, trigger, groups, groups_pillar_name
+ )
client.run_commands_from_slack_async(message_generator, fire_all, tag, control)
except Exception: # pylint: disable=broad-except
- raise Exception('{}'.format(traceback.format_exc()))
+ raise Exception("{}".format(traceback.format_exc()))
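
A standalone sketch of ``SlackClient.parse_args_and_kwargs`` as reformatted above: everything after the command name splits on the first ``=`` into kwargs, and anything else becomes a positional arg:

    def parse_args_and_kwargs(cmdline):
        args, kwargs = [], {}
        for item in cmdline[1:]:
            if "=" in item:
                key, value = item.split("=", 1)
                kwargs[key] = value
            else:
                args.append(item)
        return (args, kwargs)

    print(parse_args_and_kwargs(["state.apply", "webserver", "target=web*"]))
    # (['webserver'], {'target': 'web*'})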
diff --git a/salt/engines/sqs_events.py b/salt/engines/sqs_events.py
index 16f16406e5e..6fadd5c90e6 100644
--- a/salt/engines/sqs_events.py
+++ b/salt/engines/sqs_events.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that continuously reads messages from SQS and fires them as events.
Note that long polling is utilized to avoid excessive CPU usage.
@@ -71,30 +71,35 @@ Additionally you can define cross account sqs:
queue: prod
owner_acct_id: 111111111111
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
+import salt.utils.event
+
# Import salt libs
import salt.utils.json
-import salt.utils.event
+from salt.ext import six
# Import third party libs
try:
import boto.sqs
+
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
-from salt.ext import six
-
def __virtual__():
if not HAS_BOTO:
- return (False, 'Cannot import engine sqs_events because the required boto module is missing')
+ return (
+ False,
+ "Cannot import engine sqs_events because the required boto module is missing",
+ )
else:
return True
@@ -103,64 +108,75 @@ log = logging.getLogger(__name__)
def _get_sqs_conn(profile, region=None, key=None, keyid=None):
- '''
+ """
Get a boto connection to SQS.
- '''
+ """
if profile:
if isinstance(profile, six.string_types):
_profile = __opts__[profile]
elif isinstance(profile, dict):
_profile = profile
- key = _profile.get('key', None)
- keyid = _profile.get('keyid', None)
- region = _profile.get('region', None)
+ key = _profile.get("key", None)
+ keyid = _profile.get("keyid", None)
+ region = _profile.get("region", None)
if not region:
- region = __opts__.get('sqs.region', 'us-east-1')
+ region = __opts__.get("sqs.region", "us-east-1")
if not key:
- key = __opts__.get('sqs.key', None)
+ key = __opts__.get("sqs.key", None)
if not keyid:
- keyid = __opts__.get('sqs.keyid', None)
+ keyid = __opts__.get("sqs.keyid", None)
try:
- conn = boto.sqs.connect_to_region(region, aws_access_key_id=keyid,
- aws_secret_access_key=key)
+ conn = boto.sqs.connect_to_region(
+ region, aws_access_key_id=keyid, aws_secret_access_key=key
+ )
except boto.exception.NoAuthHandlerFound:
- log.error('No authentication credentials found when attempting to'
- ' make sqs_event engine connection to AWS.')
+ log.error(
+ "No authentication credentials found when attempting to"
+ " make sqs_event engine connection to AWS."
+ )
return None
return conn
-def _process_queue(q, q_name, fire_master, tag='salt/engine/sqs', owner_acct_id=None, message_format=None):
+def _process_queue(
+ q,
+ q_name,
+ fire_master,
+ tag="salt/engine/sqs",
+ owner_acct_id=None,
+ message_format=None,
+):
if not q:
log.warning(
- 'failure connecting to queue: %s, waiting 10 seconds.',
- ':'.join([_f for _f in (six.text_type(owner_acct_id), q_name) if _f])
+ "failure connecting to queue: %s, waiting 10 seconds.",
+ ":".join([_f for _f in (six.text_type(owner_acct_id), q_name) if _f]),
)
time.sleep(10)
else:
msgs = q.get_messages(wait_time_seconds=20)
for msg in msgs:
if message_format == "json":
- fire_master(tag=tag, data={'message': salt.utils.json.loads(msg.get_body())})
+ fire_master(
+ tag=tag, data={"message": salt.utils.json.loads(msg.get_body())}
+ )
else:
- fire_master(tag=tag, data={'message': msg.get_body()})
+ fire_master(tag=tag, data={"message": msg.get_body()})
msg.delete()
-def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):
- '''
+def start(queue, profile=None, tag="salt/engine/sqs", owner_acct_id=None):
+ """
Listen to sqs and fire message on event bus
- '''
- if __opts__.get('__role') == 'master':
+ """
+ if __opts__.get("__role") == "master":
fire_master = salt.utils.event.get_master_event(
- __opts__,
- __opts__['sock_dir'],
- listen=False).fire_event
+ __opts__, __opts__["sock_dir"], listen=False
+ ).fire_event
else:
- fire_master = __salt__['event.send']
+ fire_master = __salt__["event.send"]
- message_format = __opts__.get('sqs.message_format', None)
+ message_format = __opts__.get("sqs.message_format", None)
sqs = _get_sqs_conn(profile)
q = None
@@ -169,4 +185,11 @@ def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):
q = sqs.get_queue(queue, owner_acct_id=owner_acct_id)
q.set_message_class(boto.sqs.message.RawMessage)
- _process_queue(q, queue, fire_master, tag=tag, owner_acct_id=owner_acct_id, message_format=message_format)
+ _process_queue(
+ q,
+ queue,
+ fire_master,
+ tag=tag,
+ owner_acct_id=owner_acct_id,
+ message_format=message_format,
+ )
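
The credential lookup in ``_get_sqs_conn`` above resolves a dict profile first, then falls back to the ``sqs.*`` opts, then a default region (string profiles, which name a key in opts, are omitted here). A sketch of just that resolution order:

    def resolve_sqs_opts(opts, profile=None):
        key = keyid = region = None
        if isinstance(profile, dict):
            key = profile.get("key")
            keyid = profile.get("keyid")
            region = profile.get("region")
        region = region or opts.get("sqs.region", "us-east-1")
        key = key or opts.get("sqs.key")
        keyid = keyid or opts.get("sqs.keyid")
        return region, key, keyid

    print(resolve_sqs_opts({"sqs.region": "eu-west-1"}))  # ('eu-west-1', None, None)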
diff --git a/salt/engines/stalekey.py b/salt/engines/stalekey.py
index 18e9b0bd45b..b216b3fe01c 100644
--- a/salt/engines/stalekey.py
+++ b/salt/engines/stalekey.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
An engine that uses presence detection to keep track of which minions
have been recently connected and remove their keys if they have not been
connected for a certain period of time.
@@ -19,12 +19,13 @@ Requires that the :conf_master:`minion_data_cache` option be enabled.
interval: 3600
expire: 86400
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
import os
import time
-import logging
# Import salt libs
import salt.config
@@ -41,31 +42,31 @@ log = logging.getLogger(__name__)
def __virtual__():
- if not __opts__.get('minion_data_cache'):
- return (False, 'stalekey engine requires minion_data_cache to be enabled')
+ if not __opts__.get("minion_data_cache"):
+ return (False, "stalekey engine requires minion_data_cache to be enabled")
return True
def _get_keys():
keys = salt.key.get_key(__opts__)
minions = keys.all_keys()
- return minions['minions']
+ return minions["minions"]
def start(interval=3600, expire=604800):
ck = salt.utils.minions.CkMinions(__opts__)
- presence_file = '{0}/presence.p'.format(__opts__['cachedir'])
+ presence_file = "{0}/presence.p".format(__opts__["cachedir"])
wheel = salt.wheel.WheelClient(__opts__)
while True:
- log.debug('Checking for present minions')
+ log.debug("Checking for present minions")
minions = {}
if os.path.exists(presence_file):
try:
- with salt.utils.files.fopen(presence_file, 'r') as f:
+ with salt.utils.files.fopen(presence_file, "r") as f:
minions = salt.utils.msgpack.load(f)
except IOError as e:
- log.error('Could not open presence file %s: %s', presence_file, e)
+ log.error("Could not open presence file %s: %s", presence_file, e)
time.sleep(interval)
continue
@@ -83,7 +84,7 @@ def start(interval=3600, expire=604800):
elif m in present:
minions[m] = now
- log.debug('Finished checking for present minions')
+ log.debug("Finished checking for present minions")
# Delete old keys
stale_keys = []
for m, seen in six.iteritems(minions):
@@ -92,13 +93,13 @@ def start(interval=3600, expire=604800):
if stale_keys:
for k in stale_keys:
- log.info('Removing stale key for %s', k)
- wheel.cmd('key.delete', stale_keys)
+ log.info("Removing stale key for %s", k)
+ wheel.cmd("key.delete", stale_keys)
del minions[k]
try:
- with salt.utils.files.fopen(presence_file, 'w') as f:
+ with salt.utils.files.fopen(presence_file, "w") as f:
salt.utils.msgpack.dump(minions, f)
except IOError as e:
- log.error('Could not write to presence file %s: %s', presence_file, e)
+ log.error("Could not write to presence file %s: %s", presence_file, e)
time.sleep(interval)
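
The staleness comparison itself falls outside the hunks shown, so the following is a hedged sketch only: it assumes a key is stale when its last-seen timestamp is more than ``expire`` seconds old, matching the ``minions[m] = now`` bookkeeping above:

    import time

    def find_stale(minions, expire=604800, now=None):
        # minions maps minion id -> last-seen unix timestamp
        now = now if now is not None else time.time()
        return [m for m, seen in minions.items() if now - seen > expire]

    print(find_stale({"web1": time.time(), "old1": 0.0}))  # ['old1']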
diff --git a/salt/engines/test.py b/salt/engines/test.py
index e91dbe59612..3aefbfad70f 100644
--- a/salt/engines/test.py
+++ b/salt/engines/test.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
-'''
+"""
A simple test engine, not intended for real use but as an example
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
@@ -15,26 +16,26 @@ log = logging.getLogger(__name__)
def event_bus_context(opts):
- if opts['__role'] == 'master':
+ if opts["__role"] == "master":
event_bus = salt.utils.event.get_master_event(
- opts,
- opts['sock_dir'],
- listen=True)
+ opts, opts["sock_dir"], listen=True
+ )
else:
event_bus = salt.utils.event.get_event(
- 'minion',
- transport=opts['transport'],
+ "minion",
+ transport=opts["transport"],
opts=opts,
- sock_dir=opts['sock_dir'],
- listen=True)
- log.debug('test engine started')
+ sock_dir=opts["sock_dir"],
+ listen=True,
+ )
+ log.debug("test engine started")
return event_bus
def start():
- '''
+ """
Listen to events and write them to a log file
- '''
+ """
with event_bus_context(__opts__) as event_bus:
while True:
event = event_bus.get_event()
diff --git a/salt/engines/thorium.py b/salt/engines/thorium.py
index 241fef63ded..406bec187c2 100644
--- a/salt/engines/thorium.py
+++ b/salt/engines/thorium.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage the Thorium complex event reaction system
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
@@ -9,13 +9,8 @@ import salt.thorium
def start(grains=False, grain_keys=None, pillar=False, pillar_keys=None):
- '''
+ """
Execute the Thorium runtime
- '''
- state = salt.thorium.ThorState(
- __opts__,
- grains,
- grain_keys,
- pillar,
- pillar_keys)
+ """
+ state = salt.thorium.ThorState(__opts__, grains, grain_keys, pillar, pillar_keys)
state.start_runtime()
diff --git a/salt/engines/webhook.py b/salt/engines/webhook.py
index 5cb269ae35b..a2ab964a3a0 100644
--- a/salt/engines/webhook.py
+++ b/salt/engines/webhook.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
Send events from webhook api
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import salt.ext.tornado.httpserver
@@ -13,7 +13,7 @@ import salt.utils.event
def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
- '''
+ """
API to listen for webhooks and forward them to the reactor.
Implement the webhook behavior in an engine.
@@ -50,37 +50,43 @@ def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
.. note:: To create a self-signed certificate, use the following command
`salt-call --local tls.create_self_signed_cert`
- '''
- if __opts__.get('__role') == 'master':
- fire_master = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir']).fire_event
+ """
+ if __opts__.get("__role") == "master":
+ fire_master = salt.utils.event.get_master_event(
+ __opts__, __opts__["sock_dir"]
+ ).fire_event
else:
fire_master = None
def fire(tag, msg):
- '''
+ """
How to fire the event
- '''
+ """
if fire_master:
fire_master(msg, tag)
else:
- __salt__['event.send'](tag, msg)
+ __salt__["event.send"](tag, msg)
- class WebHook(salt.ext.tornado.web.RequestHandler): # pylint: disable=abstract-method
+ class WebHook(
+ salt.ext.tornado.web.RequestHandler
+ ): # pylint: disable=abstract-method
def post(self, tag): # pylint: disable=arguments-differ
body = self.request.body
headers = self.request.headers
payload = {
- 'headers': headers if isinstance(headers, dict) else dict(headers),
- 'body': body,
+ "headers": headers if isinstance(headers, dict) else dict(headers),
+ "body": body,
}
- fire('salt/engines/hook/' + tag, payload)
+ fire("salt/engines/hook/" + tag, payload)
- application = salt.ext.tornado.web.Application([(r"/(.*)", WebHook), ])
+ application = salt.ext.tornado.web.Application([(r"/(.*)", WebHook)])
ssl_options = None
if all([ssl_crt, ssl_key]):
ssl_options = {"certfile": ssl_crt, "keyfile": ssl_key}
io_loop = salt.ext.tornado.ioloop.IOLoop(make_current=False)
io_loop.make_current()
- http_server = salt.ext.tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
+ http_server = salt.ext.tornado.httpserver.HTTPServer(
+ application, ssl_options=ssl_options
+ )
http_server.listen(port, address=address)
io_loop.start()
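
With the handler above, any HTTP POST to the engine is re-fired on the Salt event bus under salt/engines/hook/<path>. A hypothetical client call, assuming the engine runs on the default port 5000 and the third-party requests package is available:

    import requests  # assumption: not part of Salt

    # The engine fires this payload on the bus with the tag
    # "salt/engines/hook/my/app/event".
    requests.post("http://localhost:5000/my/app/event", data=b'{"ok": true}')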
diff --git a/salt/exceptions.py b/salt/exceptions.py
index 58cb8b92041..5c255c6d280 100644
--- a/salt/exceptions.py
+++ b/salt/exceptions.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
This module is a central location for all salt exceptions
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -17,117 +17,125 @@ log = logging.getLogger(__name__)
def _nested_output(obj):
- '''
+ """
Serialize obj and format for output
- '''
+ """
# Explicit late import to avoid circular import
from salt.output import nested
+
nested.__opts__ = {}
ret = nested.output(obj).rstrip()
return ret
def get_error_message(error):
- '''
+ """
Get human readable message from Python Exception
- '''
- return error.args[0] if error.args else ''
+ """
+ return error.args[0] if error.args else ""
class SaltException(Exception):
- '''
+ """
Base exception class; all Salt-specific exceptions should subclass this
- '''
- def __init__(self, message=''):
+ """
+
+ def __init__(self, message=""):
# Avoid circular import
import salt.utils.stringutils
+
if not isinstance(message, six.string_types):
message = six.text_type(message)
- if six.PY3 or isinstance(message, unicode): # pylint: disable=incompatible-py3-code,undefined-variable
- super(SaltException, self).__init__(
- salt.utils.stringutils.to_str(message)
- )
+ # pylint: disable=incompatible-py3-code,undefined-variable
+ if six.PY3 or isinstance(message, unicode):
+ super(SaltException, self).__init__(salt.utils.stringutils.to_str(message))
self.message = self.strerror = message
+ # pylint: enable=incompatible-py3-code,undefined-variable
elif isinstance(message, str):
super(SaltException, self).__init__(message)
- self.message = self.strerror = \
- salt.utils.stringutils.to_unicode(message)
+ self.message = self.strerror = salt.utils.stringutils.to_unicode(message)
else:
# Some non-string input was passed. Run the parent dunder init with
# a str version, and convert the passed value to unicode for the
# message/strerror attributes.
- super(SaltException, self).__init__(str(message)) # future lint: blacklisted-function
- self.message = self.strerror = unicode(message) # pylint: disable=incompatible-py3-code,undefined-variable
+ # future lint: disable=blacklisted-function
+ super(SaltException, self).__init__(str(message))
+ # future lint: enable=blacklisted-function
+ # pylint: disable=incompatible-py3-code,undefined-variable
+ self.message = self.strerror = unicode(message)
+ # pylint: enable=incompatible-py3-code,undefined-variable
def __unicode__(self):
return self.strerror
def pack(self):
- '''
+ """
Pack this exception into a serializable dictionary that is safe for
transport via msgpack
- '''
+ """
if six.PY3:
- return {'message': six.text_type(self), 'args': self.args}
+ return {"message": six.text_type(self), "args": self.args}
return dict(message=self.__unicode__(), args=self.args)
class SaltClientError(SaltException):
- '''
+ """
Problem reading the master root key
- '''
+ """
class SaltMasterError(SaltException):
- '''
+ """
Problem reading the master root key
- '''
+ """
class SaltNoMinionsFound(SaltException):
- '''
+ """
An attempt to retrieve a list of minions failed
- '''
+ """
class SaltSyndicMasterError(SaltException):
- '''
+ """
Problem while proxying a request in the syndication master
- '''
+ """
class SaltMasterUnresolvableError(SaltException):
- '''
+ """
Problem resolving the name of the Salt master
- '''
+ """
class MasterExit(SystemExit):
- '''
+ """
Raised when the master exits
- '''
+ """
class AuthenticationError(SaltException):
- '''
+ """
If sha256 signature fails during decryption
- '''
+ """
class CommandNotFoundError(SaltException):
- '''
+ """
Used in modules or grains when a required binary is not available
- '''
+ """
class CommandExecutionError(SaltException):
- '''
+ """
Used when a module runs a command which returns an error and wants
to show the user the output gracefully instead of dying
- '''
- def __init__(self, message='', info=None):
+ """
+
+ def __init__(self, message="", info=None):
# Avoid circular import
import salt.utils.stringutils
+
try:
exc_str_prefix = salt.utils.stringutils.to_unicode(message)
except TypeError:
@@ -139,16 +147,18 @@ class CommandExecutionError(SaltException):
try:
exc_str_prefix = six.text_type(message)
except UnicodeDecodeError:
- exc_str_prefix = salt.utils.stringutils.to_unicode(str(message)) # future lint: disable=blacklisted-function
+ exc_str_prefix = salt.utils.stringutils.to_unicode(
+ str(message)
+ ) # future lint: disable=blacklisted-function
self.error = exc_str_prefix
self.info = info
if self.info:
if exc_str_prefix:
- if exc_str_prefix[-1] not in '.?!':
- exc_str_prefix += '.'
- exc_str_prefix += ' '
+ if exc_str_prefix[-1] not in ".?!":
+ exc_str_prefix += "."
+ exc_str_prefix += " "
- exc_str_prefix += 'Additional info follows:\n\n'
+ exc_str_prefix += "Additional info follows:\n\n"
# NOTE: exc_str will be passed to the parent class' constructor and
# become self.strerror.
exc_str = exc_str_prefix + _nested_output(self.info)
@@ -159,10 +169,11 @@ class CommandExecutionError(SaltException):
# this information would be redundant).
if isinstance(self.info, dict):
info_without_changes = copy.deepcopy(self.info)
- info_without_changes.pop('changes', None)
+ info_without_changes.pop("changes", None)
if info_without_changes:
- self.strerror_without_changes = \
- exc_str_prefix + _nested_output(info_without_changes)
+ self.strerror_without_changes = exc_str_prefix + _nested_output(
+ info_without_changes
+ )
else:
# 'changes' was the only key in the info dictionary. We no
# longer have any additional info to display. Use the
@@ -180,40 +191,43 @@ class CommandExecutionError(SaltException):
class LoaderError(SaltException):
- '''
+ """
Problems loading the right renderer
- '''
+ """
class PublishError(SaltException):
- '''
+ """
Problems encountered when trying to publish a command
- '''
+ """
class MinionError(SaltException):
- '''
+ """
Minion problems reading uris such as salt:// or http://
- '''
+ """
class FileserverConfigError(SaltException):
- '''
+ """
Used when invalid fileserver settings are detected
- '''
+ """
class FileLockError(SaltException):
- '''
+ """
Used when an error occurs obtaining a file lock
- '''
- def __init__(self, message, time_start=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
+ """
+
+ def __init__(
+ self, message, time_start=None, *args, **kwargs
+ ): # pylint: disable=keyword-arg-before-vararg
super(FileLockError, self).__init__(message, *args, **kwargs)
if time_start is None:
log.warning(
- 'time_start should be provided when raising a FileLockError. '
- 'Defaulting to current time as a fallback, but this may '
- 'result in an inaccurate timeout.'
+ "time_start should be provided when raising a FileLockError. "
+ "Defaulting to current time as a fallback, but this may "
+ "result in an inaccurate timeout."
)
self.time_start = time.time()
else:
@@ -221,7 +235,7 @@ class FileLockError(SaltException):
class GitLockError(SaltException):
- '''
+ """
Raised when an uncaught error occurs in the midst of obtaining an
update/checkout lock in salt.utils.gitfs.
@@ -229,47 +243,52 @@ class GitLockError(SaltException):
class is *not* a subclass of OSError. This is done intentionally, so that
this exception class can be caught in a try/except without being caught as
an OSError.
- '''
+ """
+
def __init__(self, errno, message, *args, **kwargs):
super(GitLockError, self).__init__(message, *args, **kwargs)
self.errno = errno
class GitRemoteError(SaltException):
- '''
+ """
Used by GitFS to denote a problem with the existence of the "origin" remote
or part of its configuration
- '''
+ """
class SaltInvocationError(SaltException, TypeError):
- '''
+ """
Used when the wrong number of arguments are sent to modules or invalid
arguments are specified on the command line
- '''
+ """
class PkgParseError(SaltException):
- '''
+ """
Used when one of the pkg modules cannot correctly parse the output from
the CLI tool (pacman, yum, apt, aptitude, etc.)
- '''
+ """
class SaltRenderError(SaltException):
- '''
+ """
Used when a renderer needs to raise an explicit error. If a line number and
buffer string are passed, get_context will be invoked to get the location
of the error.
- '''
- def __init__(self,
- message,
- line_num=None,
- buf='',
- marker=' <======================',
- trace=None):
+ """
+
+ def __init__(
+ self,
+ message,
+ line_num=None,
+ buf="",
+ marker=" <======================",
+ trace=None,
+ ):
# Avoid circular import
import salt.utils.stringutils
+
self.error = message
try:
exc_str = salt.utils.stringutils.to_unicode(message)
@@ -282,131 +301,136 @@ class SaltRenderError(SaltException):
try:
exc_str = six.text_type(message)
except UnicodeDecodeError:
- exc_str = salt.utils.stringutils.to_unicode(str(message)) # future lint: disable=blacklisted-function
+ exc_str = salt.utils.stringutils.to_unicode(
+ str(message)
+ ) # future lint: disable=blacklisted-function
self.line_num = line_num
self.buffer = buf
- self.context = ''
+ self.context = ""
if trace:
- exc_str += '\n{0}\n'.format(trace)
+ exc_str += "\n{0}\n".format(trace)
if self.line_num and self.buffer:
# Avoid circular import
import salt.utils.templates
+
self.context = salt.utils.stringutils.get_context(
- self.buffer,
- self.line_num,
- marker=marker
+ self.buffer, self.line_num, marker=marker
)
- exc_str += '; line {0}\n\n{1}'.format(
- self.line_num,
- salt.utils.stringutils.to_unicode(self.context),
+ exc_str += "; line {0}\n\n{1}".format(
+ self.line_num, salt.utils.stringutils.to_unicode(self.context),
)
super(SaltRenderError, self).__init__(exc_str)
class SaltClientTimeout(SaltException):
- '''
+ """
Thrown when a job sent through one of the Client interfaces times out
Takes the ``jid`` as a parameter
- '''
- def __init__(self, message, jid=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
+ """
+
+ def __init__(
+ self, message, jid=None, *args, **kwargs
+ ): # pylint: disable=keyword-arg-before-vararg
super(SaltClientTimeout, self).__init__(message, *args, **kwargs)
self.jid = jid
class SaltCacheError(SaltException):
- '''
+ """
Thrown when a problem was encountered trying to read or write from the salt cache
- '''
+ """
class TimeoutError(SaltException):
- '''
+ """
Thrown when an operation cannot be completed within a given time limit.
- '''
+ """
class SaltReqTimeoutError(SaltException):
- '''
+ """
Thrown when a salt master request call fails to return within the timeout
- '''
+ """
class TimedProcTimeoutError(SaltException):
- '''
+ """
Thrown when a timed subprocess does not terminate within the timeout,
or if the specified timeout is not an int or a float
- '''
+ """
class EauthAuthenticationError(SaltException):
- '''
+ """
Thrown when eauth authentication fails
- '''
+ """
class TokenAuthenticationError(SaltException):
- '''
+ """
Thrown when token authentication fails
- '''
+ """
class SaltDeserializationError(SaltException):
- '''
+ """
Thrown when salt cannot deserialize data.
- '''
+ """
class AuthorizationError(SaltException):
- '''
+ """
Thrown when runner or wheel execution fails due to permissions
- '''
+ """
class SaltDaemonNotRunning(SaltException):
- '''
+ """
Thrown when a master/minion/syndic is not running but is needed to
perform the requested operation (e.g., eauth).
- '''
+ """
class SaltRunnerError(SaltException):
- '''
+ """
Problem in runner
- '''
+ """
class SaltWheelError(SaltException):
- '''
+ """
Problem in wheel
- '''
+ """
class SaltConfigurationError(SaltException):
- '''
+ """
Configuration error
- '''
+ """
class SaltSystemExit(SystemExit):
- '''
+ """
This exception is raised when an unsolvable problem is found. There's
nothing else to do; salt should just exit.
- '''
+ """
+
def __init__(self, code=0, msg=None):
SystemExit.__init__(self, msg)
class SaltCloudException(SaltException):
- '''
+ """
Generic Salt Cloud Exception
- '''
+ """
class SaltCloudSystemExit(SaltCloudException):
- '''
+ """
This exception is raised when the execution should be stopped.
- '''
+ """
+
def __init__(self, message, exit_code=salt.defaults.exitcodes.EX_GENERIC):
super(SaltCloudSystemExit, self).__init__(message)
self.message = message
@@ -414,167 +438,167 @@ class SaltCloudSystemExit(SaltCloudException):
class SaltCloudConfigError(SaltCloudException):
- '''
+ """
Raised when a configuration setting is not found and should exist.
- '''
+ """
class SaltCloudNotFound(SaltCloudException):
- '''
+ """
Raised when a cloud provider function cannot find what's being searched for.
- '''
+ """
class SaltCloudExecutionTimeout(SaltCloudException):
- '''
+ """
Raised when too much time has passed while querying/waiting for data.
- '''
+ """
class SaltCloudExecutionFailure(SaltCloudException):
- '''
+ """
Raised when too many failures have occurred while querying/waiting for data.
- '''
+ """
class SaltCloudPasswordError(SaltCloudException):
- '''
+ """
Raise when virtual terminal password input failed
- '''
+ """
class NotImplemented(SaltException):
- '''
+ """
Used when a function or feature is not implemented
- '''
+ """
class TemplateError(SaltException):
- '''
+ """
Used when a custom error is triggered in a template
- '''
+ """
class ArgumentValueError(CommandExecutionError):
- '''
+ """
Used when an invalid argument was passed to a command execution
- '''
+ """
class CheckError(CommandExecutionError):
- '''
+ """
Used when a check fails
- '''
+ """
# Validation related exceptions
class InvalidConfigError(CommandExecutionError):
- '''
+ """
Used when the config is invalid
- '''
+ """
class InvalidEntityError(CommandExecutionError):
- '''
+ """
Used when an entity fails validation
- '''
+ """
# VMware related exceptions
class VMwareSaltError(CommandExecutionError):
- '''
+ """
Used when a VMware object cannot be retrieved
- '''
+ """
class VMwareRuntimeError(VMwareSaltError):
- '''
+ """
Used when a runtime error is encountered when communicating with the
vCenter
- '''
+ """
class VMwareConnectionError(VMwareSaltError):
- '''
+ """
Used when the client fails to connect to either a VMware vCenter server or
to an ESXi host
- '''
+ """
class VMwareObjectRetrievalError(VMwareSaltError):
- '''
+ """
Used when a VMware object cannot be retrieved
- '''
+ """
class VMwareObjectNotFoundError(VMwareSaltError):
- '''
+ """
Used when a VMware object was not found
- '''
+ """
class VMwareObjectExistsError(VMwareSaltError):
- '''
+ """
Used when a VMware object already exists
- '''
+ """
class VMwareMultipleObjectsError(VMwareObjectRetrievalError):
- '''
+ """
Used when multiple objects were retrieved (and one was expected)
- '''
+ """
class VMwareNotFoundError(VMwareSaltError):
- '''
+ """
Used when a VMware object was not found
- '''
+ """
class VMwareApiError(VMwareSaltError):
- '''
+ """
Used when representing a generic VMware API error
- '''
+ """
class VMwareFileNotFoundError(VMwareApiError):
- '''
+ """
Used when representing a generic VMware error if a file is not found
- '''
+ """
class VMwareSystemError(VMwareSaltError):
- '''
+ """
Used when representing a generic VMware system error
- '''
+ """
class VMwarePowerOnError(VMwareSaltError):
- '''
+ """
Used when an error occurs during power on
- '''
+ """
class VMwareVmRegisterError(VMwareSaltError):
- '''
+ """
Used when a configuration parameter is incorrect
- '''
+ """
class VMwareVmCreationError(VMwareSaltError):
- '''
+ """
Used when a configuration parameter is incorrect
- '''
+ """
class MissingSmb(SaltException):
- '''
+ """
Raised when no smb library is found.
- '''
+ """
class LoggingRuntimeError(RuntimeError):
- '''
+ """
Raised when we encounter an error while logging
- '''
+ """
diff --git a/salt/executors/__init__.py b/salt/executors/__init__.py
index 8e0f6918c6a..ccb0e8706d1 100644
--- a/salt/executors/__init__.py
+++ b/salt/executors/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-'''
+"""
Executors Directory
-'''
+"""
diff --git a/salt/executors/direct_call.py b/salt/executors/direct_call.py
index 76aae3ffa90..d3d32d9621b 100644
--- a/salt/executors/direct_call.py
+++ b/salt/executors/direct_call.py
@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
-'''
+"""
Direct call executor module
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
def execute(opts, data, func, args, kwargs):
- '''
+ """
Directly calls the given function with arguments
- '''
+ """
return func(*args, **kwargs)
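
The executor contract is easiest to see here: execute(opts, data, func, args, kwargs), where a direct call uses only the last three. Illustratively:

    def execute(opts, data, func, args, kwargs):
        # Passthrough: ignore opts/data and invoke the function directly.
        return func(*args, **kwargs)

    assert execute({}, {}, lambda a, b: a + b, [2, 3], {}) == 5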
diff --git a/salt/executors/docker.py b/salt/executors/docker.py
index 1360c8fb3e4..3e97945f190 100644
--- a/salt/executors/docker.py
+++ b/salt/executors/docker.py
@@ -1,46 +1,54 @@
# -*- coding: utf-8 -*-
-'''
+"""
Docker executor module
.. versionadded:: 2019.2.0
Used with the docker proxy minion.
-'''
+"""
from __future__ import absolute_import, unicode_literals
-
-__virtualname__ = 'docker'
+__virtualname__ = "docker"
DOCKER_MOD_MAP = {
- 'state.sls': 'docker.sls',
- 'state.apply': 'docker.apply',
- 'state.highstate': 'docker.highstate',
+ "state.sls": "docker.sls",
+ "state.apply": "docker.apply",
+ "state.highstate": "docker.highstate",
}
def __virtual__():
- if 'proxy' not in __opts__:
- return False, 'Docker executor is only meant to be used with Docker Proxy Minions'
- if __opts__.get('proxy', {}).get('proxytype') != __virtualname__:
- return False, 'Proxytype does not match: {0}'.format(__virtualname__)
+ if "proxy" not in __opts__:
+ return (
+ False,
+ "Docker executor is only meant to be used with Docker Proxy Minions",
+ )
+ if __opts__.get("proxy", {}).get("proxytype") != __virtualname__:
+ return False, "Proxytype does not match: {0}".format(__virtualname__)
return True
def execute(opts, data, func, args, kwargs):
- '''
+ """
Directly calls the given function with arguments
- '''
- if data['fun'] == 'saltutil.find_job':
- return __executors__['direct_call.execute'](opts, data, func, args, kwargs)
- if data['fun'] in DOCKER_MOD_MAP:
- return __executors__['direct_call.execute'](opts, data, __salt__[DOCKER_MOD_MAP[data['fun']]], [opts['proxy']['name']] + args, kwargs)
- return __salt__['docker.call'](opts['proxy']['name'], data['fun'], *args, **kwargs)
+ """
+ if data["fun"] == "saltutil.find_job":
+ return __executors__["direct_call.execute"](opts, data, func, args, kwargs)
+ if data["fun"] in DOCKER_MOD_MAP:
+ return __executors__["direct_call.execute"](
+ opts,
+ data,
+ __salt__[DOCKER_MOD_MAP[data["fun"]]],
+ [opts["proxy"]["name"]] + args,
+ kwargs,
+ )
+ return __salt__["docker.call"](opts["proxy"]["name"], data["fun"], *args, **kwargs)
def allow_missing_func(function): # pylint: disable=unused-argument
- '''
+ """
Allow all calls to be passed through to the docker container.
The docker call will use direct_call, which will report back if the module
was unable to run.
- '''
+ """
return True
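
The routing in execute() above picks one of three paths. A standalone sketch of that decision (DOCKER_MOD_MAP as defined in the module):

    DOCKER_MOD_MAP = {
        "state.sls": "docker.sls",
        "state.apply": "docker.apply",
        "state.highstate": "docker.highstate",
    }

    def route(fun):
        if fun == "saltutil.find_job":
            return "direct_call"        # job polling stays on the proxy
        if fun in DOCKER_MOD_MAP:
            return DOCKER_MOD_MAP[fun]  # state runs map to docker.* wrappers
        return "docker.call"            # everything else runs in the container

    assert route("state.apply") == "docker.apply"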
diff --git a/salt/executors/splay.py b/salt/executors/splay.py
index 8c815a1fdd7..d30826559f3 100644
--- a/salt/executors/splay.py
+++ b/salt/executors/splay.py
@@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
-'''
+"""
Splay function calls across targeted minions
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
-import time
+
import logging
+import time
import salt.utils.stringutils
@@ -22,15 +23,15 @@ def __init__(opts):
def _get_hash():
- '''
+ """
Jenkins One-At-A-Time Hash Function
More Info: http://en.wikipedia.org/wiki/Jenkins_hash_function#one-at-a-time
- '''
+ """
# Using bitmask to emulate rollover behavior of C unsigned 32 bit int
- bitmask = 0xffffffff
+ bitmask = 0xFFFFFFFF
h = 0
- for i in bytearray(salt.utils.stringutils.to_bytes(__grains__['id'])):
+ for i in bytearray(salt.utils.stringutils.to_bytes(__grains__["id"])):
h = (h + i) & bitmask
h = (h + (h << 10)) & bitmask
h = (h ^ (h >> 6)) & bitmask
@@ -47,7 +48,7 @@ def _calc_splay(splaytime):
def execute(opts, data, func, args, kwargs):
- '''
+ """
Splay a salt function call execution time across minions over
a number of seconds (default: 300)
@@ -69,14 +70,14 @@ def execute(opts, data, func, args, kwargs):
# With specified splaytime (5 minutes) and timeout with 10 second buffer
salt -t 310 --module-executors='[splay, direct_call]' --executor-opts='{splaytime: 300}' '*' pkg.version cowsay
- '''
- if 'executor_opts' in data and 'splaytime' in data['executor_opts']:
- splaytime = data['executor_opts']['splaytime']
+ """
+ if "executor_opts" in data and "splaytime" in data["executor_opts"]:
+ splaytime = data["executor_opts"]["splaytime"]
else:
- splaytime = opts.get('splaytime', _DEFAULT_SPLAYTIME)
+ splaytime = opts.get("splaytime", _DEFAULT_SPLAYTIME)
if splaytime <= 0:
- raise ValueError('splaytime must be a positive integer')
- fun_name = data.get('fun')
+ raise ValueError("splaytime must be a positive integer")
+ fun_name = data.get("fun")
my_delay = _calc_splay(splaytime)
log.debug("Splay is sleeping %s secs on %s", my_delay, fun_name)
diff --git a/salt/executors/sudo.py b/salt/executors/sudo.py
index 258aa8ead1c..fb5d3123569 100644
--- a/salt/executors/sudo.py
+++ b/salt/executors/sudo.py
@@ -1,29 +1,29 @@
# -*- coding: utf-8 -*-
-'''
+"""
Sudo executor module
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+import salt.syspaths
+
# Import salt libs
import salt.utils.json
import salt.utils.path
-import salt.syspaths
-
from salt.ext import six
from salt.ext.six.moves import shlex_quote as _cmd_quote
-__virtualname__ = 'sudo'
+__virtualname__ = "sudo"
def __virtual__():
- if salt.utils.path.which('sudo') and __opts__.get('sudo_user'):
+ if salt.utils.path.which("sudo") and __opts__.get("sudo_user"):
return __virtualname__
return False
def execute(opts, data, func, args, kwargs):
- '''
+ """
Allow for the calling of execution modules via sudo.
This module is invoked by the minion if the ``sudo_user`` minion config is
@@ -50,30 +50,35 @@ def execute(opts, data, func, args, kwargs):
sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers'
being run on ``sudo_minion``.
- '''
- cmd = ['sudo',
- '-u', opts.get('sudo_user'),
- 'salt-call',
- '--out', 'json',
- '--metadata',
- '-c', opts.get('config_dir'),
- '--',
- data.get('fun')]
- if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
- kwargs['concurrent'] = True
+ """
+ cmd = [
+ "sudo",
+ "-u",
+ opts.get("sudo_user"),
+ "salt-call",
+ "--out",
+ "json",
+ "--metadata",
+ "-c",
+ opts.get("config_dir"),
+ "--",
+ data.get("fun"),
+ ]
+ if data["fun"] in ("state.sls", "state.highstate", "state.apply"):
+ kwargs["concurrent"] = True
for arg in args:
cmd.append(_cmd_quote(six.text_type(arg)))
for key in kwargs:
- cmd.append(_cmd_quote('{0}={1}'.format(key, kwargs[key])))
+ cmd.append(_cmd_quote("{0}={1}".format(key, kwargs[key])))
- cmd_ret = __salt__['cmd.run_all'](cmd, use_vt=True, python_shell=False)
+ cmd_ret = __salt__["cmd.run_all"](cmd, use_vt=True, python_shell=False)
- if cmd_ret['retcode'] == 0:
- cmd_meta = salt.utils.json.loads(cmd_ret['stdout'])['local']
- ret = cmd_meta['return']
- __context__['retcode'] = cmd_meta.get('retcode', 0)
+ if cmd_ret["retcode"] == 0:
+ cmd_meta = salt.utils.json.loads(cmd_ret["stdout"])["local"]
+ ret = cmd_meta["return"]
+ __context__["retcode"] = cmd_meta.get("retcode", 0)
else:
- ret = cmd_ret['stderr']
- __context__['retcode'] = cmd_ret['retcode']
+ ret = cmd_ret["stderr"]
+ __context__["retcode"] = cmd_ret["retcode"]
return ret
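
The executor shells out to salt-call --out json --metadata, so the parsing at the end expects output shaped roughly like this (keys inferred from the code above; real metadata carries more fields):

    import json  # salt.utils.json wraps the same interface

    stdout = '{"local": {"return": "3.0.0", "retcode": 0}}'
    cmd_meta = json.loads(stdout)["local"]
    assert cmd_meta["return"] == "3.0.0"
    assert cmd_meta.get("retcode", 0) == 0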
diff --git a/salt/ext/tornado/gen.py b/salt/ext/tornado/gen.py
index 6cb19730bf1..72f422ce28f 100644
--- a/salt/ext/tornado/gen.py
+++ b/salt/ext/tornado/gen.py
@@ -115,13 +115,13 @@ try:
# py35+
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
- from backports_abc import Generator as GeneratorType # type: ignore
+ from salt.ext.backports_abc import Generator as GeneratorType # type: ignore
try:
# py35+
from inspect import isawaitable # type: ignore
except ImportError:
- from backports_abc import isawaitable
+ from salt.ext.backports_abc import isawaitable
except ImportError:
if 'APPENGINE_RUNTIME' not in os.environ:
raise
diff --git a/salt/fileclient.py b/salt/fileclient.py
index f154e4fd433..d606bea99e6 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -1,29 +1,25 @@
# -*- coding: utf-8 -*-
-'''
+"""
Classes that manage file clients
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import contextlib
import errno
+import ftplib
import logging
import os
-import string
import shutil
-import ftplib
-from salt.ext.tornado.httputil import parse_response_start_line, HTTPHeaders, HTTPInputError
-import salt.utils.atomicfile
+import string
-# Import salt libs
-from salt.exceptions import (
- CommandExecutionError, MinionError
-)
import salt.client
+import salt.ext.six.moves.BaseHTTPServer as BaseHTTPServer
+import salt.fileserver
import salt.loader
import salt.payload
import salt.transport.client
-import salt.fileserver
+import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.gzip_util
@@ -35,13 +31,21 @@ import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.versions
-from salt.utils.openstack.swift import SaltSwift
+
+# Import salt libs
+from salt.exceptions import CommandExecutionError, MinionError
# pylint: disable=no-name-in-module,import-error
from salt.ext import six
-import salt.ext.six.moves.BaseHTTPServer as BaseHTTPServer
from salt.ext.six.moves.urllib.error import HTTPError, URLError
from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
+from salt.ext.tornado.httputil import (
+ HTTPHeaders,
+ HTTPInputError,
+ parse_response_start_line,
+)
+from salt.utils.openstack.swift import SaltSwift
+
# pylint: enable=no-name-in-module,import-error
log = logging.getLogger(__name__)
@@ -49,26 +53,24 @@ MAX_FILENAME_LENGTH = 255
def get_file_client(opts, pillar=False):
- '''
+ """
Read in the ``file_client`` option and return the correct type of file
server
- '''
- client = opts.get('file_client', 'remote')
- if pillar and client == 'local':
- client = 'pillar'
- return {
- 'remote': RemoteClient,
- 'local': FSClient,
- 'pillar': PillarClient,
- }.get(client, RemoteClient)(opts)
+ """
+ client = opts.get("file_client", "remote")
+ if pillar and client == "local":
+ client = "pillar"
+ return {"remote": RemoteClient, "local": FSClient, "pillar": PillarClient}.get(
+ client, RemoteClient
+ )(opts)
def decode_dict_keys_to_str(src):
- '''
+ """
Convert top level keys from bytes to strings if possible.
This is necessary because Python 3 makes a distinction
between these types.
- '''
+ """
if not six.PY3 or not isinstance(src, dict):
return src
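
Stepping back to get_file_client above: the reshaped factory is a three-way table with a fallback. A standalone sketch of the selection rule (strings stand in for the real client classes):

    def pick_client(opts, pillar=False):
        client = opts.get("file_client", "remote")
        if pillar and client == "local":
            client = "pillar"
        return {
            "remote": "RemoteClient",
            "local": "FSClient",
            "pillar": "PillarClient",
        }.get(client, "RemoteClient")

    assert pick_client({}) == "RemoteClient"
    assert pick_client({"file_client": "local"}, pillar=True) == "PillarClient"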
@@ -84,9 +86,10 @@ def decode_dict_keys_to_str(src):
class Client(object):
- '''
+ """
Base class for Salt file interactions
- '''
+ """
+
def __init__(self, opts):
self.opts = opts
self.utils = salt.loader.utils(self.opts)
@@ -100,24 +103,24 @@ class Client(object):
def __setstate__(self, state):
# This will polymorphically call __init__
# in the derived class.
- self.__init__(state['opts'])
+ self.__init__(state["opts"])
def __getstate__(self):
- return {'opts': self.opts}
+ return {"opts": self.opts}
def _check_proto(self, path):
- '''
+ """
Make sure that this path is intended for the salt master and trim it
- '''
- if not path.startswith('salt://'):
- raise MinionError('Unsupported path: {0}'.format(path))
+ """
+ if not path.startswith("salt://"):
+ raise MinionError("Unsupported path: {0}".format(path))
file_path, saltenv = salt.utils.url.parse(path)
return file_path
def _file_local_list(self, dest):
- '''
+ """
Helper util to return a list of files in a directory
- '''
+ """
if os.path.isdir(dest):
destdir = dest
else:
@@ -133,15 +136,12 @@ class Client(object):
return filelist
@contextlib.contextmanager
- def _cache_loc(self, path, saltenv='base', cachedir=None):
- '''
+ def _cache_loc(self, path, saltenv="base", cachedir=None):
+ """
Return the local location to cache the file, cache dirs will be made
- '''
+ """
cachedir = self.get_cachedir(cachedir)
- dest = salt.utils.path.join(cachedir,
- 'files',
- saltenv,
- path)
+ dest = salt.utils.path.join(cachedir, "files", saltenv, path)
destdir = os.path.dirname(dest)
with salt.utils.files.set_umask(0o077):
# remove destdir if it is a regular file to avoid an OSError when
@@ -160,88 +160,91 @@ class Client(object):
def get_cachedir(self, cachedir=None):
if cachedir is None:
- cachedir = self.opts['cachedir']
+ cachedir = self.opts["cachedir"]
elif not os.path.isabs(cachedir):
- cachedir = os.path.join(self.opts['cachedir'], cachedir)
+ cachedir = os.path.join(self.opts["cachedir"], cachedir)
return cachedir
- def get_file(self,
- path,
- dest='',
- makedirs=False,
- saltenv='base',
- gzip=None,
- cachedir=None):
- '''
+ def get_file(
+ self, path, dest="", makedirs=False, saltenv="base", gzip=None, cachedir=None
+ ):
+ """
Copies a file from the local files or master depending on
implementation
- '''
+ """
raise NotImplementedError
- def file_list_emptydirs(self, saltenv='base', prefix=''):
- '''
+ def file_list_emptydirs(self, saltenv="base", prefix=""):
+ """
List the empty dirs
- '''
+ """
raise NotImplementedError
- def cache_file(self, path, saltenv='base', cachedir=None, source_hash=None):
- '''
+ def cache_file(self, path, saltenv="base", cachedir=None, source_hash=None):
+ """
Pull a file down from the file server and store it in the minion
file cache
- '''
+ """
return self.get_url(
- path, '', True, saltenv, cachedir=cachedir, source_hash=source_hash)
+ path, "", True, saltenv, cachedir=cachedir, source_hash=source_hash
+ )
- def cache_files(self, paths, saltenv='base', cachedir=None):
- '''
+ def cache_files(self, paths, saltenv="base", cachedir=None):
+ """
Download a list of files stored on the master and put them in the
minion file cache
- '''
+ """
ret = []
if isinstance(paths, six.string_types):
- paths = paths.split(',')
+ paths = paths.split(",")
for path in paths:
ret.append(self.cache_file(path, saltenv, cachedir=cachedir))
return ret
- def cache_master(self, saltenv='base', cachedir=None):
- '''
+ def cache_master(self, saltenv="base", cachedir=None):
+ """
Download and cache all files on a master in a specified environment
- '''
+ """
ret = []
for path in self.file_list(saltenv):
ret.append(
- self.cache_file(
- salt.utils.url.create(path), saltenv, cachedir=cachedir)
+ self.cache_file(salt.utils.url.create(path), saltenv, cachedir=cachedir)
)
return ret
- def cache_dir(self, path, saltenv='base', include_empty=False,
- include_pat=None, exclude_pat=None, cachedir=None):
- '''
+ def cache_dir(
+ self,
+ path,
+ saltenv="base",
+ include_empty=False,
+ include_pat=None,
+ exclude_pat=None,
+ cachedir=None,
+ ):
+ """
Download all of the files in a subdir of the master
- '''
+ """
ret = []
path = self._check_proto(salt.utils.data.decode(path))
# We want to make sure files start with this *directory*, use
# '/' explicitly because the master (that's generating the
# list of files) only runs on POSIX
- if not path.endswith('/'):
- path = path + '/'
+ if not path.endswith("/"):
+ path = path + "/"
- log.info(
- 'Caching directory \'%s\' for environment \'%s\'', path, saltenv
- )
+ log.info("Caching directory '%s' for environment '%s'", path, saltenv)
# go through the list of all files finding ones that are in
# the target directory and caching them
for fn_ in self.file_list(saltenv):
fn_ = salt.utils.data.decode(fn_)
if fn_.strip() and fn_.startswith(path):
if salt.utils.stringutils.check_include_exclude(
- fn_, include_pat, exclude_pat):
+ fn_, include_pat, exclude_pat
+ ):
fn_ = self.cache_file(
- salt.utils.url.create(fn_), saltenv, cachedir=cachedir)
+ salt.utils.url.create(fn_), saltenv, cachedir=cachedir
+ )
if fn_:
ret.append(fn_)
@@ -257,22 +260,21 @@ class Client(object):
# prefix = separated[0]
cachedir = self.get_cachedir(cachedir)
- dest = salt.utils.path.join(cachedir, 'files', saltenv)
+ dest = salt.utils.path.join(cachedir, "files", saltenv)
for fn_ in self.file_list_emptydirs(saltenv):
fn_ = salt.utils.data.decode(fn_)
if fn_.startswith(path):
- minion_dir = '{0}/{1}'.format(dest, fn_)
+ minion_dir = "{0}/{1}".format(dest, fn_)
if not os.path.isdir(minion_dir):
os.makedirs(minion_dir)
ret.append(minion_dir)
return ret
def cache_local_file(self, path, **kwargs):
- '''
+ """
Cache a local file on the minion in the localfiles cache
- '''
- dest = os.path.join(self.opts['cachedir'], 'localfiles',
- path.lstrip('/'))
+ """
+ dest = os.path.join(self.opts["cachedir"], "localfiles", path.lstrip("/"))
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
@@ -281,41 +283,41 @@ class Client(object):
shutil.copyfile(path, dest)
return dest
- def file_local_list(self, saltenv='base'):
- '''
+ def file_local_list(self, saltenv="base"):
+ """
List files in the local minion files and localfiles caches
- '''
- filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv)
- localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles')
+ """
+ filesdest = os.path.join(self.opts["cachedir"], "files", saltenv)
+ localfilesdest = os.path.join(self.opts["cachedir"], "localfiles")
fdest = self._file_local_list(filesdest)
ldest = self._file_local_list(localfilesdest)
return sorted(fdest.union(ldest))
- def file_list(self, saltenv='base', prefix=''):
- '''
+ def file_list(self, saltenv="base", prefix=""):
+ """
This function must be overridden
- '''
+ """
return []
- def dir_list(self, saltenv='base', prefix=''):
- '''
+ def dir_list(self, saltenv="base", prefix=""):
+ """
This function must be overridden
- '''
+ """
return []
- def symlink_list(self, saltenv='base', prefix=''):
- '''
+ def symlink_list(self, saltenv="base", prefix=""):
+ """
This function must be overridden
- '''
+ """
return {}
- def is_cached(self, path, saltenv='base', cachedir=None):
- '''
+ def is_cached(self, path, saltenv="base", cachedir=None):
+ """
Returns the full path to a file if it is cached locally on the minion
otherwise returns a blank string
- '''
- if path.startswith('salt://'):
+ """
+ if path.startswith("salt://"):
path, senv = salt.utils.url.parse(path)
if senv:
saltenv = senv
@@ -324,69 +326,70 @@ class Client(object):
# also strip escape character '|'
localsfilesdest = os.path.join(
- self.opts['cachedir'], 'localfiles', path.lstrip('|/'))
+ self.opts["cachedir"], "localfiles", path.lstrip("|/")
+ )
filesdest = os.path.join(
- self.opts['cachedir'], 'files', saltenv, path.lstrip('|/'))
+ self.opts["cachedir"], "files", saltenv, path.lstrip("|/")
+ )
extrndest = self._extrn_path(path, saltenv, cachedir=cachedir)
if os.path.exists(filesdest):
return salt.utils.url.escape(filesdest) if escaped else filesdest
elif os.path.exists(localsfilesdest):
- return salt.utils.url.escape(localsfilesdest) \
- if escaped \
- else localsfilesdest
+ return (
+ salt.utils.url.escape(localsfilesdest) if escaped else localsfilesdest
+ )
elif os.path.exists(extrndest):
return extrndest
- return ''
+ return ""
def list_states(self, saltenv):
- '''
+ """
Return a list of all available sls modules on the master for a given
environment
- '''
+ """
states = set()
for path in self.file_list(saltenv):
if salt.utils.platform.is_windows():
- path = path.replace('\\', '/')
- if path.endswith('.sls'):
+ path = path.replace("\\", "/")
+ if path.endswith(".sls"):
# is an sls module!
- if path.endswith('/init.sls'):
- states.add(path.replace('/', '.')[:-9])
+ if path.endswith("/init.sls"):
+ states.add(path.replace("/", ".")[:-9])
else:
- states.add(path.replace('/', '.')[:-4])
+ states.add(path.replace("/", ".")[:-4])
return sorted(states)
def get_state(self, sls, saltenv, cachedir=None):
- '''
+ """
Get a state file from the master and store it in the local minion
cache; return the location of the file
- '''
- if '.' in sls:
- sls = sls.replace('.', '/')
- sls_url = salt.utils.url.create(sls + '.sls')
- init_url = salt.utils.url.create(sls + '/init.sls')
+ """
+ if "." in sls:
+ sls = sls.replace(".", "/")
+ sls_url = salt.utils.url.create(sls + ".sls")
+ init_url = salt.utils.url.create(sls + "/init.sls")
for path in [sls_url, init_url]:
dest = self.cache_file(path, saltenv, cachedir=cachedir)
if dest:
- return {'source': path, 'dest': dest}
+ return {"source": path, "dest": dest}
return {}
- def get_dir(self, path, dest='', saltenv='base', gzip=None,
- cachedir=None):
- '''
+ def get_dir(self, path, dest="", saltenv="base", gzip=None, cachedir=None):
+ """
Get a directory recursively from the salt-master
- '''
+ """
ret = []
# Strip trailing slash
- path = self._check_proto(path).rstrip('/')
+ path = self._check_proto(path).rstrip("/")
# Break up the path into a list containing the bottom-level directory
# (the one being recursively copied) and the directories preceding it
- separated = path.rsplit('/', 1)
+ separated = path.rsplit("/", 1)
if len(separated) != 2:
# No slashes in path. (This means all files in saltenv will be
# copied)
- prefix = ''
+ prefix = ""
else:
prefix = separated[0]
@@ -395,19 +398,21 @@ class Client(object):
# Prevent files in "salt://foobar/" (or salt://foo.sh) from
# matching a path of "salt://foo"
try:
- if fn_[len(path)] != '/':
+ if fn_[len(path)] != "/":
continue
except IndexError:
continue
# Remove the leading directories from path to derive
# the relative path on the minion.
- minion_relpath = fn_[len(prefix):].lstrip('/')
+ minion_relpath = fn_[len(prefix) :].lstrip("/")
ret.append(
- self.get_file(
- salt.utils.url.create(fn_),
- '{0}/{1}'.format(dest, minion_relpath),
- True, saltenv, gzip
- )
+ self.get_file(
+ salt.utils.url.create(fn_),
+ "{0}/{1}".format(dest, minion_relpath),
+ True,
+ saltenv,
+ gzip,
+ )
)
# Replicate empty dirs from master
try:
@@ -415,14 +420,14 @@ class Client(object):
# Prevent an empty dir "salt://foobar/" from matching a path of
# "salt://foo"
try:
- if fn_[len(path)] != '/':
+ if fn_[len(path)] != "/":
continue
except IndexError:
continue
# Remove the leading directories from path to derive
# the relative path on the minion.
- minion_relpath = fn_[len(prefix):].lstrip('/')
- minion_mkdir = '{0}/{1}'.format(dest, minion_relpath)
+ minion_relpath = fn_[len(prefix) :].lstrip("/")
+ minion_mkdir = "{0}/{1}".format(dest, minion_relpath)
if not os.path.isdir(minion_mkdir):
os.makedirs(minion_mkdir)
ret.append(minion_mkdir)
@@ -431,23 +436,33 @@ class Client(object):
ret.sort()
return ret
- def get_url(self, url, dest, makedirs=False, saltenv='base',
- no_cache=False, cachedir=None, source_hash=None):
- '''
+ def get_url(
+ self,
+ url,
+ dest,
+ makedirs=False,
+ saltenv="base",
+ no_cache=False,
+ cachedir=None,
+ source_hash=None,
+ ):
+ """
Get a single file from a URL.
- '''
+ """
url_data = urlparse(url)
url_scheme = url_data.scheme
- url_path = os.path.join(
- url_data.netloc, url_data.path).rstrip(os.sep)
+ url_path = os.path.join(url_data.netloc, url_data.path).rstrip(os.sep)
# If dest is a directory, rewrite dest with filename
- if dest is not None \
- and (os.path.isdir(dest) or dest.endswith(('/', '\\'))):
- if url_data.query or len(url_data.path) > 1 and not url_data.path.endswith('/'):
- strpath = url.split('/')[-1]
+ if dest is not None and (os.path.isdir(dest) or dest.endswith(("/", "\\"))):
+ if (
+ url_data.query
+ or len(url_data.path) > 1
+ and not url_data.path.endswith("/")
+ ):
+ strpath = url.split("/")[-1]
else:
- strpath = 'index.html'
+ strpath = "index.html"
if salt.utils.platform.is_windows():
strpath = salt.utils.path.sanitize_win_path(strpath)
@@ -455,25 +470,25 @@ class Client(object):
dest = os.path.join(dest, strpath)
if url_scheme and url_scheme.lower() in string.ascii_lowercase:
- url_path = ':'.join((url_scheme, url_path))
- url_scheme = 'file'
+ url_path = ":".join((url_scheme, url_path))
+ url_scheme = "file"
- if url_scheme in ('file', ''):
+ if url_scheme in ("file", ""):
# Local filesystem
if not os.path.isabs(url_path):
raise CommandExecutionError(
- 'Path \'{0}\' is not absolute'.format(url_path)
+ "Path '{0}' is not absolute".format(url_path)
)
if dest is None:
- with salt.utils.files.fopen(url_path, 'rb') as fp_:
+ with salt.utils.files.fopen(url_path, "rb") as fp_:
data = fp_.read()
return data
return url_path
- if url_scheme == 'salt':
+ if url_scheme == "salt":
result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir)
if result and dest is None:
- with salt.utils.files.fopen(result, 'rb') as fp_:
+ with salt.utils.files.fopen(result, "rb") as fp_:
data = fp_.read()
return data
return result
@@ -484,17 +499,20 @@ class Client(object):
if makedirs:
os.makedirs(destdir)
else:
- return ''
+ return ""
elif not no_cache:
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
if source_hash is not None:
try:
- source_hash = source_hash.split('=')[-1]
+ source_hash = source_hash.split("=")[-1]
form = salt.utils.files.HASHES_REVMAP[len(source_hash)]
if salt.utils.hashutils.get_hash(dest, form) == source_hash:
log.debug(
- 'Cached copy of %s (%s) matches source_hash %s, '
- 'skipping download', url, dest, source_hash
+ "Cached copy of %s (%s) matches source_hash %s, "
+ "skipping download",
+ url,
+ dest,
+ source_hash,
)
return dest
except (AttributeError, KeyError, IOError, OSError):
@@ -503,37 +521,41 @@ class Client(object):
if not os.path.isdir(destdir):
os.makedirs(destdir)
- if url_data.scheme == 's3':
+ if url_data.scheme == "s3":
try:
+
def s3_opt(key, default=None):
- '''
+ """
Get value of s3.<key> from Minion config or from Pillar
- '''
- if 's3.' + key in self.opts:
- return self.opts['s3.' + key]
+ """
+ if "s3." + key in self.opts:
+ return self.opts["s3." + key]
try:
- return self.opts['pillar']['s3'][key]
+ return self.opts["pillar"]["s3"][key]
except (KeyError, TypeError):
return default
- self.utils['s3.query'](method='GET',
- bucket=url_data.netloc,
- path=url_data.path[1:],
- return_bin=False,
- local_file=dest,
- action=None,
- key=s3_opt('key'),
- keyid=s3_opt('keyid'),
- service_url=s3_opt('service_url'),
- verify_ssl=s3_opt('verify_ssl', True),
- location=s3_opt('location'),
- path_style=s3_opt('path_style', False),
- https_enable=s3_opt('https_enable', True))
+
+ self.utils["s3.query"](
+ method="GET",
+ bucket=url_data.netloc,
+ path=url_data.path[1:],
+ return_bin=False,
+ local_file=dest,
+ action=None,
+ key=s3_opt("key"),
+ keyid=s3_opt("keyid"),
+ service_url=s3_opt("service_url"),
+ verify_ssl=s3_opt("verify_ssl", True),
+ location=s3_opt("location"),
+ path_style=s3_opt("path_style", False),
+ https_enable=s3_opt("https_enable", True),
+ )
return dest
except Exception as exc: # pylint: disable=broad-except
raise MinionError(
- 'Could not fetch from {0}. Exception: {1}'.format(url, exc)
+ "Could not fetch from {0}. Exception: {1}".format(url, exc)
)
- if url_data.scheme == 'ftp':
+ if url_data.scheme == "ftp":
try:
ftp = ftplib.FTP()
ftp_port = url_data.port
@@ -541,50 +563,61 @@ class Client(object):
ftp_port = 21
ftp.connect(url_data.hostname, ftp_port)
ftp.login(url_data.username, url_data.password)
- remote_file_path = url_data.path.lstrip('/')
- with salt.utils.files.fopen(dest, 'wb') as fp_:
- ftp.retrbinary('RETR {0}'.format(remote_file_path), fp_.write)
+ remote_file_path = url_data.path.lstrip("/")
+ with salt.utils.files.fopen(dest, "wb") as fp_:
+ ftp.retrbinary("RETR {0}".format(remote_file_path), fp_.write)
ftp.quit()
return dest
except Exception as exc: # pylint: disable=broad-except
- raise MinionError('Could not retrieve {0} from FTP server. Exception: {1}'.format(url, exc))
+ raise MinionError(
+ "Could not retrieve {0} from FTP server. Exception: {1}".format(
+ url, exc
+ )
+ )
- if url_data.scheme == 'swift':
+ if url_data.scheme == "swift":
try:
+
def swift_opt(key, default):
- '''
+ """
Get value of <key> from Minion config or from Pillar
- '''
+ """
if key in self.opts:
return self.opts[key]
try:
- return self.opts['pillar'][key]
+ return self.opts["pillar"][key]
except (KeyError, TypeError):
return default
- swift_conn = SaltSwift(swift_opt('keystone.user', None),
- swift_opt('keystone.tenant', None),
- swift_opt('keystone.auth_url', None),
- swift_opt('keystone.password', None))
+ swift_conn = SaltSwift(
+ swift_opt("keystone.user", None),
+ swift_opt("keystone.tenant", None),
+ swift_opt("keystone.auth_url", None),
+ swift_opt("keystone.password", None),
+ )
- swift_conn.get_object(url_data.netloc,
- url_data.path[1:],
- dest)
+ swift_conn.get_object(url_data.netloc, url_data.path[1:], dest)
return dest
except Exception: # pylint: disable=broad-except
- raise MinionError('Could not fetch from {0}'.format(url))
+ raise MinionError("Could not fetch from {0}".format(url))
get_kwargs = {}
- if url_data.username is not None \
- and url_data.scheme in ('http', 'https'):
+ if url_data.username is not None and url_data.scheme in ("http", "https"):
netloc = url_data.netloc
- at_sign_pos = netloc.rfind('@')
+ at_sign_pos = netloc.rfind("@")
if at_sign_pos != -1:
- netloc = netloc[at_sign_pos + 1:]
+ netloc = netloc[at_sign_pos + 1 :]
fixed_url = urlunparse(
- (url_data.scheme, netloc, url_data.path,
- url_data.params, url_data.query, url_data.fragment))
- get_kwargs['auth'] = (url_data.username, url_data.password)
+ (
+ url_data.scheme,
+ netloc,
+ url_data.path,
+ url_data.params,
+ url_data.query,
+ url_data.fragment,
+ )
+ )
+ get_kwargs["auth"] = (url_data.username, url_data.password)
else:
fixed_url = url
@@ -620,7 +653,7 @@ class Client(object):
def on_header(hdr):
if write_body[1] is not False and write_body[2] is None:
- if not hdr.strip() and 'Content-Type' not in write_body[1]:
+ if not hdr.strip() and "Content-Type" not in write_body[1]:
# If write_body[0] is True, then we are not following a
# redirect (initial response was a 200 OK). So there is
# no need to reset write_body[0].
@@ -634,16 +667,18 @@ class Client(object):
# Try to find out what content type encoding is used if
# this is a text file
write_body[1].parse_line(hdr) # pylint: disable=no-member
- if 'Content-Type' in write_body[1]:
- content_type = write_body[1].get('Content-Type') # pylint: disable=no-member
- if not content_type.startswith('text'):
+ if "Content-Type" in write_body[1]:
+ content_type = write_body[1].get(
+ "Content-Type"
+ ) # pylint: disable=no-member
+ if not content_type.startswith("text"):
write_body[1] = write_body[2] = False
else:
- encoding = 'utf-8'
- fields = content_type.split(';')
+ encoding = "utf-8"
+ fields = content_type.split(";")
for field in fields:
- if 'encoding' in field:
- encoding = field.split('encoding=')[-1]
+ if "encoding" in field:
+ encoding = field.split("encoding=")[-1]
write_body[2] = encoding
# We have found our encoding. Stop processing headers.
write_body[1] = False
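
Note that the charset sniffing above keys on the literal substring "encoding", so only a header parameter like encoding=utf-8 matches; a conventional charset= parameter falls through to the utf-8 default. Standalone:

    content_type = "text/plain; encoding=utf-8"
    encoding = "utf-8"  # default assumed by the client code
    for field in content_type.split(";"):
        if "encoding" in field:
            encoding = field.split("encoding=")[-1]
    assert encoding == "utf-8"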
@@ -676,11 +711,14 @@ class Client(object):
if write_body[2]:
chunk = chunk.decode(write_body[2])
result.append(chunk)
+
else:
- dest_tmp = u"{0}.part".format(dest)
+ dest_tmp = "{0}.part".format(dest)
# We need an open filehandle to use in the on_chunk callback,
# that's why we're not using a with clause here.
- destfp = salt.utils.files.fopen(dest_tmp, 'wb') # pylint: disable=resource-leakage
+ # pylint: disable=resource-leakage
+ destfp = salt.utils.files.fopen(dest_tmp, "wb")
+ # pylint: enable=resource-leakage
def on_chunk(chunk):
if write_body[0]:
@@ -696,64 +734,64 @@ class Client(object):
opts=self.opts,
**get_kwargs
)
- if 'handle' not in query:
- raise MinionError('Error: {0} reading {1}'.format(query['error'], url))
+ if "handle" not in query:
+ raise MinionError("Error: {0} reading {1}".format(query["error"], url))
if no_cache:
if write_body[2]:
- return ''.join(result)
- return b''.join(result)
+ return "".join(result)
+ return b"".join(result)
else:
destfp.close()
destfp = None
salt.utils.files.rename(dest_tmp, dest)
return dest
except HTTPError as exc:
- raise MinionError('HTTP error {0} reading {1}: {3}'.format(
- exc.code,
- url,
- *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code]))
+ raise MinionError(
+ "HTTP error {0} reading {1}: {3}".format(
+ exc.code,
+ url,
+ *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code]
+ )
+ )
except URLError as exc:
- raise MinionError('Error reading {0}: {1}'.format(url, exc.reason))
+ raise MinionError("Error reading {0}: {1}".format(url, exc.reason))
finally:
if destfp is not None:
destfp.close()
def get_template(
- self,
- url,
- dest,
- template='jinja',
- makedirs=False,
- saltenv='base',
- cachedir=None,
- **kwargs):
- '''
+ self,
+ url,
+ dest,
+ template="jinja",
+ makedirs=False,
+ saltenv="base",
+ cachedir=None,
+ **kwargs
+ ):
+ """
Cache a file then process it as a template
- '''
- if 'env' in kwargs:
+ """
+ if "env" in kwargs:
# "env" is not supported; Use "saltenv".
- kwargs.pop('env')
+ kwargs.pop("env")
- kwargs['saltenv'] = saltenv
+ kwargs["saltenv"] = saltenv
url_data = urlparse(url)
sfn = self.cache_file(url, saltenv, cachedir=cachedir)
if not sfn or not os.path.exists(sfn):
- return ''
+ return ""
if template in salt.utils.templates.TEMPLATE_REGISTRY:
- data = salt.utils.templates.TEMPLATE_REGISTRY[template](
- sfn,
- **kwargs
- )
+ data = salt.utils.templates.TEMPLATE_REGISTRY[template](sfn, **kwargs)
else:
log.error(
- 'Attempted to render template with unavailable engine %s',
- template
+ "Attempted to render template with unavailable engine %s", template
)
- return ''
- if not data['result']:
+ return ""
+ if not data["result"]:
# Failed to render the template
- log.error('Failed to render template with error: %s', data['data'])
- return ''
+ log.error("Failed to render template with error: %s", data["data"])
+ return ""
if not dest:
# No destination passed, set the dest as an extrn_files cache
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
@@ -765,15 +803,15 @@ class Client(object):
if makedirs:
os.makedirs(destdir)
else:
- salt.utils.files.safe_rm(data['data'])
- return ''
- shutil.move(data['data'], dest)
+ salt.utils.files.safe_rm(data["data"])
+ return ""
+ shutil.move(data["data"], dest)
return dest
def _extrn_path(self, url, saltenv, cachedir=None):
- '''
+ """
Return the extrn_filepath for a given url
- '''
+ """
url_data = urlparse(url)
if salt.utils.platform.is_windows():
netloc = salt.utils.path.sanitize_win_path(url_data.netloc)
@@ -781,144 +819,137 @@ class Client(object):
netloc = url_data.netloc
# Strip user:pass from URLs
- netloc = netloc.split('@')[-1]
+ netloc = netloc.split("@")[-1]
if cachedir is None:
- cachedir = self.opts['cachedir']
+ cachedir = self.opts["cachedir"]
elif not os.path.isabs(cachedir):
- cachedir = os.path.join(self.opts['cachedir'], cachedir)
+ cachedir = os.path.join(self.opts["cachedir"], cachedir)
if url_data.query:
- file_name = '-'.join([url_data.path, url_data.query])
+ file_name = "-".join([url_data.path, url_data.query])
else:
file_name = url_data.path
if len(file_name) > MAX_FILENAME_LENGTH:
file_name = salt.utils.hashutils.sha256_digest(file_name)
- return salt.utils.path.join(
- cachedir,
- 'extrn_files',
- saltenv,
- netloc,
- file_name
- )
+ return salt.utils.path.join(cachedir, "extrn_files", saltenv, netloc, file_name)
class PillarClient(Client):
- '''
+ """
Used by pillar to handle fileclient requests
- '''
- def _find_file(self, path, saltenv='base'):
- '''
+ """
+
+ def _find_file(self, path, saltenv="base"):
+ """
Locate the file path
- '''
- fnd = {'path': '',
- 'rel': ''}
+ """
+ fnd = {"path": "", "rel": ""}
if salt.utils.url.is_escaped(path):
# The path arguments are escaped
path = salt.utils.url.unescape(path)
- for root in self.opts['pillar_roots'].get(saltenv, []):
+ for root in self.opts["pillar_roots"].get(saltenv, []):
full = os.path.join(root, path)
if os.path.isfile(full):
- fnd['path'] = full
- fnd['rel'] = path
+ fnd["path"] = full
+ fnd["rel"] = path
return fnd
return fnd
- def get_file(self,
- path,
- dest='',
- makedirs=False,
- saltenv='base',
- gzip=None,
- cachedir=None):
- '''
+ def get_file(
+ self, path, dest="", makedirs=False, saltenv="base", gzip=None, cachedir=None
+ ):
+ """
Copies a file from the local files directory into :param:`dest`
gzip compression settings are ignored for local files
- '''
+ """
path = self._check_proto(path)
fnd = self._find_file(path, saltenv)
- fnd_path = fnd.get('path')
+ fnd_path = fnd.get("path")
if not fnd_path:
- return ''
+ return ""
return fnd_path
- def file_list(self, saltenv='base', prefix=''):
- '''
+ def file_list(self, saltenv="base", prefix=""):
+ """
Return a list of files in the given environment
with optional relative prefix path to limit directory traversal
- '''
+ """
ret = []
- prefix = prefix.strip('/')
- for path in self.opts['pillar_roots'].get(saltenv, []):
+ prefix = prefix.strip("/")
+ for path in self.opts["pillar_roots"].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
# Don't walk any directories that match file_ignore_regex or glob
- dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
+ dirs[:] = [
+ d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)
+ ]
for fname in files:
relpath = os.path.relpath(os.path.join(root, fname), path)
ret.append(salt.utils.data.decode(relpath))
return ret
- def file_list_emptydirs(self, saltenv='base', prefix=''):
- '''
+ def file_list_emptydirs(self, saltenv="base", prefix=""):
+ """
List the empty dirs in the pillar_roots
with optional relative prefix path to limit directory traversal
- '''
+ """
ret = []
- prefix = prefix.strip('/')
- for path in self.opts['pillar_roots'].get(saltenv, []):
+ prefix = prefix.strip("/")
+ for path in self.opts["pillar_roots"].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
# Don't walk any directories that match file_ignore_regex or glob
- dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
+ dirs[:] = [
+ d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)
+ ]
if not dirs and not files:
ret.append(salt.utils.data.decode(os.path.relpath(root, path)))
return ret
- def dir_list(self, saltenv='base', prefix=''):
- '''
+ def dir_list(self, saltenv="base", prefix=""):
+ """
List the dirs in the pillar_roots
with optional relative prefix path to limit directory traversal
- '''
+ """
ret = []
- prefix = prefix.strip('/')
- for path in self.opts['pillar_roots'].get(saltenv, []):
+ prefix = prefix.strip("/")
+ for path in self.opts["pillar_roots"].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
ret.append(salt.utils.data.decode(os.path.relpath(root, path)))
return ret
- def __get_file_path(self, path, saltenv='base'):
- '''
+ def __get_file_path(self, path, saltenv="base"):
+ """
Return either a file path or the result of a remote find_file call.
- '''
+ """
try:
path = self._check_proto(path)
except MinionError as err:
# Local file path
if not os.path.isfile(path):
log.warning(
- 'specified file %s is not present to generate hash: %s',
- path, err
+ "specified file %s is not present to generate hash: %s", path, err
)
return None
else:
return path
return self._find_file(path, saltenv)
- def hash_file(self, path, saltenv='base'):
- '''
+ def hash_file(self, path, saltenv="base"):
+ """
        Return the hash of a file. To get the hash of a file in the
        pillar_roots, prepend the path with salt://; otherwise, prepend the
        path with / for a local file.
- '''
+ """
ret = {}
fnd = self.__get_file_path(path, saltenv)
if fnd is None:
@@ -926,25 +957,25 @@ class PillarClient(Client):
try:
# Remote file path (self._find_file() invoked)
- fnd_path = fnd['path']
+ fnd_path = fnd["path"]
except TypeError:
# Local file path
fnd_path = fnd
- hash_type = self.opts.get('hash_type', 'md5')
- ret['hsum'] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
- ret['hash_type'] = hash_type
+ hash_type = self.opts.get("hash_type", "md5")
+ ret["hsum"] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
+ ret["hash_type"] = hash_type
return ret
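A hedged sketch of the two addressing forms the docstring above describes;
the file names are illustrative and client is a PillarClient as sketched
earlier:

    client.hash_file("salt://top.sls", saltenv="base")  # resolved via pillar_roots
    client.hash_file("/etc/hosts")                      # absolute local path
    # Either call returns {"hsum": "<digest>", "hash_type": "<opts hash_type>"},
    # or {} when the file cannot be found.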
- def hash_and_stat_file(self, path, saltenv='base'):
- '''
+ def hash_and_stat_file(self, path, saltenv="base"):
+ """
        Return the hash of a file. To get the hash of a file in the
        pillar_roots, prepend the path with salt://; otherwise, prepend the
        path with / for a local file.
Additionally, return the stat result of the file, or None if no stat
results were found.
- '''
+ """
ret = {}
fnd = self.__get_file_path(path, saltenv)
if fnd is None:
@@ -952,8 +983,8 @@ class PillarClient(Client):
try:
# Remote file path (self._find_file() invoked)
- fnd_path = fnd['path']
- fnd_stat = fnd.get('stat')
+ fnd_path = fnd["path"]
+ fnd_stat = fnd.get("stat")
except TypeError:
# Local file path
fnd_path = fnd
@@ -962,34 +993,34 @@ class PillarClient(Client):
except Exception: # pylint: disable=broad-except
fnd_stat = None
- hash_type = self.opts.get('hash_type', 'md5')
- ret['hsum'] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
- ret['hash_type'] = hash_type
+ hash_type = self.opts.get("hash_type", "md5")
+ ret["hsum"] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
+ ret["hash_type"] = hash_type
return ret, fnd_stat
- def list_env(self, saltenv='base'):
- '''
+ def list_env(self, saltenv="base"):
+ """
Return a list of the files in the file server's specified environment
- '''
+ """
return self.file_list(saltenv)
def master_opts(self):
- '''
+ """
Return the master opts data
- '''
+ """
return self.opts
def envs(self):
- '''
+ """
Return the available environments
- '''
+ """
ret = []
- for saltenv in self.opts['pillar_roots']:
+ for saltenv in self.opts["pillar_roots"]:
ret.append(saltenv)
return ret
def master_tops(self):
- '''
+ """
Originally returned information via the external_nodes subsystem.
External_nodes was deprecated and removed in
2014.1.6 in favor of master_tops (which had been around since pre-0.17).
@@ -1000,27 +1031,28 @@ class PillarClient(Client):
if 'external_nodes' not in opts: return {}
So since external_nodes is gone now, we are just returning the
empty dict.
- '''
+ """
return {}
class RemoteClient(Client):
- '''
+ """
Interact with the salt master file server.
- '''
+ """
+
def __init__(self, opts):
Client.__init__(self, opts)
self._closing = False
self.channel = salt.transport.client.ReqChannel.factory(self.opts)
- if hasattr(self.channel, 'auth'):
+ if hasattr(self.channel, "auth"):
self.auth = self.channel.auth
else:
- self.auth = ''
+ self.auth = ""
def _refresh_channel(self):
- '''
+ """
Reset the channel, in the event of an interruption
- '''
+ """
# Close the previous channel
self.channel.close()
# Instantiate a new one
@@ -1030,6 +1062,7 @@ class RemoteClient(Client):
# pylint: disable=W1701
def __del__(self):
self.destroy()
+
# pylint: enable=W1701
def destroy(self):
@@ -1045,19 +1078,15 @@ class RemoteClient(Client):
if channel is not None:
channel.close()
- def get_file(self,
- path,
- dest='',
- makedirs=False,
- saltenv='base',
- gzip=None,
- cachedir=None):
- '''
+ def get_file(
+ self, path, dest="", makedirs=False, saltenv="base", gzip=None, cachedir=None
+ ):
+ """
        Get a single file from the salt-master. path must be a salt server
        location, i.e. salt://path/to/file. If dest is omitted, the downloaded
        file will be placed in the minion cache.
- '''
+ """
path, senv = salt.utils.url.split_env(path)
if senv:
saltenv = senv
@@ -1074,20 +1103,18 @@ class RemoteClient(Client):
# Check if file exists on server, before creating files and
# directories
- if hash_server == '':
- log.debug(
- 'Could not find file \'%s\' in saltenv \'%s\'',
- path, saltenv
- )
+ if hash_server == "":
+ log.debug("Could not find file '%s' in saltenv '%s'", path, saltenv)
return False
# If dest is a directory, rewrite dest with filename
- if dest is not None \
- and (os.path.isdir(dest) or dest.endswith(('/', '\\'))):
+ if dest is not None and (os.path.isdir(dest) or dest.endswith(("/", "\\"))):
dest = os.path.join(dest, os.path.basename(path))
log.debug(
- 'In saltenv \'%s\', \'%s\' is a directory. Changing dest to '
- '\'%s\'', saltenv, os.path.dirname(dest), dest
+ "In saltenv '%s', '%s' is a directory. Changing dest to " "'%s'",
+ saltenv,
+ os.path.dirname(dest),
+ dest,
)
# Hash compare local copy with master and skip download
@@ -1097,22 +1124,24 @@ class RemoteClient(Client):
rel_path = self._check_proto(path)
log.debug(
- 'In saltenv \'%s\', looking at rel_path \'%s\' to resolve '
- '\'%s\'', saltenv, rel_path, path
+ "In saltenv '%s', looking at rel_path '%s' to resolve " "'%s'",
+ saltenv,
+ rel_path,
+ path,
)
- with self._cache_loc(
- rel_path, saltenv, cachedir=cachedir) as cache_dest:
+ with self._cache_loc(rel_path, saltenv, cachedir=cachedir) as cache_dest:
dest2check = cache_dest
log.debug(
- 'In saltenv \'%s\', ** considering ** path \'%s\' to resolve '
- '\'%s\'', saltenv, dest2check, path
+ "In saltenv '%s', ** considering ** path '%s' to resolve " "'%s'",
+ saltenv,
+ dest2check,
+ path,
)
if dest2check and os.path.isfile(dest2check):
if not salt.utils.platform.is_windows():
- hash_local, stat_local = \
- self.hash_and_stat_file(dest2check, saltenv)
+ hash_local, stat_local = self.hash_and_stat_file(dest2check, saltenv)
try:
mode_local = stat_local[0]
except (IndexError, TypeError):
@@ -1125,18 +1154,15 @@ class RemoteClient(Client):
return dest2check
log.debug(
- 'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'',
- saltenv, path
+ "Fetching file from saltenv '%s', ** attempting ** '%s'", saltenv, path
)
d_tries = 0
transport_tries = 0
path = self._check_proto(path)
- load = {'path': path,
- 'saltenv': saltenv,
- 'cmd': '_serve_file'}
+ load = {"path": path, "saltenv": saltenv, "cmd": "_serve_file"}
if gzip:
gzip = int(gzip)
- load['gzip'] = gzip
+ load["gzip"] = gzip
fn_ = None
if dest:
@@ -1152,15 +1178,17 @@ class RemoteClient(Client):
return False
# We need an open filehandle here, that's why we're not using a
# with clause:
- fn_ = salt.utils.files.fopen(dest, 'wb+') # pylint: disable=resource-leakage
+ # pylint: disable=resource-leakage
+ fn_ = salt.utils.files.fopen(dest, "wb+")
+ # pylint: enable=resource-leakage
else:
- log.debug('No dest file found')
+ log.debug("No dest file found")
while True:
if not fn_:
- load['loc'] = 0
+ load["loc"] = 0
else:
- load['loc'] = fn_.tell()
+ load["loc"] = fn_.tell()
data = self.channel.send(load, raw=True)
if six.PY3:
# Sometimes the source is local (eg when using
@@ -1170,43 +1198,47 @@ class RemoteClient(Client):
# strings for the top-level keys to simplify things.
data = decode_dict_keys_to_str(data)
try:
- if not data['data']:
- if not fn_ and data['dest']:
+ if not data["data"]:
+ if not fn_ and data["dest"]:
# This is a 0 byte file on the master
with self._cache_loc(
- data['dest'],
- saltenv,
- cachedir=cachedir) as cache_dest:
+ data["dest"], saltenv, cachedir=cachedir
+ ) as cache_dest:
dest = cache_dest
- with salt.utils.files.fopen(cache_dest, 'wb+') as ofile:
- ofile.write(data['data'])
- if 'hsum' in data and d_tries < 3:
+ with salt.utils.files.fopen(cache_dest, "wb+") as ofile:
+ ofile.write(data["data"])
+ if "hsum" in data and d_tries < 3:
# Master has prompted a file verification, if the
# verification fails, re-download the file. Try 3 times
d_tries += 1
- hsum = salt.utils.hashutils.get_hash(dest, salt.utils.stringutils.to_str(data.get('hash_type', b'md5')))
- if hsum != data['hsum']:
+ hsum = salt.utils.hashutils.get_hash(
+ dest,
+ salt.utils.stringutils.to_str(
+ data.get("hash_type", b"md5")
+ ),
+ )
+ if hsum != data["hsum"]:
log.warning(
- 'Bad download of file %s, attempt %d of 3',
- path, d_tries
+ "Bad download of file %s, attempt %d of 3",
+ path,
+ d_tries,
)
continue
break
if not fn_:
with self._cache_loc(
- data['dest'],
- saltenv,
- cachedir=cachedir) as cache_dest:
+ data["dest"], saltenv, cachedir=cachedir
+ ) as cache_dest:
dest = cache_dest
# If a directory was formerly cached at this path, then
# remove it to avoid a traceback trying to write the file
if os.path.isdir(dest):
salt.utils.files.rm_rf(dest)
- fn_ = salt.utils.atomicfile.atomic_open(dest, 'wb+')
- if data.get('gzip', None):
- data = salt.utils.gzip_util.uncompress(data['data'])
+ fn_ = salt.utils.atomicfile.atomic_open(dest, "wb+")
+ if data.get("gzip", None):
+ data = salt.utils.gzip_util.uncompress(data["data"])
else:
- data = data['data']
+ data = data["data"]
if six.PY3 and isinstance(data, str):
data = data.encode()
fn_.write(data)
@@ -1218,110 +1250,112 @@ class RemoteClient(Client):
data_type = six.text_type(type(data))
transport_tries += 1
log.warning(
- 'Data transport is broken, got: %s, type: %s, '
- 'exception: %s, attempt %d of 3',
- data, data_type, exc, transport_tries
+ "Data transport is broken, got: %s, type: %s, "
+ "exception: %s, attempt %d of 3",
+ data,
+ data_type,
+ exc,
+ transport_tries,
)
self._refresh_channel()
if transport_tries > 3:
log.error(
- 'Data transport is broken, got: %s, type: %s, '
- 'exception: %s, retry attempts exhausted',
- data, data_type, exc
+ "Data transport is broken, got: %s, type: %s, "
+ "exception: %s, retry attempts exhausted",
+ data,
+ data_type,
+ exc,
)
break
if fn_:
fn_.close()
- log.info(
- 'Fetching file from saltenv \'%s\', ** done ** \'%s\'',
- saltenv, path
- )
+ log.info("Fetching file from saltenv '%s', ** done ** '%s'", saltenv, path)
else:
log.debug(
- 'In saltenv \'%s\', we are ** missing ** the file \'%s\'',
- saltenv, path
+ "In saltenv '%s', we are ** missing ** the file '%s'", saltenv, path
)
return dest
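For orientation, a sketch of the typical minion-side call into get_file; the
config path and salt:// path are assumptions, and get_file_client() picks
RemoteClient unless file_client is set to local:

    import salt.config
    import salt.fileclient

    opts = salt.config.minion_config("/etc/salt/minion")  # assumed config path
    client = salt.fileclient.get_file_client(opts)
    # A trailing slash on dest means "download into this directory":
    dest = client.get_file("salt://files/app.conf", dest="/tmp/", saltenv="base")
    # dest is the downloaded path on success, False if the master lacks the file.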
- def file_list(self, saltenv='base', prefix=''):
- '''
+ def file_list(self, saltenv="base", prefix=""):
+ """
List the files on the master
- '''
- load = {'saltenv': saltenv,
- 'prefix': prefix,
- 'cmd': '_file_list'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"saltenv": saltenv, "prefix": prefix, "cmd": "_file_list"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
- def file_list_emptydirs(self, saltenv='base', prefix=''):
- '''
+ def file_list_emptydirs(self, saltenv="base", prefix=""):
+ """
List the empty dirs on the master
- '''
- load = {'saltenv': saltenv,
- 'prefix': prefix,
- 'cmd': '_file_list_emptydirs'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"saltenv": saltenv, "prefix": prefix, "cmd": "_file_list_emptydirs"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
- def dir_list(self, saltenv='base', prefix=''):
- '''
+ def dir_list(self, saltenv="base", prefix=""):
+ """
List the dirs on the master
- '''
- load = {'saltenv': saltenv,
- 'prefix': prefix,
- 'cmd': '_dir_list'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"saltenv": saltenv, "prefix": prefix, "cmd": "_dir_list"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
- def symlink_list(self, saltenv='base', prefix=''):
- '''
+ def symlink_list(self, saltenv="base", prefix=""):
+ """
List symlinked files and dirs on the master
- '''
- load = {'saltenv': saltenv,
- 'prefix': prefix,
- 'cmd': '_symlink_list'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"saltenv": saltenv, "prefix": prefix, "cmd": "_symlink_list"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
- def __hash_and_stat_file(self, path, saltenv='base'):
- '''
+ def __hash_and_stat_file(self, path, saltenv="base"):
+ """
Common code for hashing and stating files
- '''
+ """
try:
path = self._check_proto(path)
except MinionError as err:
if not os.path.isfile(path):
log.warning(
- 'specified file %s is not present to generate hash: %s',
- path, err
+ "specified file %s is not present to generate hash: %s", path, err
)
return {}, None
else:
ret = {}
- hash_type = self.opts.get('hash_type', 'md5')
- ret['hsum'] = salt.utils.hashutils.get_hash(path, form=hash_type)
- ret['hash_type'] = hash_type
+ hash_type = self.opts.get("hash_type", "md5")
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, form=hash_type)
+ ret["hash_type"] = hash_type
return ret
- load = {'path': path,
- 'saltenv': saltenv,
- 'cmd': '_file_hash'}
+ load = {"path": path, "saltenv": saltenv, "cmd": "_file_hash"}
return self.channel.send(load)
- def hash_file(self, path, saltenv='base'):
- '''
+ def hash_file(self, path, saltenv="base"):
+ """
        Return the hash of a file. To get the hash of a file on the salt
        master file server, prepend the path with salt://; otherwise, prepend
        the path with / for a local file.
- '''
+ """
return self.__hash_and_stat_file(path, saltenv)
- def hash_and_stat_file(self, path, saltenv='base'):
- '''
+ def hash_and_stat_file(self, path, saltenv="base"):
+ """
The same as hash_file, but also return the file's mode, or None if no
mode data is present.
- '''
+ """
hash_result = self.hash_file(path, saltenv)
try:
path = self._check_proto(path)
@@ -1333,67 +1367,75 @@ class RemoteClient(Client):
return hash_result, list(os.stat(path))
except Exception: # pylint: disable=broad-except
return hash_result, None
- load = {'path': path,
- 'saltenv': saltenv,
- 'cmd': '_file_find'}
+ load = {"path": path, "saltenv": saltenv, "cmd": "_file_find"}
fnd = self.channel.send(load)
try:
- stat_result = fnd.get('stat')
+ stat_result = fnd.get("stat")
except AttributeError:
stat_result = None
return hash_result, stat_result
- def list_env(self, saltenv='base'):
- '''
+ def list_env(self, saltenv="base"):
+ """
Return a list of the files in the file server's specified environment
- '''
- load = {'saltenv': saltenv,
- 'cmd': '_file_list'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"saltenv": saltenv, "cmd": "_file_list"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
def envs(self):
- '''
+ """
Return a list of available environments
- '''
- load = {'cmd': '_file_envs'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"cmd": "_file_envs"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
def master_opts(self):
- '''
+ """
Return the master opts data
- '''
- load = {'cmd': '_master_opts'}
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ """
+ load = {"cmd": "_master_opts"}
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
def master_tops(self):
- '''
+ """
Return the metadata derived from the master_tops system
- '''
+ """
log.debug(
- 'The _ext_nodes master function has been renamed to _master_tops. '
- 'To ensure compatibility when using older Salt masters we will '
- 'continue to invoke the function as _ext_nodes until the '
- 'Magnesium release.'
+ "The _ext_nodes master function has been renamed to _master_tops. "
+ "To ensure compatibility when using older Salt masters we will "
+ "continue to invoke the function as _ext_nodes until the "
+ "Magnesium release."
)
# TODO: Change back to _master_tops
# for Magnesium release
- load = {'cmd': '_ext_nodes',
- 'id': self.opts['id'],
- 'opts': self.opts}
+ load = {"cmd": "_ext_nodes", "id": self.opts["id"], "opts": self.opts}
if self.auth:
- load['tok'] = self.auth.gen_token(b'salt')
- return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
+ load["tok"] = self.auth.gen_token(b"salt")
+ return (
+ salt.utils.data.decode(self.channel.send(load))
+ if six.PY2
else self.channel.send(load)
+ )
class FSClient(RemoteClient):
- '''
+ """
A local client that uses the RemoteClient but substitutes the channel for
the FSChan object
- '''
+ """
+
def __init__(self, opts): # pylint: disable=W0231
Client.__init__(self, opts) # pylint: disable=W0233
self._closing = False
@@ -1407,9 +1449,10 @@ LocalClient = FSClient
class DumbAuth(object):
- '''
+ """
The dumbauth class is used to stub out auth calls fired from the FSClient
subsystem
- '''
+ """
+
def gen_token(self, clear_tok):
return clear_tok
diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py
index 919987e2fcc..35b6c6e17b9 100644
--- a/salt/fileserver/__init__.py
+++ b/salt/fileserver/__init__.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
File server pluggable modules and generic backend functions
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
@@ -21,25 +21,28 @@ import salt.utils.files
import salt.utils.path
import salt.utils.url
import salt.utils.versions
+
+# Import 3rd-party libs
+from salt.ext import six
from salt.utils.args import get_function_argspec as _argspec
from salt.utils.decorators import ensure_unicode_args
try:
from collections.abc import Sequence
except ImportError:
+ # pylint: disable=no-name-in-module
from collections import Sequence
-# Import 3rd-party libs
-from salt.ext import six
+ # pylint: enable=no-name-in-module
log = logging.getLogger(__name__)
def _unlock_cache(w_lock):
- '''
+ """
Unlock a FS file/dir based lock
- '''
+ """
if not os.path.exists(w_lock):
return
try:
@@ -48,7 +51,7 @@ def _unlock_cache(w_lock):
elif os.path.isfile(w_lock):
os.unlink(w_lock)
except (OSError, IOError) as exc:
- log.trace('Error removing lockfile %s: %s', w_lock, exc)
+ log.trace("Error removing lockfile %s: %s", w_lock, exc)
def _lock_cache(w_lock):
@@ -59,16 +62,16 @@ def _lock_cache(w_lock):
raise
return False
else:
- log.trace('Lockfile %s created', w_lock)
+ log.trace("Lockfile %s created", w_lock)
return True
def wait_lock(lk_fn, dest, wait_timeout=0):
- '''
+ """
If the write lock is there, check to see if the file is actually being
written. If there is no change in the file size after a short sleep,
remove the lock and move forward.
- '''
+ """
if not os.path.exists(lk_fn):
return False
if not os.path.exists(dest):
@@ -102,7 +105,7 @@ def wait_lock(lk_fn, dest, wait_timeout=0):
if timeout:
if time.time() > timeout:
raise ValueError(
- 'Timeout({0}s) for {1} (lock: {2}) elapsed'.format(
+ "Timeout({0}s) for {1} (lock: {2}) elapsed".format(
wait_timeout, dest, lk_fn
)
)
@@ -110,11 +113,11 @@ def wait_lock(lk_fn, dest, wait_timeout=0):
def check_file_list_cache(opts, form, list_cache, w_lock):
- '''
+ """
Checks the cache file to see if there is a new enough file list cache, and
    returns the match (if found), along with booleans used by the fileserver
    backend to determine if the cache needs to be refreshed/written.
- '''
+ """
refresh_cache = False
save_cache = True
serial = salt.payload.Serial(opts)
@@ -138,27 +141,35 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
file_mtime = int(cache_stat.st_mtime)
if file_mtime > current_time:
log.debug(
- 'Cache file modified time is in the future, ignoring. '
- 'file=%s mtime=%s current_time=%s',
- list_cache, current_time, file_mtime
+ "Cache file modified time is in the future, ignoring. "
+ "file=%s mtime=%s current_time=%s",
+ list_cache,
+ current_time,
+ file_mtime,
)
age = 0
else:
age = current_time - file_mtime
else:
            # if filelist does not exist yet, mark it as expired
- age = opts.get('fileserver_list_cache_time', 20) + 1
+ age = opts.get("fileserver_list_cache_time", 20) + 1
if age < 0:
# Cache is from the future! Warn and mark cache invalid.
- log.warning('The file list_cache was created in the future!')
- if 0 <= age < opts.get('fileserver_list_cache_time', 20):
+ log.warning("The file list_cache was created in the future!")
+ if 0 <= age < opts.get("fileserver_list_cache_time", 20):
# Young enough! Load this sucker up!
- with salt.utils.files.fopen(list_cache, 'rb') as fp_:
+ with salt.utils.files.fopen(list_cache, "rb") as fp_:
log.debug(
"Returning file list from cache: age=%s cache_time=%s %s",
- age, opts.get('fileserver_list_cache_time', 20), list_cache
+ age,
+ opts.get("fileserver_list_cache_time", 20),
+ list_cache,
+ )
+ return (
+ salt.utils.data.decode(serial.load(fp_).get(form, [])),
+ False,
+ False,
)
- return salt.utils.data.decode(serial.load(fp_).get(form, [])), False, False
elif _lock_cache(w_lock):
# Set the w_lock and go
refresh_cache = True
@@ -174,27 +185,27 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
def write_file_list_cache(opts, data, list_cache, w_lock):
- '''
+ """
    Writes the file list cache to disk, then releases the write lock that
    check_file_list_cache() set while the list was being refreshed.
- '''
+ """
serial = salt.payload.Serial(opts)
- with salt.utils.files.fopen(list_cache, 'w+b') as fp_:
+ with salt.utils.files.fopen(list_cache, "w+b") as fp_:
fp_.write(serial.dumps(data))
_unlock_cache(w_lock)
- log.trace('Lockfile %s removed', w_lock)
+ log.trace("Lockfile %s removed", w_lock)
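Taken together, check_file_list_cache and write_file_list_cache form a
read-refresh-write cycle. A hedged sketch of the intended call sequence; opts
is assumed to be a loaded master opts dict, and the cache paths and the
list-building helper are hypothetical:

    import salt.fileserver

    list_cache = "/var/cache/salt/master/file_lists/roots/base.p"  # assumed
    w_lock = "/var/cache/salt/master/file_lists/roots/.base.w"     # assumed
    cached, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
        opts, "files", list_cache, w_lock
    )
    if cached is None and refresh_cache:
        data = {"files": build_file_list()}  # build_file_list() is hypothetical
        if save_cache:
            salt.fileserver.write_file_list_cache(opts, data, list_cache, w_lock)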
def check_env_cache(opts, env_cache):
- '''
+ """
Returns cached env names, if present. Otherwise returns None.
- '''
+ """
if not os.path.isfile(env_cache):
return None
try:
- with salt.utils.files.fopen(env_cache, 'rb') as fp_:
- log.trace('Returning env cache data from %s', env_cache)
+ with salt.utils.files.fopen(env_cache, "rb") as fp_:
+ log.trace("Returning env cache data from %s", env_cache)
serial = salt.payload.Serial(opts)
return salt.utils.data.decode(serial.load(fp_))
except (IOError, OSError):
@@ -203,9 +214,9 @@ def check_env_cache(opts, env_cache):
def generate_mtime_map(opts, path_map):
- '''
+ """
Generate a dict of filename -> mtime
- '''
+ """
file_map = {}
for saltenv, path_list in six.iteritems(path_map):
for path in path_list:
@@ -221,17 +232,16 @@ def generate_mtime_map(opts, path_map):
except (OSError, IOError):
# skip dangling symlinks
log.info(
- 'Failed to get mtime on %s, dangling symlink?',
- file_path
+ "Failed to get mtime on %s, dangling symlink?", file_path
)
continue
return file_map
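A small sketch of the input and output shapes; opts is assumed to be a loaded
master opts dict, and the paths and mtimes are illustrative:

    path_map = {"base": ["/srv/salt"], "dev": ["/srv/salt-dev"]}
    new_map = salt.fileserver.generate_mtime_map(opts, path_map)
    # e.g. {"/srv/salt/top.sls": 1588888888.0, ...}
    if salt.fileserver.diff_mtime_map(old_map, new_map):  # old_map from a prior run
        pass  # something changed on disk; callers refresh their file lists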
def diff_mtime_map(map1, map2):
- '''
+ """
Is there a change to the mtime map? return a boolean
- '''
+ """
# check if the mtimes are the same
if sorted(map1) != sorted(map2):
return True
@@ -247,12 +257,12 @@ def diff_mtime_map(map1, map2):
def reap_fileserver_cache_dir(cache_base, find_func):
- '''
+ """
Remove unused cache items assuming the cache directory follows a directory
convention:
cache_base -> saltenv -> relpath
- '''
+ """
for saltenv in os.listdir(cache_base):
env_base = os.path.join(cache_base, saltenv)
for root, dirs, files in salt.utils.path.os_walk(env_base):
@@ -269,49 +279,44 @@ def reap_fileserver_cache_dir(cache_base, find_func):
file_path = os.path.join(root, file_)
file_rel_path = os.path.relpath(file_path, env_base)
try:
- filename, _, hash_type = file_rel_path.rsplit('.', 2)
+ filename, _, hash_type = file_rel_path.rsplit(".", 2)
except ValueError:
log.warning(
- 'Found invalid hash file [%s] when attempting to reap '
- 'cache directory', file_
+ "Found invalid hash file [%s] when attempting to reap "
+ "cache directory",
+ file_,
)
continue
# do we have the file?
ret = find_func(filename, saltenv=saltenv)
            # if we don't actually have the file, let's clean up the cache
# object
- if ret['path'] == '':
+ if ret["path"] == "":
os.unlink(file_path)
def is_file_ignored(opts, fname):
- '''
+ """
If file_ignore_regex or file_ignore_glob were given in config,
compare the given file path against all of them and return True
on the first match.
- '''
- if opts['file_ignore_regex']:
- for regex in opts['file_ignore_regex']:
+ """
+ if opts["file_ignore_regex"]:
+ for regex in opts["file_ignore_regex"]:
if re.search(regex, fname):
- log.debug(
- 'File matching file_ignore_regex. Skipping: %s',
- fname
- )
+ log.debug("File matching file_ignore_regex. Skipping: %s", fname)
return True
- if opts['file_ignore_glob']:
- for glob in opts['file_ignore_glob']:
+ if opts["file_ignore_glob"]:
+ for glob in opts["file_ignore_glob"]:
if fnmatch.fnmatch(fname, glob):
- log.debug(
- 'File matching file_ignore_glob. Skipping: %s',
- fname
- )
+ log.debug("File matching file_ignore_glob. Skipping: %s", fname)
return True
return False
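This helper is easy to exercise directly; the patterns below are illustrative,
not shipped defaults:

    import salt.fileserver

    opts = {
        "file_ignore_regex": [r"/\.svn($|/)"],
        "file_ignore_glob": ["*.pyc"],
    }
    salt.fileserver.is_file_ignored(opts, "app/.svn/entries")  # True (regex)
    salt.fileserver.is_file_ignored(opts, "app/mod.pyc")       # True (glob)
    salt.fileserver.is_file_ignored(opts, "app/init.sls")      # False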
-def clear_lock(clear_func, role, remote=None, lock_type='update'):
- '''
+def clear_lock(clear_func, role, remote=None, lock_type="update"):
+ """
Function to allow non-fileserver functions to clear update locks
clear_func
@@ -332,36 +337,37 @@ def clear_lock(clear_func, role, remote=None, lock_type='update'):
Which type of lock to clear
Returns the return data from ``clear_func``.
- '''
- msg = 'Clearing {0} lock for {1} remotes'.format(lock_type, role)
+ """
+ msg = "Clearing {0} lock for {1} remotes".format(lock_type, role)
if remote:
- msg += ' matching {0}'.format(remote)
+ msg += " matching {0}".format(remote)
log.debug(msg)
return clear_func(remote=remote, lock_type=lock_type)
class Fileserver(object):
- '''
+ """
Create a fileserver wrapper object that wraps the fileserver functions and
iterates over them to execute the desired function within the scope of the
desired fileserver backend.
- '''
+ """
+
def __init__(self, opts):
self.opts = opts
- self.servers = salt.loader.fileserver(opts, opts['fileserver_backend'])
+ self.servers = salt.loader.fileserver(opts, opts["fileserver_backend"])
def backends(self, back=None):
- '''
+ """
Return the backend list
- '''
+ """
if not back:
- back = self.opts['fileserver_backend']
+ back = self.opts["fileserver_backend"]
else:
if not isinstance(back, list):
try:
- back = back.split(',')
+ back = back.split(",")
except AttributeError:
- back = six.text_type(back).split(',')
+ back = six.text_type(back).split(",")
if isinstance(back, Sequence):
# The test suite uses an ImmutableList type (based on
@@ -379,86 +385,84 @@ class Fileserver(object):
# .keys() attribute rather than on the LazyDict itself.
server_funcs = self.servers.keys()
try:
- subtract_only = all((x.startswith('-') for x in back))
+ subtract_only = all((x.startswith("-") for x in back))
except AttributeError:
pass
else:
if subtract_only:
# Only subtracting backends from enabled ones
- ret = self.opts['fileserver_backend']
+ ret = self.opts["fileserver_backend"]
for sub in back:
- if '{0}.envs'.format(sub[1:]) in server_funcs:
+ if "{0}.envs".format(sub[1:]) in server_funcs:
ret.remove(sub[1:])
- elif '{0}.envs'.format(sub[1:-2]) in server_funcs:
+ elif "{0}.envs".format(sub[1:-2]) in server_funcs:
ret.remove(sub[1:-2])
return ret
for sub in back:
- if '{0}.envs'.format(sub) in server_funcs:
+ if "{0}.envs".format(sub) in server_funcs:
ret.append(sub)
- elif '{0}.envs'.format(sub[:-2]) in server_funcs:
+ elif "{0}.envs".format(sub[:-2]) in server_funcs:
ret.append(sub[:-2])
return ret
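A sketch of the selection semantics, assuming a master_opts dict where
fileserver_backend is ["roots", "git"]:

    fs = salt.fileserver.Fileserver(master_opts)
    fs.backends()         # -> ["roots", "git"], everything enabled
    fs.backends("roots")  # -> ["roots"], explicit selection
    fs.backends("-git")   # -> ["roots"], subtract-only form
    # Legacy names with an "fs" suffix (e.g. "gitfs") are also accepted via
    # the sub[:-2] / sub[1:-2] fallbacks above.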
def master_opts(self, load):
- '''
+ """
Simplify master opts
- '''
+ """
return self.opts
def update_opts(self):
        # This fixes function monkey patching by pillar
for name, func in self.servers.items():
try:
- if '__opts__' in func.__globals__:
- func.__globals__['__opts__'].update(self.opts)
+ if "__opts__" in func.__globals__:
+ func.__globals__["__opts__"].update(self.opts)
except AttributeError:
pass
def clear_cache(self, back=None):
- '''
+ """
Clear the cache of all of the fileserver backends that support the
clear_cache function or the named backend(s) only.
- '''
+ """
back = self.backends(back)
cleared = []
errors = []
for fsb in back:
- fstr = '{0}.clear_cache'.format(fsb)
+ fstr = "{0}.clear_cache".format(fsb)
if fstr in self.servers:
- log.debug('Clearing %s fileserver cache', fsb)
+ log.debug("Clearing %s fileserver cache", fsb)
failed = self.servers[fstr]()
if failed:
errors.extend(failed)
else:
cleared.append(
- 'The {0} fileserver cache was successfully cleared'
- .format(fsb)
+ "The {0} fileserver cache was successfully cleared".format(fsb)
)
return cleared, errors
def lock(self, back=None, remote=None):
- '''
+ """
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
back = self.backends(back)
locked = []
errors = []
for fsb in back:
- fstr = '{0}.lock'.format(fsb)
+ fstr = "{0}.lock".format(fsb)
if fstr in self.servers:
- msg = 'Setting update lock for {0} remotes'.format(fsb)
+ msg = "Setting update lock for {0} remotes".format(fsb)
if remote:
if not isinstance(remote, six.string_types):
errors.append(
- 'Badly formatted remote pattern \'{0}\''
- .format(remote)
+ "Badly formatted remote pattern '{0}'".format(remote)
)
continue
else:
- msg += ' matching {0}'.format(remote)
+ msg += " matching {0}".format(remote)
log.debug(msg)
good, bad = self.servers[fstr](remote=remote)
locked.extend(good)
@@ -466,7 +470,7 @@ class Fileserver(object):
return locked, errors
def clear_lock(self, back=None, remote=None):
- '''
+ """
Clear the update lock for the enabled fileserver backends
back
@@ -476,59 +480,59 @@ class Fileserver(object):
remote
If specified, then any remotes which contain the passed string will
have their lock cleared.
- '''
+ """
back = self.backends(back)
cleared = []
errors = []
for fsb in back:
- fstr = '{0}.clear_lock'.format(fsb)
+ fstr = "{0}.clear_lock".format(fsb)
if fstr in self.servers:
- good, bad = clear_lock(self.servers[fstr],
- fsb,
- remote=remote)
+ good, bad = clear_lock(self.servers[fstr], fsb, remote=remote)
cleared.extend(good)
errors.extend(bad)
return cleared, errors
def update(self, back=None):
- '''
+ """
        Update all of the enabled fileserver backends which support the update
        function, or the named backend(s) only.
- '''
+ """
back = self.backends(back)
for fsb in back:
- fstr = '{0}.update'.format(fsb)
+ fstr = "{0}.update".format(fsb)
if fstr in self.servers:
- log.debug('Updating %s fileserver cache', fsb)
+ log.debug("Updating %s fileserver cache", fsb)
self.servers[fstr]()
def update_intervals(self, back=None):
- '''
+ """
Return the update intervals for all of the enabled fileserver backends
which support variable update intervals.
- '''
+ """
back = self.backends(back)
ret = {}
for fsb in back:
- fstr = '{0}.update_intervals'.format(fsb)
+ fstr = "{0}.update_intervals".format(fsb)
if fstr in self.servers:
ret[fsb] = self.servers[fstr]()
return ret
def envs(self, back=None, sources=False):
- '''
+ """
Return the environments for the named backend or all backends
- '''
+ """
back = self.backends(back)
ret = set()
if sources:
ret = {}
for fsb in back:
- fstr = '{0}.envs'.format(fsb)
- kwargs = {'ignore_cache': True} \
- if 'ignore_cache' in _argspec(self.servers[fstr]).args \
- and self.opts['__role'] == 'minion' \
+ fstr = "{0}.envs".format(fsb)
+ kwargs = (
+ {"ignore_cache": True}
+ if "ignore_cache" in _argspec(self.servers[fstr]).args
+ and self.opts["__role"] == "minion"
else {}
+ )
if sources:
ret[fsb] = self.servers[fstr](**kwargs)
else:
@@ -538,370 +542,369 @@ class Fileserver(object):
return list(ret)
def file_envs(self, load=None):
- '''
+ """
Return environments for all backends for requests from fileclient
- '''
+ """
if load is None:
load = {}
- load.pop('cmd', None)
+ load.pop("cmd", None)
return self.envs(**load)
def init(self, back=None):
- '''
+ """
        Initialize the backend; only do so if the fs supports an init function
- '''
+ """
back = self.backends(back)
for fsb in back:
- fstr = '{0}.init'.format(fsb)
+ fstr = "{0}.init".format(fsb)
if fstr in self.servers:
self.servers[fstr]()
def _find_file(self, load):
- '''
+ """
Convenience function for calls made using the RemoteClient
- '''
- path = load.get('path')
+ """
+ path = load.get("path")
if not path:
- return {'path': '',
- 'rel': ''}
- tgt_env = load.get('saltenv', 'base')
+ return {"path": "", "rel": ""}
+ tgt_env = load.get("saltenv", "base")
return self.find_file(path, tgt_env)
def file_find(self, load):
- '''
+ """
Convenience function for calls made using the LocalClient
- '''
- path = load.get('path')
+ """
+ path = load.get("path")
if not path:
- return {'path': '',
- 'rel': ''}
- tgt_env = load.get('saltenv', 'base')
+ return {"path": "", "rel": ""}
+ tgt_env = load.get("saltenv", "base")
return self.find_file(path, tgt_env)
def find_file(self, path, saltenv, back=None):
- '''
+ """
Find the path and return the fnd structure, this structure is passed
to other backend interfaces.
- '''
+ """
path = salt.utils.stringutils.to_unicode(path)
saltenv = salt.utils.stringutils.to_unicode(saltenv)
back = self.backends(back)
kwargs = {}
- fnd = {'path': '',
- 'rel': ''}
+ fnd = {"path": "", "rel": ""}
if os.path.isabs(path):
return fnd
- if '../' in path:
+ if "../" in path:
return fnd
if salt.utils.url.is_escaped(path):
# don't attempt to find URL query arguments in the path
path = salt.utils.url.unescape(path)
else:
- if '?' in path:
- hcomps = path.split('?')
+ if "?" in path:
+ hcomps = path.split("?")
path = hcomps[0]
- comps = hcomps[1].split('&')
+ comps = hcomps[1].split("&")
for comp in comps:
- if '=' not in comp:
+ if "=" not in comp:
# Invalid option, skip it
continue
- args = comp.split('=', 1)
+ args = comp.split("=", 1)
kwargs[args[0]] = args[1]
- if 'env' in kwargs:
+ if "env" in kwargs:
# "env" is not supported; Use "saltenv".
- kwargs.pop('env')
- if 'saltenv' in kwargs:
- saltenv = kwargs.pop('saltenv')
+ kwargs.pop("env")
+ if "saltenv" in kwargs:
+ saltenv = kwargs.pop("saltenv")
if not isinstance(saltenv, six.string_types):
saltenv = six.text_type(saltenv)
for fsb in back:
- fstr = '{0}.find_file'.format(fsb)
+ fstr = "{0}.find_file".format(fsb)
if fstr in self.servers:
fnd = self.servers[fstr](path, saltenv, **kwargs)
- if fnd.get('path'):
- fnd['back'] = fsb
+ if fnd.get("path"):
+ fnd["back"] = fsb
return fnd
return fnd
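A sketch of the query-string handling implemented above; the path is an
assumption:

    fnd = fs.find_file("files/app.conf?saltenv=dev", "base")
    # "saltenv=dev" is split out of the path and overrides the passed saltenv;
    # any other key=value pairs are forwarded to the backend's find_file as
    # kwargs. On a hit, fnd["back"] records which backend matched.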
def serve_file(self, load):
- '''
+ """
Serve up a chunk of a file
- '''
- ret = {'data': '',
- 'dest': ''}
+ """
+ ret = {"data": "", "dest": ""}
- if 'env' in load:
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
+ if "path" not in load or "loc" not in load or "saltenv" not in load:
return ret
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
- fnd = self.find_file(load['path'], load['saltenv'])
- if not fnd.get('back'):
+ fnd = self.find_file(load["path"], load["saltenv"])
+ if not fnd.get("back"):
return ret
- fstr = '{0}.serve_file'.format(fnd['back'])
+ fstr = "{0}.serve_file".format(fnd["back"])
if fstr in self.servers:
return self.servers[fstr](load, fnd)
return ret
def __file_hash_and_stat(self, load):
- '''
+ """
Common code for hashing and stating files
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'path' not in load or 'saltenv' not in load:
- return '', None
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if "path" not in load or "saltenv" not in load:
+ return "", None
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
- fnd = self.find_file(salt.utils.stringutils.to_unicode(load['path']),
- load['saltenv'])
- if not fnd.get('back'):
- return '', None
- stat_result = fnd.get('stat', None)
- fstr = '{0}.file_hash'.format(fnd['back'])
+ fnd = self.find_file(
+ salt.utils.stringutils.to_unicode(load["path"]), load["saltenv"]
+ )
+ if not fnd.get("back"):
+ return "", None
+ stat_result = fnd.get("stat", None)
+ fstr = "{0}.file_hash".format(fnd["back"])
if fstr in self.servers:
return self.servers[fstr](load, fnd), stat_result
- return '', None
+ return "", None
def file_hash(self, load):
- '''
+ """
Return the hash of a given file
- '''
+ """
try:
return self.__file_hash_and_stat(load)[0]
except (IndexError, TypeError):
- return ''
+ return ""
def file_hash_and_stat(self, load):
- '''
+ """
Return the hash and stat result of a given file
- '''
+ """
try:
return self.__file_hash_and_stat(load)
except (IndexError, TypeError):
- return '', None
+ return "", None
def clear_file_list_cache(self, load):
- '''
+ """
Deletes the file_lists cache files
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- saltenv = load.get('saltenv', [])
+ saltenv = load.get("saltenv", [])
if saltenv is not None:
if not isinstance(saltenv, list):
try:
- saltenv = [x.strip() for x in saltenv.split(',')]
+ saltenv = [x.strip() for x in saltenv.split(",")]
except AttributeError:
- saltenv = [x.strip() for x in six.text_type(saltenv).split(',')]
+ saltenv = [x.strip() for x in six.text_type(saltenv).split(",")]
for idx, val in enumerate(saltenv):
if not isinstance(val, six.string_types):
saltenv[idx] = six.text_type(val)
ret = {}
- fsb = self.backends(load.pop('fsbackend', None))
- list_cachedir = os.path.join(self.opts['cachedir'], 'file_lists')
+ fsb = self.backends(load.pop("fsbackend", None))
+ list_cachedir = os.path.join(self.opts["cachedir"], "file_lists")
try:
file_list_backends = os.listdir(list_cachedir)
except OSError as exc:
if exc.errno == errno.ENOENT:
- log.debug('No file list caches found')
+ log.debug("No file list caches found")
return {}
else:
log.error(
- 'Failed to get list of saltenvs for which the master has '
- 'cached file lists: %s', exc
+ "Failed to get list of saltenvs for which the master has "
+ "cached file lists: %s",
+ exc,
)
for back in file_list_backends:
# Account for the fact that the file_list cache directory for gitfs
# is 'git', hgfs is 'hg', etc.
- back_virtualname = re.sub('fs$', '', back)
+ back_virtualname = re.sub("fs$", "", back)
try:
cache_files = os.listdir(os.path.join(list_cachedir, back))
except OSError as exc:
log.error(
- 'Failed to find file list caches for saltenv \'%s\': %s',
- back, exc
+ "Failed to find file list caches for saltenv '%s': %s", back, exc
)
continue
for cache_file in cache_files:
try:
- cache_saltenv, extension = cache_file.rsplit('.', 1)
+ cache_saltenv, extension = cache_file.rsplit(".", 1)
except ValueError:
# Filename has no dot in it. Not a cache file, ignore.
continue
- if extension != 'p':
+ if extension != "p":
# Filename does not end in ".p". Not a cache file, ignore.
continue
- elif back_virtualname not in fsb or \
- (saltenv is not None and cache_saltenv not in saltenv):
+ elif back_virtualname not in fsb or (
+ saltenv is not None and cache_saltenv not in saltenv
+ ):
log.debug(
- 'Skipping %s file list cache for saltenv \'%s\'',
- back, cache_saltenv
+ "Skipping %s file list cache for saltenv '%s'",
+ back,
+ cache_saltenv,
)
continue
try:
os.remove(os.path.join(list_cachedir, back, cache_file))
except OSError as exc:
if exc.errno != errno.ENOENT:
- log.error('Failed to remove %s: %s',
- exc.filename, exc.strerror)
+ log.error("Failed to remove %s: %s", exc.filename, exc.strerror)
else:
ret.setdefault(back, []).append(cache_saltenv)
log.debug(
- 'Removed %s file list cache for saltenv \'%s\'',
- cache_saltenv, back
+ "Removed %s file list cache for saltenv '%s'",
+ cache_saltenv,
+ back,
)
return ret
@ensure_unicode_args
def file_list(self, load):
- '''
+ """
Return a list of files from the dominant environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = set()
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return []
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
- for fsb in self.backends(load.pop('fsbackend', None)):
- fstr = '{0}.file_list'.format(fsb)
+ for fsb in self.backends(load.pop("fsbackend", None)):
+ fstr = "{0}.file_list".format(fsb)
if fstr in self.servers:
ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered
- prefix = load.get('prefix', '').strip('/')
- if prefix != '':
+ prefix = load.get("prefix", "").strip("/")
+ if prefix != "":
ret = [f for f in ret if f.startswith(prefix)]
return sorted(ret)
@ensure_unicode_args
def file_list_emptydirs(self, load):
- '''
+ """
List all emptydirs in the given environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = set()
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return []
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
for fsb in self.backends(None):
- fstr = '{0}.file_list_emptydirs'.format(fsb)
+ fstr = "{0}.file_list_emptydirs".format(fsb)
if fstr in self.servers:
ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered
- prefix = load.get('prefix', '').strip('/')
- if prefix != '':
+ prefix = load.get("prefix", "").strip("/")
+ if prefix != "":
ret = [f for f in ret if f.startswith(prefix)]
return sorted(ret)
@ensure_unicode_args
def dir_list(self, load):
- '''
+ """
List all directories in the given environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = set()
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return []
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
- for fsb in self.backends(load.pop('fsbackend', None)):
- fstr = '{0}.dir_list'.format(fsb)
+ for fsb in self.backends(load.pop("fsbackend", None)):
+ fstr = "{0}.dir_list".format(fsb)
if fstr in self.servers:
ret.update(self.servers[fstr](load))
# some *fs do not handle prefix. Ensure it is filtered
- prefix = load.get('prefix', '').strip('/')
- if prefix != '':
+ prefix = load.get("prefix", "").strip("/")
+ if prefix != "":
ret = [f for f in ret if f.startswith(prefix)]
return sorted(ret)
@ensure_unicode_args
def symlink_list(self, load):
- '''
+ """
Return a list of symlinked files and dirs
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = {}
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return {}
- if not isinstance(load['saltenv'], six.string_types):
- load['saltenv'] = six.text_type(load['saltenv'])
+ if not isinstance(load["saltenv"], six.string_types):
+ load["saltenv"] = six.text_type(load["saltenv"])
- for fsb in self.backends(load.pop('fsbackend', None)):
- symlstr = '{0}.symlink_list'.format(fsb)
+ for fsb in self.backends(load.pop("fsbackend", None)):
+ symlstr = "{0}.symlink_list".format(fsb)
if symlstr in self.servers:
ret = self.servers[symlstr](load)
# some *fs do not handle prefix. Ensure it is filtered
- prefix = load.get('prefix', '').strip('/')
- if prefix != '':
- ret = dict([
- (x, y) for x, y in six.iteritems(ret) if x.startswith(prefix)
- ])
+ prefix = load.get("prefix", "").strip("/")
+ if prefix != "":
+ ret = dict([(x, y) for x, y in six.iteritems(ret) if x.startswith(prefix)])
return ret
class FSChan(object):
- '''
+ """
    A class that mimics the transport channels allowing for local access
    to the fileserver class structure
- '''
+ """
+
def __init__(self, opts, **kwargs):
self.opts = opts
self.kwargs = kwargs
self.fs = Fileserver(self.opts)
self.fs.init()
- if self.opts.get('file_client', 'remote') == 'local':
- if '__fs_update' not in self.opts:
+ if self.opts.get("file_client", "remote") == "local":
+ if "__fs_update" not in self.opts:
self.fs.update()
- self.opts['__fs_update'] = True
+ self.opts["__fs_update"] = True
else:
self.fs.update()
- self.cmd_stub = {'master_tops': {},
- 'ext_nodes': {}}
+ self.cmd_stub = {"master_tops": {}, "ext_nodes": {}}
- def send(self, load, tries=None, timeout=None, raw=False): # pylint: disable=unused-argument
- '''
+ def send(
+ self, load, tries=None, timeout=None, raw=False
+ ): # pylint: disable=unused-argument
+ """
        Emulate the channel send method; the tries and timeout are not used
- '''
- if 'cmd' not in load:
- log.error('Malformed request, no cmd: %s', load)
+ """
+ if "cmd" not in load:
+ log.error("Malformed request, no cmd: %s", load)
return {}
- cmd = load['cmd'].lstrip('_')
+ cmd = load["cmd"].lstrip("_")
if cmd in self.cmd_stub:
return self.cmd_stub[cmd]
if not hasattr(self.fs, cmd):
- log.error('Malformed request, invalid cmd: %s', load)
+ log.error("Malformed request, invalid cmd: %s", load)
return {}
return getattr(self.fs, cmd)(load)
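Usage sketch: FSChan lets local code speak the channel protocol without a
master. The opts are assumed to be valid opts with file_client set to local:

    chan = salt.fileserver.FSChan(opts)
    chan.send({"cmd": "_file_list", "saltenv": "base"})  # -> Fileserver.file_list
    chan.send({"cmd": "_master_tops"})                   # stubbed: returns {}
    chan.send({"saltenv": "base"})                       # no cmd: logged, returns {}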
diff --git a/salt/fileserver/azurefs.py b/salt/fileserver/azurefs.py
index 2b5fd62a8a4..dfa87a31ef1 100644
--- a/salt/fileserver/azurefs.py
+++ b/salt/fileserver/azurefs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
The backend for serving files from the Azure blob storage service.
.. versionadded:: 2015.8.0
@@ -44,10 +44,11 @@ permissions.
.. note::
Do not include the leading ? for sas_token if generated from the web
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import base64
import logging
import os
@@ -61,36 +62,37 @@ import salt.utils.hashutils
import salt.utils.json
import salt.utils.path
import salt.utils.stringutils
+
+# Import third party libs
+from salt.ext import six
from salt.utils.versions import LooseVersion
try:
import azure.storage
- if LooseVersion(azure.storage.__version__) < LooseVersion('0.20.0'):
- raise ImportError('azure.storage.__version__ must be >= 0.20.0')
+
+ if LooseVersion(azure.storage.__version__) < LooseVersion("0.20.0"):
+ raise ImportError("azure.storage.__version__ must be >= 0.20.0")
HAS_AZURE = True
except (ImportError, AttributeError):
HAS_AZURE = False
-# Import third party libs
-from salt.ext import six
-
-__virtualname__ = 'azurefs'
+__virtualname__ = "azurefs"
log = logging.getLogger()
def __virtual__():
- '''
+ """
Only load if defined in fileserver_backend and azure.storage is present
- '''
- if __virtualname__ not in __opts__['fileserver_backend']:
+ """
+ if __virtualname__ not in __opts__["fileserver_backend"]:
return False
if not HAS_AZURE:
return False
- if 'azurefs' not in __opts__:
+ if "azurefs" not in __opts__:
return False
if not _validate_config():
@@ -99,20 +101,18 @@ def __virtual__():
return True
-def find_file(path, saltenv='base', **kwargs):
- '''
+def find_file(path, saltenv="base", **kwargs):
+ """
Search the environment for the relative path
- '''
- fnd = {'path': '',
- 'rel': ''}
- for container in __opts__.get('azurefs', []):
- if container.get('saltenv', 'base') != saltenv:
+ """
+ fnd = {"path": "", "rel": ""}
+ for container in __opts__.get("azurefs", []):
+ if container.get("saltenv", "base") != saltenv:
continue
full = os.path.join(_get_container_path(container), path)
- if os.path.isfile(full) and not salt.fileserver.is_file_ignored(
- __opts__, path):
- fnd['path'] = full
- fnd['rel'] = path
+ if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, path):
+ fnd["path"] = full
+ fnd["rel"] = path
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
@@ -126,7 +126,7 @@ def find_file(path, saltenv='base', **kwargs):
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
- fnd['stat'] = list(os.stat(full))
+ fnd["stat"] = list(os.stat(full))
except Exception: # pylint: disable=broad-except
pass
return fnd
@@ -134,49 +134,48 @@ def find_file(path, saltenv='base', **kwargs):
def envs():
- '''
+ """
    Each container configuration can have an environment setting; if omitted,
    it defaults to base
- '''
+ """
saltenvs = []
- for container in __opts__.get('azurefs', []):
- saltenvs.append(container.get('saltenv', 'base'))
+ for container in __opts__.get("azurefs", []):
+ saltenvs.append(container.get("saltenv", "base"))
# Remove duplicates
return list(set(saltenvs))
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
- ret = {'data': '',
- 'dest': ''}
- required_load_keys = ('path', 'loc', 'saltenv')
+ """
+ ret = {"data": "", "dest": ""}
+    required_load_keys = {"path", "loc", "saltenv"}  # a set: .difference() is called below
if not all(x in load for x in required_load_keys):
log.debug(
- 'Not all of the required keys present in payload. Missing: %s',
- ', '.join(required_load_keys.difference(load))
+ "Not all of the required keys present in payload. Missing: %s",
+ ", ".join(required_load_keys.difference(load)),
)
return ret
- if not fnd['path']:
+ if not fnd["path"]:
return ret
- ret['dest'] = fnd['rel']
- gzip = load.get('gzip', None)
- fpath = os.path.normpath(fnd['path'])
- with salt.utils.files.fopen(fpath, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if data and six.PY3 and not salt.utils.files.is_binary(fpath):
data = data.decode(__salt_system_encoding__)
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
def update():
- '''
+ """
Update caches of the storage containers.
Compares the md5 of the files on disk to the md5 of the blobs in the
@@ -184,8 +183,8 @@ def update():
Also processes deletions by walking the container caches and comparing
with the list of blobs in the container
- '''
- for container in __opts__['azurefs']:
+ """
+ for container in __opts__["azurefs"]:
path = _get_container_path(container)
try:
if not os.path.exists(path):
@@ -194,14 +193,14 @@ def update():
shutil.rmtree(path)
os.makedirs(path)
except Exception as exc: # pylint: disable=broad-except
- log.exception('Error occurred creating cache directory for azurefs')
+ log.exception("Error occurred creating cache directory for azurefs")
continue
blob_service = _get_container_service(container)
- name = container['container_name']
+ name = container["container_name"]
try:
blob_list = blob_service.list_blobs(name)
except Exception as exc: # pylint: disable=broad-except
- log.exception('Error occurred fetching blob list for azurefs')
+ log.exception("Error occurred fetching blob list for azurefs")
continue
# Walk the cache directory searching for deletions
@@ -212,7 +211,7 @@ def update():
fname = os.path.join(root, f)
relpath = os.path.relpath(fname, path)
if relpath not in blob_set:
- salt.fileserver.wait_lock(fname + '.lk', fname)
+ salt.fileserver.wait_lock(fname + ".lk", fname)
try:
os.unlink(fname)
except Exception: # pylint: disable=broad-except
@@ -226,7 +225,9 @@ def update():
if os.path.exists(fname):
# File exists, check the hashes
source_md5 = blob.properties.content_settings.content_md5
- local_md5 = base64.b64encode(salt.utils.hashutils.get_hash(fname, 'md5').decode('hex'))
+ local_md5 = base64.b64encode(
+ salt.utils.hashutils.get_hash(fname, "md5").decode("hex")
+ )
if local_md5 != source_md5:
update = True
else:
@@ -236,15 +237,15 @@ def update():
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
# Lock writes
- lk_fn = fname + '.lk'
+ lk_fn = fname + ".lk"
salt.fileserver.wait_lock(lk_fn, fname)
- with salt.utils.files.fopen(lk_fn, 'w'):
+ with salt.utils.files.fopen(lk_fn, "w"):
pass
try:
blob_service.get_blob_to_path(name, blob.name, fname)
except Exception as exc: # pylint: disable=broad-except
- log.exception('Error occurred fetching blob from azurefs')
+ log.exception("Error occurred fetching blob from azurefs")
continue
# Unlock writes
@@ -254,79 +255,82 @@ def update():
pass
# Write out file list
- container_list = path + '.list'
- lk_fn = container_list + '.lk'
+ container_list = path + ".list"
+ lk_fn = container_list + ".lk"
salt.fileserver.wait_lock(lk_fn, container_list)
- with salt.utils.files.fopen(lk_fn, 'w'):
+ with salt.utils.files.fopen(lk_fn, "w"):
pass
- with salt.utils.files.fopen(container_list, 'w') as fp_:
+ with salt.utils.files.fopen(container_list, "w") as fp_:
salt.utils.json.dump(blob_names, fp_)
try:
os.unlink(lk_fn)
except Exception: # pylint: disable=broad-except
pass
try:
- hash_cachedir = os.path.join(__opts__['cachedir'], 'azurefs', 'hashes')
+ hash_cachedir = os.path.join(__opts__["cachedir"], "azurefs", "hashes")
shutil.rmtree(hash_cachedir)
except Exception: # pylint: disable=broad-except
- log.exception('Problem occurred trying to invalidate hash cach for azurefs')
+ log.exception("Problem occurred trying to invalidate hash cach for azurefs")
def file_hash(load, fnd):
- '''
+ """
Return a file hash based on the hash type set in the master config
- '''
- if not all(x in load for x in ('path', 'saltenv')):
- return '', None
- ret = {'hash_type': __opts__['hash_type']}
- relpath = fnd['rel']
- path = fnd['path']
- hash_cachedir = os.path.join(__opts__['cachedir'], 'azurefs', 'hashes')
- hashdest = salt.utils.path.join(hash_cachedir,
- load['saltenv'],
- '{0}.hash.{1}'.format(relpath,
- __opts__['hash_type']))
+ """
+ if not all(x in load for x in ("path", "saltenv")):
+ return "", None
+ ret = {"hash_type": __opts__["hash_type"]}
+ relpath = fnd["rel"]
+ path = fnd["path"]
+ hash_cachedir = os.path.join(__opts__["cachedir"], "azurefs", "hashes")
+ hashdest = salt.utils.path.join(
+ hash_cachedir,
+ load["saltenv"],
+ "{0}.hash.{1}".format(relpath, __opts__["hash_type"]),
+ )
if not os.path.isfile(hashdest):
if not os.path.exists(os.path.dirname(hashdest)):
os.makedirs(os.path.dirname(hashdest))
- ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
- with salt.utils.files.fopen(hashdest, 'w+') as fp_:
- fp_.write(salt.utils.stringutils.to_str(ret['hsum']))
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
+ with salt.utils.files.fopen(hashdest, "w+") as fp_:
+ fp_.write(salt.utils.stringutils.to_str(ret["hsum"]))
return ret
else:
- with salt.utils.files.fopen(hashdest, 'rb') as fp_:
- ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read())
+ with salt.utils.files.fopen(hashdest, "rb") as fp_:
+ ret["hsum"] = salt.utils.stringutils.to_unicode(fp_.read())
return ret
def file_list(load):
- '''
+ """
Return a list of all files in a specified environment
- '''
+ """
ret = set()
try:
- for container in __opts__['azurefs']:
- if container.get('saltenv', 'base') != load['saltenv']:
+ for container in __opts__["azurefs"]:
+ if container.get("saltenv", "base") != load["saltenv"]:
continue
- container_list = _get_container_path(container) + '.list'
- lk = container_list + '.lk'
+ container_list = _get_container_path(container) + ".list"
+ lk = container_list + ".lk"
salt.fileserver.wait_lock(lk, container_list, 5)
if not os.path.exists(container_list):
continue
- with salt.utils.files.fopen(container_list, 'r') as fp_:
+ with salt.utils.files.fopen(container_list, "r") as fp_:
ret.update(set(salt.utils.json.load(fp_)))
except Exception as exc: # pylint: disable=broad-except
- log.error('azurefs: an error ocurred retrieving file lists. '
- 'It should be resolved next time the fileserver '
- 'updates. Please do not manually modify the azurefs '
- 'cache directory.')
+ log.error(
+ "azurefs: an error ocurred retrieving file lists. "
+ "It should be resolved next time the fileserver "
+ "updates. Please do not manually modify the azurefs "
+ "cache directory."
+ )
return list(ret)
def dir_list(load):
- '''
+ """
Return a list of all directories in a specified environment
- '''
+ """
ret = set()
files = file_list(load)
for f in files:
@@ -339,53 +343,61 @@ def dir_list(load):
def _get_container_path(container):
- '''
+ """
Get the cache path for the container in question
Cache paths are generated by combining the account name, container name,
and saltenv, separated by underscores
- '''
- root = os.path.join(__opts__['cachedir'], 'azurefs')
- container_dir = '{0}_{1}_{2}'.format(container.get('account_name', ''),
- container.get('container_name', ''),
- container.get('saltenv', 'base'))
+ """
+ root = os.path.join(__opts__["cachedir"], "azurefs")
+ container_dir = "{0}_{1}_{2}".format(
+ container.get("account_name", ""),
+ container.get("container_name", ""),
+ container.get("saltenv", "base"),
+ )
return os.path.join(root, container_dir)
def _get_container_service(container):
- '''
+ """
Get the azure block blob service for the container in question
Try account_key, sas_token, and no auth in that order
- '''
- if 'account_key' in container:
- account = azure.storage.CloudStorageAccount(container['account_name'], account_key=container['account_key'])
- elif 'sas_token' in container:
- account = azure.storage.CloudStorageAccount(container['account_name'], sas_token=container['sas_token'])
+ """
+ if "account_key" in container:
+ account = azure.storage.CloudStorageAccount(
+ container["account_name"], account_key=container["account_key"]
+ )
+ elif "sas_token" in container:
+ account = azure.storage.CloudStorageAccount(
+ container["account_name"], sas_token=container["sas_token"]
+ )
else:
- account = azure.storage.CloudStorageAccount(container['account_name'])
+ account = azure.storage.CloudStorageAccount(container["account_name"])
blob_service = account.create_block_blob_service()
return blob_service
def _validate_config():
- '''
+ """
Validate azurefs config, return False if it doesn't validate
- '''
- if not isinstance(__opts__['azurefs'], list):
- log.error('azurefs configuration is not formed as a list, skipping azurefs')
+ """
+ if not isinstance(__opts__["azurefs"], list):
+ log.error("azurefs configuration is not formed as a list, skipping azurefs")
return False
- for container in __opts__['azurefs']:
+ for container in __opts__["azurefs"]:
if not isinstance(container, dict):
log.error(
- 'One or more entries in the azurefs configuration list are '
- 'not formed as a dict. Skipping azurefs: %s', container
+ "One or more entries in the azurefs configuration list are "
+ "not formed as a dict. Skipping azurefs: %s",
+ container,
)
return False
- if 'account_name' not in container or 'container_name' not in container:
+ if "account_name" not in container or "container_name" not in container:
log.error(
- 'An azurefs container configuration is missing either an '
- 'account_name or a container_name: %s', container
+ "An azurefs container configuration is missing either an "
+ "account_name or a container_name: %s",
+ container,
)
return False
return True
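
Note on the azurefs hunks above: `_validate_config` accepts only a list of dicts, each carrying at least `account_name` and `container_name`, while `_get_container_service` tries `account_key`, then `sas_token`, then anonymous access. A minimal standalone sketch of the validation logic follows; the sample values are hypothetical placeholders, not taken from the patch.

    # Standalone sketch of the azurefs config validation shown above.
    import logging

    log = logging.getLogger(__name__)


    def validate_azurefs(config):
        """Mirror _validate_config(): a list of dicts, each with at least
        account_name and container_name."""
        if not isinstance(config, list):
            log.error("azurefs configuration is not formed as a list")
            return False
        for container in config:
            if not isinstance(container, dict):
                log.error("azurefs entry is not a dict: %s", container)
                return False
            if "account_name" not in container or "container_name" not in container:
                log.error(
                    "azurefs entry missing account_name or container_name: %s",
                    container,
                )
                return False
        return True


    # Hypothetical example: one anonymous container mapped to the base saltenv
    sample = [{"account_name": "mystorage", "container_name": "salt", "saltenv": "base"}]
    assert validate_azurefs(sample)
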
diff --git a/salt/fileserver/gitfs.py b/salt/fileserver/gitfs.py
index 494ca375611..fdf8661e391 100644
--- a/salt/fileserver/gitfs.py
+++ b/salt/fileserver/gitfs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Git Fileserver Backend
With this backend, branches and tags in a remote git repository are exposed to
@@ -46,50 +46,59 @@ Walkthrough `.
.. _pygit2: https://github.com/libgit2/pygit2
.. _libgit2: https://libgit2.github.com/
.. _GitPython: https://github.com/gitpython-developers/GitPython
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-PER_REMOTE_OVERRIDES = (
- 'base', 'mountpoint', 'root', 'ssl_verify',
- 'saltenv_whitelist', 'saltenv_blacklist',
- 'refspecs', 'disable_saltenv_mapping',
- 'ref_types', 'update_interval',
-)
-PER_REMOTE_ONLY = ('all_saltenvs', 'name', 'saltenv')
-
-# Auth support (auth params can be global or per-remote, too)
-AUTH_PROVIDERS = ('pygit2',)
-AUTH_PARAMS = ('user', 'password', 'pubkey', 'privkey', 'passphrase',
- 'insecure_auth')
-
# Import salt libs
import salt.utils.gitfs
from salt.exceptions import FileserverConfigError
+PER_REMOTE_OVERRIDES = (
+ "base",
+ "fallback",
+ "mountpoint",
+ "root",
+ "ssl_verify",
+ "saltenv_whitelist",
+ "saltenv_blacklist",
+ "refspecs",
+ "disable_saltenv_mapping",
+ "ref_types",
+ "update_interval",
+)
+PER_REMOTE_ONLY = ("all_saltenvs", "name", "saltenv")
+
+# Auth support (auth params can be global or per-remote, too)
+AUTH_PROVIDERS = ("pygit2",)
+AUTH_PARAMS = ("user", "password", "pubkey", "privkey", "passphrase", "insecure_auth")
+
+
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'gitfs'
+__virtualname__ = "gitfs"
def _gitfs(init_remotes=True):
return salt.utils.gitfs.GitFS(
__opts__,
- __opts__['gitfs_remotes'],
+ __opts__["gitfs_remotes"],
per_remote_overrides=PER_REMOTE_OVERRIDES,
per_remote_only=PER_REMOTE_ONLY,
- init_remotes=init_remotes)
+ init_remotes=init_remotes,
+ )
def __virtual__():
- '''
+ """
Only load if the desired provider module is present and gitfs is enabled
properly in the master config file.
- '''
- if __virtualname__ not in __opts__['fileserver_backend']:
+ """
+ if __virtualname__ not in __opts__["fileserver_backend"]:
return False
try:
_gitfs(init_remotes=False)
@@ -102,106 +111,106 @@ def __virtual__():
def clear_cache():
- '''
+ """
Completely clear gitfs cache
- '''
+ """
return _gitfs(init_remotes=False).clear_cache()
-def clear_lock(remote=None, lock_type='update'):
- '''
+def clear_lock(remote=None, lock_type="update"):
+ """
Clear update.lk
- '''
+ """
return _gitfs().clear_lock(remote=remote, lock_type=lock_type)
def lock(remote=None):
- '''
+ """
Place an update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
return _gitfs().lock(remote=remote)
def update(remotes=None):
- '''
+ """
Execute a git fetch on all of the repos
- '''
+ """
_gitfs().update(remotes)
def update_intervals():
- '''
+ """
Returns the update intervals for each configured remote
- '''
+ """
return _gitfs().update_intervals()
def envs(ignore_cache=False):
- '''
+ """
Return a list of refs that can be used as environments
- '''
+ """
return _gitfs().envs(ignore_cache=ignore_cache)
-def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
- '''
+def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
+ """
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
- '''
+ """
return _gitfs().find_file(path, tgt_env=tgt_env, **kwargs)
def init():
- '''
+ """
Initialize remotes. This is only used by the master's pre-flight checks,
and is not invoked by GitFS.
- '''
+ """
_gitfs()
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
+ """
return _gitfs().serve_file(load, fnd)
def file_hash(load, fnd):
- '''
+ """
Return a file hash, the hash type is set in the master config file
- '''
+ """
return _gitfs().file_hash(load, fnd)
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified
environment (specified as a key within the load dict).
- '''
+ """
return _gitfs().file_list(load)
def file_list_emptydirs(load): # pylint: disable=W0613
- '''
+ """
Return a list of all empty directories on the master
- '''
+ """
# Cannot have empty dirs in git
return []
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
- '''
+ """
return _gitfs().dir_list(load)
def symlink_list(load):
- '''
+ """
Return a dict of all symlinks based on a given path in the repo
- '''
+ """
return _gitfs().symlink_list(load)
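
Beyond requoting, the gitfs hunks add `fallback` to PER_REMOTE_OVERRIDES (so a fallback ref can be configured globally or per remote) and keep every public function a thin wrapper that builds a fresh `salt.utils.gitfs.GitFS` per call via `_gitfs()`. A sketch of that delegation pattern, with a stand-in backend class that is illustrative only, not Salt's API:

    # Sketch of the per-call delegation pattern used by gitfs above.
    class Backend:
        """Illustrative stand-in for salt.utils.gitfs.GitFS."""

        def __init__(self, opts, remotes, init_remotes=True):
            self.opts = opts
            self.remotes = remotes
            if init_remotes:
                # The real class initializes remotes here; skipping it (as
                # __virtual__ and clear_cache do) keeps the call cheap.
                pass

        def envs(self):
            return ["base"]


    _OPTS = {"gitfs_remotes": []}  # hypothetical stand-in for __opts__


    def _backend(init_remotes=True):
        return Backend(_OPTS, _OPTS["gitfs_remotes"], init_remotes=init_remotes)


    def envs():
        return _backend().envs()


    print(envs())  # ['base']
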
diff --git a/salt/fileserver/hgfs.py b/salt/fileserver/hgfs.py
index 97f8b7b952e..0f954355e6f 100644
--- a/salt/fileserver/hgfs.py
+++ b/salt/fileserver/hgfs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Mercurial Fileserver Backend
To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the
@@ -34,10 +34,11 @@ will set the desired branch method. Possible values are: ``branches``,
:depends: - mercurial
- python bindings for mercurial (``python-hglib``)
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import copy
import errno
import fnmatch
@@ -47,20 +48,8 @@ import logging
import os
import shutil
from datetime import datetime
-from salt.exceptions import FileserverConfigError
-VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed')
-PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root')
-
-# Import third party libs
-from salt.ext import six
-# pylint: disable=import-error
-try:
- import hglib
- HAS_HG = True
-except ImportError:
- HAS_HG = False
-# pylint: enable=import-error
+import salt.fileserver
# Import salt libs
import salt.utils.data
@@ -70,38 +59,58 @@ import salt.utils.hashutils
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
-import salt.fileserver
+from salt.exceptions import FileserverConfigError
+
+# Import third party libs
+from salt.ext import six
from salt.utils.event import tagify
+VALID_BRANCH_METHODS = ("branches", "bookmarks", "mixed")
+PER_REMOTE_OVERRIDES = ("base", "branch_method", "mountpoint", "root")
+
+
+# pylint: disable=import-error
+try:
+ import hglib
+
+ HAS_HG = True
+except ImportError:
+ HAS_HG = False
+# pylint: enable=import-error
+
+
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'hg'
+__virtualname__ = "hg"
def __virtual__():
- '''
+ """
Only load if mercurial is available
- '''
- if __virtualname__ not in __opts__['fileserver_backend']:
+ """
+ if __virtualname__ not in __opts__["fileserver_backend"]:
return False
if not HAS_HG:
- log.error('Mercurial fileserver backend is enabled in configuration '
- 'but could not be loaded, is hglib installed?')
- return False
- if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS:
log.error(
- 'Invalid hgfs_branch_method \'%s\'. Valid methods are: %s',
- __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS
+ "Mercurial fileserver backend is enabled in configuration "
+ "but could not be loaded, is hglib installed?"
+ )
+ return False
+ if __opts__["hgfs_branch_method"] not in VALID_BRANCH_METHODS:
+ log.error(
+ "Invalid hgfs_branch_method '%s'. Valid methods are: %s",
+ __opts__["hgfs_branch_method"],
+ VALID_BRANCH_METHODS,
)
return False
return __virtualname__
def _all_branches(repo):
- '''
+ """
Returns all branches for the specified repo
- '''
+ """
# repo.branches() returns a list of 3-tuples consisting of
# (branch name, rev #, nodeid)
# Example: [('default', 4, '7c96229269fa')]
@@ -109,9 +118,9 @@ def _all_branches(repo):
def _get_branch(repo, name):
- '''
+ """
Find the requested branch in the specified repo
- '''
+ """
try:
return [x for x in _all_branches(repo) if x[0] == name][0]
except IndexError:
@@ -119,9 +128,9 @@ def _get_branch(repo, name):
def _all_bookmarks(repo):
- '''
+ """
Returns all bookmarks for the specified repo
- '''
+ """
# repo.bookmarks() returns a tuple containing the following:
# 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid)
# 2. The index of the current bookmark (-1 if no current one)
@@ -130,9 +139,9 @@ def _all_bookmarks(repo):
def _get_bookmark(repo, name):
- '''
+ """
Find the requested bookmark in the specified repo
- '''
+ """
try:
return [x for x in _all_bookmarks(repo) if x[0] == name][0]
except IndexError:
@@ -140,21 +149,21 @@ def _get_bookmark(repo, name):
def _all_tags(repo):
- '''
+ """
Returns all tags for the specified repo
- '''
+ """
# repo.tags() returns a list of 4-tuples consisting of
# (tag name, rev #, nodeid, islocal)
# Example: [('1.0', 3, '3be15e71b31a', False),
# ('tip', 4, '7c96229269fa', False)]
# Avoid returning the special 'tip' tag.
- return [x for x in repo.tags() if x[0] != 'tip']
+ return [x for x in repo.tags() if x[0] != "tip"]
def _get_tag(repo, name):
- '''
+ """
Find the requested tag in the specified repo
- '''
+ """
try:
return [x for x in _all_tags(repo) if x[0] == name][0]
except IndexError:
@@ -162,83 +171,88 @@ def _get_tag(repo, name):
def _get_ref(repo, name):
- '''
+ """
Return ref tuple if ref is in the repo.
- '''
- if name == 'base':
- name = repo['base']
- if name == repo['base'] or name in envs():
- if repo['branch_method'] == 'branches':
- return _get_branch(repo['repo'], name) \
- or _get_tag(repo['repo'], name)
- elif repo['branch_method'] == 'bookmarks':
- return _get_bookmark(repo['repo'], name) \
- or _get_tag(repo['repo'], name)
- elif repo['branch_method'] == 'mixed':
- return _get_branch(repo['repo'], name) \
- or _get_bookmark(repo['repo'], name) \
- or _get_tag(repo['repo'], name)
+ """
+ if name == "base":
+ name = repo["base"]
+ if name == repo["base"] or name in envs():
+ if repo["branch_method"] == "branches":
+ return _get_branch(repo["repo"], name) or _get_tag(repo["repo"], name)
+ elif repo["branch_method"] == "bookmarks":
+ return _get_bookmark(repo["repo"], name) or _get_tag(repo["repo"], name)
+ elif repo["branch_method"] == "mixed":
+ return (
+ _get_branch(repo["repo"], name)
+ or _get_bookmark(repo["repo"], name)
+ or _get_tag(repo["repo"], name)
+ )
return False
def _failhard():
- '''
+ """
Fatal fileserver configuration issue, raise an exception
- '''
- raise FileserverConfigError(
- 'Failed to load hg fileserver backend'
- )
+ """
+ raise FileserverConfigError("Failed to load hg fileserver backend")
def init():
- '''
+ """
Return a list of hglib objects for the various hgfs remotes
- '''
- bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
+ """
+ bp_ = os.path.join(__opts__["cachedir"], "hgfs")
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
- per_remote_defaults[param] = \
- six.text_type(__opts__['hgfs_{0}'.format(param)])
+ per_remote_defaults[param] = six.text_type(__opts__["hgfs_{0}".format(param)])
- for remote in __opts__['hgfs_remotes']:
+ for remote in __opts__["hgfs_remotes"]:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
- [(key, six.text_type(val)) for key, val in
- six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
+ [
+ (key, six.text_type(val))
+ for key, val in six.iteritems(
+ salt.utils.data.repack_dictlist(remote[repo_url])
+ )
+ ]
)
if not per_remote_conf:
log.error(
- 'Invalid per-remote configuration for hgfs remote %s. If '
- 'no per-remote parameters are being specified, there may '
- 'be a trailing colon after the URL, which should be '
- 'removed. Check the master configuration file.', repo_url
+ "Invalid per-remote configuration for hgfs remote %s. If "
+ "no per-remote parameters are being specified, there may "
+ "be a trailing colon after the URL, which should be "
+ "removed. Check the master configuration file.",
+ repo_url,
)
_failhard()
- branch_method = \
- per_remote_conf.get('branch_method',
- per_remote_defaults['branch_method'])
+ branch_method = per_remote_conf.get(
+ "branch_method", per_remote_defaults["branch_method"]
+ )
if branch_method not in VALID_BRANCH_METHODS:
log.error(
- 'Invalid branch_method \'%s\' for remote %s. Valid '
- 'branch methods are: %s. This remote will be ignored.',
- branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS)
+ "Invalid branch_method '%s' for remote %s. Valid "
+ "branch methods are: %s. This remote will be ignored.",
+ branch_method,
+ repo_url,
+ ", ".join(VALID_BRANCH_METHODS),
)
_failhard()
per_remote_errors = False
- for param in (x for x in per_remote_conf
- if x not in PER_REMOTE_OVERRIDES):
+ for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES):
log.error(
- 'Invalid configuration parameter \'%s\' for remote %s. '
- 'Valid parameters are: %s. See the documentation for '
- 'further information.',
- param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
+ "Invalid configuration parameter '%s' for remote %s. "
+ "Valid parameters are: %s. See the documentation for "
+ "further information.",
+ param,
+ repo_url,
+ ", ".join(PER_REMOTE_OVERRIDES),
)
per_remote_errors = True
if per_remote_errors:
@@ -250,20 +264,21 @@ def init():
if not isinstance(repo_url, six.string_types):
log.error(
- 'Invalid hgfs remote %s. Remotes must be strings, you may '
- 'need to enclose the URL in quotes', repo_url
+ "Invalid hgfs remote %s. Remotes must be strings, you may "
+ "need to enclose the URL in quotes",
+ repo_url,
)
_failhard()
try:
- repo_conf['mountpoint'] = salt.utils.url.strip_proto(
- repo_conf['mountpoint']
+ repo_conf["mountpoint"] = salt.utils.url.strip_proto(
+ repo_conf["mountpoint"]
)
except TypeError:
# mountpoint not specified
pass
- hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
+ hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
@@ -277,21 +292,24 @@ def init():
repo = hglib.open(rp_)
except hglib.error.ServerError:
log.error(
- 'Cache path %s (corresponding remote: %s) exists but is not '
- 'a valid mercurial repository. You will need to manually '
- 'delete this directory on the master to continue to use this '
- 'hgfs remote.', rp_, repo_url
+ "Cache path %s (corresponding remote: %s) exists but is not "
+ "a valid mercurial repository. You will need to manually "
+ "delete this directory on the master to continue to use this "
+ "hgfs remote.",
+ rp_,
+ repo_url,
)
_failhard()
except Exception as exc: # pylint: disable=broad-except
log.error(
- 'Exception \'%s\' encountered while initializing hgfs '
- 'remote %s', exc, repo_url
+ "Exception '%s' encountered while initializing hgfs " "remote %s",
+ exc,
+ repo_url,
)
_failhard()
try:
- refs = repo.config(names='paths')
+ refs = repo.config(names="paths")
except hglib.error.CommandError:
refs = None
@@ -300,52 +318,52 @@ def init():
# this way to support both older and newer hglib.
if not refs:
# Write an hgrc defining the remote URL
- hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
- with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig:
- hgconfig.write('[paths]\n')
+ hgconfpath = os.path.join(rp_, ".hg", "hgrc")
+ with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig:
+ hgconfig.write("[paths]\n")
hgconfig.write(
- salt.utils.stringutils.to_str(
- 'default = {0}\n'.format(repo_url)
- )
+ salt.utils.stringutils.to_str("default = {0}\n".format(repo_url))
)
- repo_conf.update({
- 'repo': repo,
- 'url': repo_url,
- 'hash': repo_hash,
- 'cachedir': rp_,
- 'lockfile': os.path.join(__opts__['cachedir'],
- 'hgfs',
- '{0}.update.lk'.format(repo_hash))
- })
+ repo_conf.update(
+ {
+ "repo": repo,
+ "url": repo_url,
+ "hash": repo_hash,
+ "cachedir": rp_,
+ "lockfile": os.path.join(
+ __opts__["cachedir"], "hgfs", "{0}.update.lk".format(repo_hash)
+ ),
+ }
+ )
repos.append(repo_conf)
repo.close()
if new_remote:
- remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')
+ remote_map = os.path.join(__opts__["cachedir"], "hgfs/remote_map.txt")
try:
- with salt.utils.files.fopen(remote_map, 'w+') as fp_:
- timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
- fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
+ with salt.utils.files.fopen(remote_map, "w+") as fp_:
+ timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
+ fp_.write("# hgfs_remote map as of {0}\n".format(timestamp))
for repo in repos:
fp_.write(
salt.utils.stringutils.to_str(
- '{0} = {1}\n'.format(repo['hash'], repo['url'])
+ "{0} = {1}\n".format(repo["hash"], repo["url"])
)
)
except OSError:
pass
else:
- log.info('Wrote new hgfs_remote map to %s', remote_map)
+ log.info("Wrote new hgfs_remote map to %s", remote_map)
return repos
def _clear_old_remotes():
- '''
+ """
Remove cache directories for remotes no longer configured
- '''
- bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
+ """
+ bp_ = os.path.join(__opts__["cachedir"], "hgfs")
try:
cachedir_ls = os.listdir(bp_)
except OSError:
@@ -354,12 +372,12 @@ def _clear_old_remotes():
# Remove actively-used remotes from list
for repo in repos:
try:
- cachedir_ls.remove(repo['hash'])
+ cachedir_ls.remove(repo["hash"])
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
- if item in ('hash', 'refs'):
+ if item in ("hash", "refs"):
continue
path = os.path.join(bp_, item)
if os.path.isdir(path):
@@ -370,66 +388,66 @@ def _clear_old_remotes():
try:
shutil.rmtree(rdir)
except OSError as exc:
- log.error(
- 'Unable to remove old hgfs remote cachedir %s: %s',
- rdir, exc
- )
+ log.error("Unable to remove old hgfs remote cachedir %s: %s", rdir, exc)
failed.append(rdir)
else:
- log.debug('hgfs removed old cachedir %s', rdir)
+ log.debug("hgfs removed old cachedir %s", rdir)
for fdir in failed:
to_remove.remove(fdir)
return bool(to_remove), repos
def clear_cache():
- '''
+ """
Completely clear hgfs cache
- '''
- fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs')
- list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
+ """
+ fsb_cachedir = os.path.join(__opts__["cachedir"], "hgfs")
+ list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
errors = []
for rdir in (fsb_cachedir, list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
- errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
+ errors.append("Unable to delete {0}: {1}".format(rdir, exc))
return errors
def clear_lock(remote=None):
- '''
+ """
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
+
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
- msg = ('Unable to remove update lock for {0} ({1}): {2} '
- .format(repo['url'], repo['lockfile'], exc))
+ msg = "Unable to remove update lock for {0} ({1}): {2} ".format(
+ repo["url"], repo["lockfile"], exc
+ )
log.debug(msg)
errlist.append(msg)
+
success = []
failed = []
- if os.path.exists(repo['lockfile']):
+ if os.path.exists(repo["lockfile"]):
try:
- os.remove(repo['lockfile'])
+ os.remove(repo["lockfile"])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
- shutil.rmtree(repo['lockfile'])
+ shutil.rmtree(repo["lockfile"])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
- msg = 'Removed lock for {0}'.format(repo['url'])
+ msg = "Removed lock for {0}".format(repo["url"])
log.debug(msg)
success.append(msg)
return success, failed
@@ -442,11 +460,11 @@ def clear_lock(remote=None):
for repo in init():
if remote:
try:
- if not fnmatch.fnmatch(repo['url'], remote):
+ if not fnmatch.fnmatch(repo["url"], remote):
continue
except TypeError:
# remote was non-string, try again
- if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
+ if not fnmatch.fnmatch(repo["url"], six.text_type(remote)):
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
@@ -455,27 +473,29 @@ def clear_lock(remote=None):
def lock(remote=None):
- '''
+ """
Place an update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
+
def _do_lock(repo):
success = []
failed = []
- if not os.path.exists(repo['lockfile']):
+ if not os.path.exists(repo["lockfile"]):
try:
- with salt.utils.files.fopen(repo['lockfile'], 'w'):
+ with salt.utils.files.fopen(repo["lockfile"], "w"):
pass
except (IOError, OSError) as exc:
- msg = ('Unable to set update lock for {0} ({1}): {2} '
- .format(repo['url'], repo['lockfile'], exc))
+ msg = "Unable to set update lock for {0} ({1}): {2} ".format(
+ repo["url"], repo["lockfile"], exc
+ )
log.debug(msg)
failed.append(msg)
else:
- msg = 'Set lock for {0}'.format(repo['url'])
+ msg = "Set lock for {0}".format(repo["url"])
log.debug(msg)
success.append(msg)
return success, failed
@@ -488,11 +508,11 @@ def lock(remote=None):
for repo in init():
if remote:
try:
- if not fnmatch.fnmatch(repo['url'], remote):
+ if not fnmatch.fnmatch(repo["url"], remote):
continue
except TypeError:
# remote was non-string, try again
- if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
+ if not fnmatch.fnmatch(repo["url"], six.text_type(remote)):
continue
success, failed = _do_lock(repo)
locked.extend(success)
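
Both azurefs and hgfs guard cache writes with a sibling ".lk" file: wait out any existing lock, touch the lock file, write, then unlink. A distilled sketch of that convention; `wait_lock` below is a simplified stand-in for `salt.fileserver.wait_lock`:

    # Distilled sketch of the ".lk" lockfile convention used above.
    import os
    import time


    def wait_lock(lk_fn, timeout=5):
        """Simplified stand-in for salt.fileserver.wait_lock."""
        deadline = time.time() + timeout
        while os.path.exists(lk_fn) and time.time() < deadline:
            time.sleep(0.1)


    def locked_write(path, data):
        lk_fn = path + ".lk"
        wait_lock(lk_fn)  # wait out any concurrent writer
        with open(lk_fn, "w"):
            pass  # take the lock (touch the file)
        try:
            with open(path, "w") as fp_:
                fp_.write(data)
        finally:
            try:
                os.unlink(lk_fn)  # release, tolerating races
            except OSError:
                pass


    # Usage (hypothetical): locked_write("/tmp/container.list", "[]")
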
@@ -502,73 +522,75 @@ def lock(remote=None):
def update():
- '''
+ """
Execute an hg pull on all of the repos
- '''
+ """
# data for the fileserver event
- data = {'changed': False,
- 'backend': 'hgfs'}
+ data = {"changed": False, "backend": "hgfs"}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
- data['changed'], repos = _clear_old_remotes()
+ data["changed"], repos = _clear_old_remotes()
for repo in repos:
- if os.path.exists(repo['lockfile']):
+ if os.path.exists(repo["lockfile"]):
log.warning(
- 'Update lockfile is present for hgfs remote %s, skipping. '
- 'If this warning persists, it is possible that the update '
- 'process was interrupted. Removing %s or running '
- '\'salt-run fileserver.clear_lock hgfs\' will allow updates '
- 'to continue for this remote.', repo['url'], repo['lockfile']
+ "Update lockfile is present for hgfs remote %s, skipping. "
+ "If this warning persists, it is possible that the update "
+ "process was interrupted. Removing %s or running "
+ "'salt-run fileserver.clear_lock hgfs' will allow updates "
+ "to continue for this remote.",
+ repo["url"],
+ repo["lockfile"],
)
continue
_, errors = lock(repo)
if errors:
log.error(
- 'Unable to set update lock for hgfs remote %s, skipping.',
- repo['url']
+ "Unable to set update lock for hgfs remote %s, skipping.", repo["url"]
)
continue
- log.debug('hgfs is fetching from %s', repo['url'])
- repo['repo'].open()
- curtip = repo['repo'].tip()
+ log.debug("hgfs is fetching from %s", repo["url"])
+ repo["repo"].open()
+ curtip = repo["repo"].tip()
try:
- repo['repo'].pull()
+ repo["repo"].pull()
except Exception as exc: # pylint: disable=broad-except
log.error(
- 'Exception %s caught while updating hgfs remote %s',
- exc, repo['url'], exc_info_on_loglevel=logging.DEBUG
+ "Exception %s caught while updating hgfs remote %s",
+ exc,
+ repo["url"],
+ exc_info_on_loglevel=logging.DEBUG,
)
else:
- newtip = repo['repo'].tip()
+ newtip = repo["repo"].tip()
if curtip[1] != newtip[1]:
- data['changed'] = True
- repo['repo'].close()
+ data["changed"] = True
+ repo["repo"].close()
clear_lock(repo)
- env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
- if data.get('changed', False) is True or not os.path.isfile(env_cache):
+ env_cache = os.path.join(__opts__["cachedir"], "hgfs/envs.p")
+ if data.get("changed", False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
- with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
+ with salt.utils.files.fopen(env_cache, "wb+") as fp_:
fp_.write(serial.dumps(new_envs))
- log.trace('Wrote env cache data to %s', env_cache)
+ log.trace("Wrote env cache data to %s", env_cache)
# if there is a change, fire an event
- if __opts__.get('fileserver_events', False):
+ if __opts__.get("fileserver_events", False):
with salt.utils.event.get_event(
- 'master',
- __opts__['sock_dir'],
- __opts__['transport'],
- opts=__opts__,
- listen=False) as event:
- event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver'))
+ "master",
+ __opts__["sock_dir"],
+ __opts__["transport"],
+ opts=__opts__,
+ listen=False,
+ ) as event:
+ event.fire_event(data, tagify(["hgfs", "update"], prefix="fileserver"))
try:
salt.fileserver.reap_fileserver_cache_dir(
- os.path.join(__opts__['cachedir'], 'hgfs/hash'),
- find_file
+ os.path.join(__opts__["cachedir"], "hgfs/hash"), find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
@@ -576,69 +598,65 @@ def update():
def _env_is_exposed(env):
- '''
+ """
Check if an environment is exposed by comparing it against a whitelist and
blacklist.
- '''
+ """
return salt.utils.stringutils.check_whitelist_blacklist(
env,
- whitelist=__opts__['hgfs_saltenv_whitelist'],
- blacklist=__opts__['hgfs_saltenv_blacklist'],
+ whitelist=__opts__["hgfs_saltenv_whitelist"],
+ blacklist=__opts__["hgfs_saltenv_blacklist"],
)
def envs(ignore_cache=False):
- '''
+ """
Return a list of refs that can be used as environments
- '''
+ """
if not ignore_cache:
- env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
+ env_cache = os.path.join(__opts__["cachedir"], "hgfs/envs.p")
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
- repo['repo'].open()
- if repo['branch_method'] in ('branches', 'mixed'):
- for branch in _all_branches(repo['repo']):
+ repo["repo"].open()
+ if repo["branch_method"] in ("branches", "mixed"):
+ for branch in _all_branches(repo["repo"]):
branch_name = branch[0]
- if branch_name == repo['base']:
- branch_name = 'base'
+ if branch_name == repo["base"]:
+ branch_name = "base"
ret.add(branch_name)
- if repo['branch_method'] in ('bookmarks', 'mixed'):
- for bookmark in _all_bookmarks(repo['repo']):
+ if repo["branch_method"] in ("bookmarks", "mixed"):
+ for bookmark in _all_bookmarks(repo["repo"]):
bookmark_name = bookmark[0]
- if bookmark_name == repo['base']:
- bookmark_name = 'base'
+ if bookmark_name == repo["base"]:
+ bookmark_name = "base"
ret.add(bookmark_name)
- ret.update([x[0] for x in _all_tags(repo['repo'])])
- repo['repo'].close()
+ ret.update([x[0] for x in _all_tags(repo["repo"])])
+ repo["repo"].close()
return [x for x in sorted(ret) if _env_is_exposed(x)]
-def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
- '''
+def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
+ """
Find the first file to match the path and ref, read the file out of hg
and send the path to the newly cached file
- '''
- fnd = {'path': '',
- 'rel': ''}
+ """
+ fnd = {"path": "", "rel": ""}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
- dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path)
- hashes_glob = os.path.join(__opts__['cachedir'],
- 'hgfs/hash',
- tgt_env,
- '{0}.hash.*'.format(path))
- blobshadest = os.path.join(__opts__['cachedir'],
- 'hgfs/hash',
- tgt_env,
- '{0}.hash.blob_sha1'.format(path))
- lk_fn = os.path.join(__opts__['cachedir'],
- 'hgfs/hash',
- tgt_env,
- '{0}.lk'.format(path))
+ dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path)
+ hashes_glob = os.path.join(
+ __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.hash.*".format(path)
+ )
+ blobshadest = os.path.join(
+ __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.hash.blob_sha1".format(path)
+ )
+ lk_fn = os.path.join(
+ __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.lk".format(path)
+ )
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
@@ -657,50 +675,47 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
os.makedirs(hashdir)
for repo in init():
- if repo['mountpoint'] \
- and not path.startswith(repo['mountpoint'] + os.path.sep):
+ if repo["mountpoint"] and not path.startswith(repo["mountpoint"] + os.path.sep):
continue
- repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
- if repo['root']:
- repo_path = os.path.join(repo['root'], repo_path)
+ repo_path = path[len(repo["mountpoint"]) :].lstrip(os.path.sep)
+ if repo["root"]:
+ repo_path = os.path.join(repo["root"], repo_path)
- repo['repo'].open()
+ repo["repo"].open()
ref = _get_ref(repo, tgt_env)
if not ref:
# Branch or tag not found in repo, try the next
- repo['repo'].close()
+ repo["repo"].close()
continue
salt.fileserver.wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
- with salt.utils.files.fopen(blobshadest, 'r') as fp_:
+ with salt.utils.files.fopen(blobshadest, "r") as fp_:
sha = fp_.read()
if sha == ref[2]:
- fnd['rel'] = path
- fnd['path'] = dest
- repo['repo'].close()
+ fnd["rel"] = path
+ fnd["path"] = dest
+ repo["repo"].close()
return fnd
try:
- repo['repo'].cat(
- ['path:{0}'.format(repo_path)], rev=ref[2], output=dest
- )
+ repo["repo"].cat(["path:{0}".format(repo_path)], rev=ref[2], output=dest)
except hglib.error.CommandError:
- repo['repo'].close()
+ repo["repo"].close()
continue
- with salt.utils.files.fopen(lk_fn, 'w'):
+ with salt.utils.files.fopen(lk_fn, "w"):
pass
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception: # pylint: disable=broad-except
pass
- with salt.utils.files.fopen(blobshadest, 'w+') as fp_:
+ with salt.utils.files.fopen(blobshadest, "w+") as fp_:
fp_.write(ref[2])
try:
os.remove(lk_fn)
except (OSError, IOError):
pass
- fnd['rel'] = path
- fnd['path'] = dest
+ fnd["rel"] = path
+ fnd["path"] = dest
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
@@ -714,183 +729,180 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
- fnd['stat'] = list(os.stat(dest))
+ fnd["stat"] = list(os.stat(dest))
except Exception: # pylint: disable=broad-except
pass
- repo['repo'].close()
+ repo["repo"].close()
return fnd
return fnd
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- ret = {'data': '',
- 'dest': ''}
- if not all(x in load for x in ('path', 'loc', 'saltenv')):
+ ret = {"data": "", "dest": ""}
+ if not all(x in load for x in ("path", "loc", "saltenv")):
return ret
- if not fnd['path']:
+ if not fnd["path"]:
return ret
- ret['dest'] = fnd['rel']
- gzip = load.get('gzip', None)
- fpath = os.path.normpath(fnd['path'])
- with salt.utils.files.fopen(fpath, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if data and six.PY3 and not salt.utils.files.is_binary(fpath):
data = data.decode(__salt_system_encoding__)
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
def file_hash(load, fnd):
- '''
+ """
Return a file hash, the hash type is set in the master config file
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if not all(x in load for x in ('path', 'saltenv')):
- return ''
- ret = {'hash_type': __opts__['hash_type']}
- relpath = fnd['rel']
- path = fnd['path']
- hashdest = os.path.join(__opts__['cachedir'],
- 'hgfs/hash',
- load['saltenv'],
- '{0}.hash.{1}'.format(relpath,
- __opts__['hash_type']))
+ if not all(x in load for x in ("path", "saltenv")):
+ return ""
+ ret = {"hash_type": __opts__["hash_type"]}
+ relpath = fnd["rel"]
+ path = fnd["path"]
+ hashdest = os.path.join(
+ __opts__["cachedir"],
+ "hgfs/hash",
+ load["saltenv"],
+ "{0}.hash.{1}".format(relpath, __opts__["hash_type"]),
+ )
if not os.path.isfile(hashdest):
- ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
- with salt.utils.files.fopen(hashdest, 'w+') as fp_:
- fp_.write(ret['hsum'])
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
+ with salt.utils.files.fopen(hashdest, "w+") as fp_:
+ fp_.write(ret["hsum"])
return ret
else:
- with salt.utils.files.fopen(hashdest, 'rb') as fp_:
- ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read())
+ with salt.utils.files.fopen(hashdest, "rb") as fp_:
+ ret["hsum"] = salt.utils.stringutils.to_unicode(fp_.read())
return ret
def _file_lists(load, form):
- '''
+ """
Return a dict containing the file lists for files and dirs
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
+ list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
- log.critical('Unable to make cachedir %s', list_cachedir)
+ log.critical("Unable to make cachedir %s", list_cachedir)
return []
- list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
- w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
- cache_match, refresh_cache, save_cache = \
- salt.fileserver.check_file_list_cache(
- __opts__, form, list_cache, w_lock
- )
+ list_cache = os.path.join(list_cachedir, "{0}.p".format(load["saltenv"]))
+ w_lock = os.path.join(list_cachedir, ".{0}.w".format(load["saltenv"]))
+ cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
+ __opts__, form, list_cache, w_lock
+ )
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {}
- ret['files'] = _get_file_list(load)
- ret['dirs'] = _get_dir_list(load)
+ ret["files"] = _get_file_list(load)
+ ret["dirs"] = _get_dir_list(load)
if save_cache:
- salt.fileserver.write_file_list_cache(
- __opts__, ret, list_cache, w_lock
- )
+ salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return []
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified environment
- '''
- return _file_lists(load, 'files')
+ """
+ return _file_lists(load, "files")
def _get_file_list(load):
- '''
+ """
Get a list of all files on the file server in a specified environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'saltenv' not in load or load['saltenv'] not in envs():
+ if "saltenv" not in load or load["saltenv"] not in envs():
return []
ret = set()
for repo in init():
- repo['repo'].open()
- ref = _get_ref(repo, load['saltenv'])
+ repo["repo"].open()
+ ref = _get_ref(repo, load["saltenv"])
if ref:
- manifest = repo['repo'].manifest(rev=ref[1])
+ manifest = repo["repo"].manifest(rev=ref[1])
for tup in manifest:
- relpath = os.path.relpath(tup[4], repo['root'])
+ relpath = os.path.relpath(tup[4], repo["root"])
# Don't add files outside the hgfs_root
- if not relpath.startswith('../'):
- ret.add(os.path.join(repo['mountpoint'], relpath))
- repo['repo'].close()
+ if not relpath.startswith("../"):
+ ret.add(os.path.join(repo["mountpoint"], relpath))
+ repo["repo"].close()
return sorted(ret)
def file_list_emptydirs(load): # pylint: disable=W0613
- '''
+ """
Return a list of all empty directories on the master
- '''
+ """
# Cannot have empty dirs in hg
return []
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
- '''
- return _file_lists(load, 'dirs')
+ """
+ return _file_lists(load, "dirs")
def _get_dir_list(load):
- '''
+ """
Get a list of all directories on the master
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'saltenv' not in load or load['saltenv'] not in envs():
+ if "saltenv" not in load or load["saltenv"] not in envs():
return []
ret = set()
for repo in init():
- repo['repo'].open()
- ref = _get_ref(repo, load['saltenv'])
+ repo["repo"].open()
+ ref = _get_ref(repo, load["saltenv"])
if ref:
- manifest = repo['repo'].manifest(rev=ref[1])
+ manifest = repo["repo"].manifest(rev=ref[1])
for tup in manifest:
filepath = tup[4]
- split = filepath.rsplit('/', 1)
+ split = filepath.rsplit("/", 1)
while len(split) > 1:
- relpath = os.path.relpath(split[0], repo['root'])
+ relpath = os.path.relpath(split[0], repo["root"])
# Don't add '.'
- if relpath != '.':
+ if relpath != ".":
# Don't add files outside the hgfs_root
- if not relpath.startswith('../'):
- ret.add(os.path.join(repo['mountpoint'], relpath))
- split = split[0].rsplit('/', 1)
- repo['repo'].close()
- if repo['mountpoint']:
- ret.add(repo['mountpoint'])
+ if not relpath.startswith("../"):
+ ret.add(os.path.join(repo["mountpoint"], relpath))
+ split = split[0].rsplit("/", 1)
+ repo["repo"].close()
+ if repo["mountpoint"]:
+ ret.add(repo["mountpoint"])
return sorted(ret)
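
The `_get_ref` rewrite above reads more clearly once Black folds the continuations: `branches` tries branches then tags, `bookmarks` tries bookmarks then tags, and `mixed` chains all three with `or`. A self-contained sketch of that resolution order; the tuples mimic hglib's (name, rev, nodeid) results and the values are fabricated:

    # Sketch of hgfs ref resolution order per branch_method, as rewritten above.
    BRANCHES = [("default", 4, "7c96229269fa")]
    BOOKMARKS = [("feature-x", 2, "ab12cd34ef56")]
    TAGS = [("1.0", 3, "3be15e71b31a")]


    def _find(seq, name):
        return next((x for x in seq if x[0] == name), None)


    def get_ref(branch_method, name):
        if branch_method == "branches":
            return _find(BRANCHES, name) or _find(TAGS, name)
        if branch_method == "bookmarks":
            return _find(BOOKMARKS, name) or _find(TAGS, name)
        if branch_method == "mixed":
            return _find(BRANCHES, name) or _find(BOOKMARKS, name) or _find(TAGS, name)
        return False


    print(get_ref("mixed", "1.0"))  # falls through to the tag
    print(get_ref("branches", "feature-x"))  # None: bookmarks not consulted
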
diff --git a/salt/fileserver/minionfs.py b/salt/fileserver/minionfs.py
index 53603503a5e..1013981dcf6 100644
--- a/salt/fileserver/minionfs.py
+++ b/salt/fileserver/minionfs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Fileserver backend which serves files pushed to the Master
The :mod:`cp.push ` function allows Minions to push files
@@ -26,12 +26,13 @@ Other minionfs settings include: :conf_master:`minionfs_whitelist`,
.. seealso:: :ref:`tutorial-minionfs`
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
# Import python libs
import os
-import logging
# Import salt libs
import salt.fileserver
@@ -50,76 +51,74 @@ log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'minionfs'
+__virtualname__ = "minionfs"
def __virtual__():
- '''
+ """
Only load if file_recv is enabled
- '''
- if __virtualname__ not in __opts__['fileserver_backend']:
+ """
+ if __virtualname__ not in __opts__["fileserver_backend"]:
return False
- return __virtualname__ if __opts__['file_recv'] else False
+ return __virtualname__ if __opts__["file_recv"] else False
def _is_exposed(minion):
- '''
+ """
Check if the minion is exposed, based on the whitelist and blacklist
- '''
+ """
return salt.utils.stringutils.check_whitelist_blacklist(
minion,
- whitelist=__opts__['minionfs_whitelist'],
- blacklist=__opts__['minionfs_blacklist']
+ whitelist=__opts__["minionfs_whitelist"],
+ blacklist=__opts__["minionfs_blacklist"],
)
-def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
- '''
+def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
+ """
Search the environment for the relative path
- '''
- fnd = {'path': '', 'rel': ''}
+ """
+ fnd = {"path": "", "rel": ""}
if os.path.isabs(path):
return fnd
if tgt_env not in envs():
return fnd
- if os.path.basename(path) == 'top.sls':
+ if os.path.basename(path) == "top.sls":
log.debug(
- 'minionfs will NOT serve top.sls '
- 'for security reasons (path requested: %s)', path
+ "minionfs will NOT serve top.sls "
+ "for security reasons (path requested: %s)",
+ path,
)
return fnd
- mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
+ mountpoint = salt.utils.url.strip_proto(__opts__["minionfs_mountpoint"])
# Remove the mountpoint to get the "true" path
- path = path[len(mountpoint):].lstrip(os.path.sep)
+ path = path[len(mountpoint) :].lstrip(os.path.sep)
try:
minion, pushed_file = path.split(os.sep, 1)
except ValueError:
return fnd
if not _is_exposed(minion):
return fnd
- full = os.path.join(
- __opts__['cachedir'], 'minions', minion, 'files', pushed_file
- )
- if os.path.isfile(full) \
- and not salt.fileserver.is_file_ignored(__opts__, full):
- fnd['path'] = full
- fnd['rel'] = path
- fnd['stat'] = list(os.stat(full))
+ full = os.path.join(__opts__["cachedir"], "minions", minion, "files", pushed_file)
+ if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
+ fnd["path"] = full
+ fnd["rel"] = path
+ fnd["stat"] = list(os.stat(full))
return fnd
return fnd
def envs():
- '''
+ """
Returns the one environment specified for minionfs in the master
configuration.
- '''
- return [__opts__['minionfs_env']]
+ """
+ return [__opts__["minionfs_env"]]
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
CLI Example:
@@ -129,54 +128,54 @@ def serve_file(load, fnd):
# Push the file to the master
$ salt 'source-minion' cp.push /path/to/the/file
$ salt 'destination-minion' cp.get_file salt://source-minion/path/to/the/file /destination/file
- '''
- ret = {'data': '', 'dest': ''}
- if not fnd['path']:
+ """
+ ret = {"data": "", "dest": ""}
+ if not fnd["path"]:
return ret
- ret['dest'] = fnd['rel']
- gzip = load.get('gzip', None)
- fpath = os.path.normpath(fnd['path'])
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
# AP
# May I sleep here to slow down serving of big files?
# How many threads are serving files?
- with salt.utils.files.fopen(fpath, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if data and six.PY3 and not salt.utils.files.is_binary(fpath):
data = data.decode(__salt_system_encoding__)
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
def update():
- '''
+ """
When we are asked to update (regular interval) let's reap the cache
- '''
+ """
try:
salt.fileserver.reap_fileserver_cache_dir(
- os.path.join(__opts__['cachedir'], 'minionfs/hash'),
- find_file)
+ os.path.join(__opts__["cachedir"], "minionfs/hash"), find_file
+ )
except os.error:
# Hash file won't exist if no files have yet been served up
pass
def file_hash(load, fnd):
- '''
+ """
Return a file hash, the hash type is set in the master config file
- '''
- path = fnd['path']
+ """
+ path = fnd["path"]
ret = {}
- if 'env' in load:
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if load['saltenv'] not in envs():
+ if load["saltenv"] not in envs():
return {}
# if the file doesn't exist, we can't get a hash
@@ -184,79 +183,80 @@ def file_hash(load, fnd):
return ret
# set the hash_type as it is determined by config, so the mechanism won't change it
- ret['hash_type'] = __opts__['hash_type']
+ ret["hash_type"] = __opts__["hash_type"]
# check if the hash is cached
# cache file's contents should be "hash:mtime"
cache_path = os.path.join(
- __opts__['cachedir'],
- 'minionfs',
- 'hash',
- load['saltenv'],
- '{0}.hash.{1}'.format(fnd['rel'], __opts__['hash_type'])
+ __opts__["cachedir"],
+ "minionfs",
+ "hash",
+ load["saltenv"],
+ "{0}.hash.{1}".format(fnd["rel"], __opts__["hash_type"]),
)
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
try:
- with salt.utils.files.fopen(cache_path, 'rb') as fp_:
+ with salt.utils.files.fopen(cache_path, "rb") as fp_:
try:
- hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(':')
+ hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(
+ ":"
+ )
except ValueError:
log.debug(
- 'Fileserver attempted to read incomplete cache file. '
- 'Retrying.'
+ "Fileserver attempted to read incomplete cache file. "
+ "Retrying."
)
file_hash(load, fnd)
return ret
if os.path.getmtime(path) == mtime:
# check if mtime changed
- ret['hsum'] = hsum
+ ret["hsum"] = hsum
return ret
# Can't use Python select() because we need Windows support
except os.error:
log.debug(
- 'Fileserver encountered lock when reading cache file. '
- 'Retrying.'
+ "Fileserver encountered lock when reading cache file. " "Retrying."
)
file_hash(load, fnd)
return ret
# if we don't have a cache entry, let's make one
- ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
- cache_object = '{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))
- with salt.utils.files.flopen(cache_path, 'w') as fp_:
+ cache_object = "{0}:{1}".format(ret["hsum"], os.path.getmtime(path))
+ with salt.utils.files.flopen(cache_path, "w") as fp_:
fp_.write(cache_object)
return ret
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if load['saltenv'] not in envs():
+ if load["saltenv"] not in envs():
return []
- mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
- prefix = load.get('prefix', '').strip('/')
+ mountpoint = salt.utils.url.strip_proto(__opts__["minionfs_mountpoint"])
+ prefix = load.get("prefix", "").strip("/")
if mountpoint and prefix.startswith(mountpoint + os.path.sep):
- prefix = prefix[len(mountpoint + os.path.sep):]
+ prefix = prefix[len(mountpoint + os.path.sep) :]
- minions_cache_dir = os.path.join(__opts__['cachedir'], 'minions')
+ minions_cache_dir = os.path.join(__opts__["cachedir"], "minions")
minion_dirs = os.listdir(minions_cache_dir)
# If the prefix is not an empty string, then get the minion id from it. The
# minion ID will be the part before the first slash, so if there is no
# slash, this is an invalid path.
if prefix:
- tgt_minion, _, prefix = prefix.partition('/')
+ tgt_minion, _, prefix = prefix.partition("/")
if not prefix:
# No minion ID in path
return []
@@ -264,8 +264,7 @@ def file_list(load):
# pushed files
if tgt_minion not in minion_dirs:
log.warning(
- 'No files found in minionfs cache for minion ID \'%s\'',
- tgt_minion
+ "No files found in minionfs cache for minion ID '%s'", tgt_minion
)
return []
minion_dirs = [tgt_minion]
@@ -274,11 +273,11 @@ def file_list(load):
for minion in minion_dirs:
if not _is_exposed(minion):
continue
- minion_files_dir = os.path.join(minions_cache_dir, minion, 'files')
+ minion_files_dir = os.path.join(minions_cache_dir, minion, "files")
if not os.path.isdir(minion_files_dir):
log.debug(
- 'minionfs: could not find files directory under %s!',
- os.path.join(minions_cache_dir, minion)
+ "minionfs: could not find files directory under %s!",
+ os.path.join(minions_cache_dir, minion),
)
continue
walk_dir = os.path.join(minion_files_dir, prefix)
@@ -288,10 +287,8 @@ def file_list(load):
# Ignore links for security reasons
if os.path.islink(os.path.join(root, fname)):
continue
- relpath = os.path.relpath(
- os.path.join(root, fname), minion_files_dir
- )
- if relpath.startswith('../'):
+ relpath = os.path.relpath(os.path.join(root, fname), minion_files_dir)
+ if relpath.startswith("../"):
continue
rel_fn = os.path.join(mountpoint, minion, relpath)
if not salt.fileserver.is_file_ignored(__opts__, rel_fn):
@@ -300,11 +297,11 @@ def file_list(load):
# There should be no emptydirs
-#def file_list_emptydirs(load):
+# def file_list_emptydirs(load):
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
CLI Example:
@@ -316,26 +313,26 @@ def dir_list(load):
destination-minion:
- source-minion/absolute
- source-minion/absolute/path
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if load['saltenv'] not in envs():
+ if load["saltenv"] not in envs():
return []
- mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
- prefix = load.get('prefix', '').strip('/')
+ mountpoint = salt.utils.url.strip_proto(__opts__["minionfs_mountpoint"])
+ prefix = load.get("prefix", "").strip("/")
if mountpoint and prefix.startswith(mountpoint + os.path.sep):
- prefix = prefix[len(mountpoint + os.path.sep):]
+ prefix = prefix[len(mountpoint + os.path.sep) :]
- minions_cache_dir = os.path.join(__opts__['cachedir'], 'minions')
+ minions_cache_dir = os.path.join(__opts__["cachedir"], "minions")
minion_dirs = os.listdir(minions_cache_dir)
# If the prefix is not an empty string, then get the minion id from it. The
# minion ID will be the part before the first slash, so if there is no
# slash, this is an invalid path.
if prefix:
- tgt_minion, _, prefix = prefix.partition('/')
+ tgt_minion, _, prefix = prefix.partition("/")
if not prefix:
# No minion ID in path
return []
@@ -343,8 +340,7 @@ def dir_list(load):
# pushed files
if tgt_minion not in minion_dirs:
log.warning(
- 'No files found in minionfs cache for minion ID \'%s\'',
- tgt_minion
+ "No files found in minionfs cache for minion ID '%s'", tgt_minion
)
return []
minion_dirs = [tgt_minion]
@@ -353,11 +349,11 @@ def dir_list(load):
for minion in os.listdir(minions_cache_dir):
if not _is_exposed(minion):
continue
- minion_files_dir = os.path.join(minions_cache_dir, minion, 'files')
+ minion_files_dir = os.path.join(minions_cache_dir, minion, "files")
if not os.path.isdir(minion_files_dir):
log.warning(
- 'minionfs: could not find files directory under %s!',
- os.path.join(minions_cache_dir, minion)
+ "minionfs: could not find files directory under %s!",
+ os.path.join(minions_cache_dir, minion),
)
continue
walk_dir = os.path.join(minion_files_dir, prefix)
@@ -366,7 +362,7 @@ def dir_list(load):
relpath = os.path.relpath(root, minion_files_dir)
# Ensure that the current directory and directories outside of
# the minion dir do not end up in return list
- if relpath in ('.', '..') or relpath.startswith('../'):
+ if relpath in (".", "..") or relpath.startswith("../"):
continue
ret.append(os.path.join(mountpoint, minion, relpath))
return ret
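
minionfs's `file_hash` caches each digest as a single "hash:mtime" string and re-hashes only when the file's mtime no longer matches. A minimal sketch of that invalidation scheme, with `hashlib.sha256` standing in for the configured `hash_type` and plain `open` for `salt.utils.files.fopen`:

    # Sketch of the "hash:mtime" cache scheme used by minionfs file_hash above.
    import hashlib
    import os


    def cached_hash(path, cache_path):
        mtime = str(os.path.getmtime(path))
        if os.path.exists(cache_path):
            with open(cache_path) as fp_:
                hsum, cached_mtime = fp_.read().split(":", 1)
            if cached_mtime == mtime:
                return hsum  # mtime unchanged: reuse the cached digest
        with open(path, "rb") as fp_:
            hsum = hashlib.sha256(fp_.read()).hexdigest()
        with open(cache_path, "w") as fp_:
            fp_.write("{0}:{1}".format(hsum, mtime))  # save "hash:mtime"
        return hsum
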
diff --git a/salt/fileserver/roots.py b/salt/fileserver/roots.py
index 160e2d99a09..66227999b3d 100644
--- a/salt/fileserver/roots.py
+++ b/salt/fileserver/roots.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
The default file server backend
This fileserver backend serves files from the Master's local filesystem. If
@@ -14,13 +14,14 @@ be in the :conf_master:`fileserver_backend` list to enable this backend.
Fileserver environments are defined using the :conf_master:`file_roots`
configuration option.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import errno
+import logging
+
# Import python libs
import os
-import errno
-import logging
# Import salt libs
import salt.fileserver
@@ -37,28 +38,29 @@ from salt.ext import six
log = logging.getLogger(__name__)
-def find_file(path, saltenv='base', **kwargs):
- '''
+def find_file(path, saltenv="base", **kwargs):
+ """
Search the environment for the relative path.
- '''
- if 'env' in kwargs:
+ """
+ if "env" in kwargs:
# "env" is not supported; Use "saltenv".
- kwargs.pop('env')
+ kwargs.pop("env")
path = os.path.normpath(path)
- fnd = {'path': '',
- 'rel': ''}
+ fnd = {"path": "", "rel": ""}
if os.path.isabs(path):
return fnd
- if saltenv not in __opts__['file_roots']:
- if '__env__' in __opts__['file_roots']:
- log.debug("salt environment '%s' maps to __env__ file_roots directory", saltenv)
- saltenv = '__env__'
+ if saltenv not in __opts__["file_roots"]:
+ if "__env__" in __opts__["file_roots"]:
+ log.debug(
+ "salt environment '%s' maps to __env__ file_roots directory", saltenv
+ )
+ saltenv = "__env__"
else:
return fnd
def _add_file_stat(fnd):
- '''
+ """
Stat the file and, assuming no errors were found, convert the stat
result to a list of values and add to the return dict.
@@ -75,16 +77,16 @@ def find_file(path, saltenv='base', **kwargs):
7 => st_atime=1468284229
8 => st_mtime=1456338235
9 => st_ctime=1456338235
- '''
+ """
try:
- fnd['stat'] = list(os.stat(fnd['path']))
+ fnd["stat"] = list(os.stat(fnd["path"]))
except Exception: # pylint: disable=broad-except
pass
return fnd
- if 'index' in kwargs:
+ if "index" in kwargs:
try:
- root = __opts__['file_roots'][saltenv][int(kwargs['index'])]
+ root = __opts__["file_roots"][saltenv][int(kwargs["index"])]
except IndexError:
# An invalid index was passed
return fnd
@@ -93,141 +95,137 @@ def find_file(path, saltenv='base', **kwargs):
return fnd
full = os.path.join(root, path)
if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
- fnd['path'] = full
- fnd['rel'] = path
+ fnd["path"] = full
+ fnd["rel"] = path
return _add_file_stat(fnd)
return fnd
- for root in __opts__['file_roots'][saltenv]:
+ for root in __opts__["file_roots"][saltenv]:
full = os.path.join(root, path)
if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
- fnd['path'] = full
- fnd['rel'] = path
+ fnd["path"] = full
+ fnd["rel"] = path
return _add_file_stat(fnd)
return fnd
def envs():
- '''
+ """
Return the file server environments
- '''
- return sorted(__opts__['file_roots'])
+ """
+ return sorted(__opts__["file_roots"])
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- ret = {'data': '',
- 'dest': ''}
- if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
+ ret = {"data": "", "dest": ""}
+ if "path" not in load or "loc" not in load or "saltenv" not in load:
return ret
- if not fnd['path']:
+ if not fnd["path"]:
return ret
- ret['dest'] = fnd['rel']
- gzip = load.get('gzip', None)
- fpath = os.path.normpath(fnd['path'])
- with salt.utils.files.fopen(fpath, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
def update():
- '''
+ """
When we are asked to update (regular interval), let's reap the cache
- '''
+ """
try:
salt.fileserver.reap_fileserver_cache_dir(
- os.path.join(__opts__['cachedir'], 'roots', 'hash'),
- find_file
+ os.path.join(__opts__["cachedir"], "roots", "hash"), find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass
- mtime_map_path = os.path.join(__opts__['cachedir'], 'roots', 'mtime_map')
+ mtime_map_path = os.path.join(__opts__["cachedir"], "roots", "mtime_map")
# data to send on event
- data = {'changed': False,
- 'files': {'changed': []},
- 'backend': 'roots'}
+ data = {"changed": False, "files": {"changed": []}, "backend": "roots"}
# generate the new map
- new_mtime_map = salt.fileserver.generate_mtime_map(__opts__, __opts__['file_roots'])
+ new_mtime_map = salt.fileserver.generate_mtime_map(__opts__, __opts__["file_roots"])
old_mtime_map = {}
# if you have an old map, load that
if os.path.exists(mtime_map_path):
- with salt.utils.files.fopen(mtime_map_path, 'rb') as fp_:
+ with salt.utils.files.fopen(mtime_map_path, "rb") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
try:
- file_path, mtime = line.replace('\n', '').split(':', 1)
+ file_path, mtime = line.replace("\n", "").split(":", 1)
old_mtime_map[file_path] = mtime
if mtime != new_mtime_map.get(file_path, mtime):
- data['files']['changed'].append(file_path)
+ data["files"]["changed"].append(file_path)
except ValueError:
# Document the invalid entry in the log
log.warning(
- 'Skipped invalid cache mtime entry in %s: %s',
- mtime_map_path, line
+ "Skipped invalid cache mtime entry in %s: %s",
+ mtime_map_path,
+ line,
)
# compare the maps, set changed to the return value
- data['changed'] = salt.fileserver.diff_mtime_map(old_mtime_map, new_mtime_map)
+ data["changed"] = salt.fileserver.diff_mtime_map(old_mtime_map, new_mtime_map)
# compute files that were removed and added
old_files = set(old_mtime_map.keys())
new_files = set(new_mtime_map.keys())
- data['files']['removed'] = list(old_files - new_files)
- data['files']['added'] = list(new_files - old_files)
+ data["files"]["removed"] = list(old_files - new_files)
+ data["files"]["added"] = list(new_files - old_files)
# write out the new map
mtime_map_path_dir = os.path.dirname(mtime_map_path)
if not os.path.exists(mtime_map_path_dir):
os.makedirs(mtime_map_path_dir)
- with salt.utils.files.fopen(mtime_map_path, 'wb') as fp_:
+ with salt.utils.files.fopen(mtime_map_path, "wb") as fp_:
for file_path, mtime in six.iteritems(new_mtime_map):
fp_.write(
- salt.utils.stringutils.to_bytes(
- '{0}:{1}\n'.format(file_path, mtime)
- )
+ salt.utils.stringutils.to_bytes("{0}:{1}\n".format(file_path, mtime))
)
- if __opts__.get('fileserver_events', False):
+ if __opts__.get("fileserver_events", False):
# if there is a change, fire an event
with salt.utils.event.get_event(
- 'master',
- __opts__['sock_dir'],
- __opts__['transport'],
- opts=__opts__,
- listen=False) as event:
+ "master",
+ __opts__["sock_dir"],
+ __opts__["transport"],
+ opts=__opts__,
+ listen=False,
+ ) as event:
event.fire_event(
- data,
- salt.utils.event.tagify(['roots', 'update'], prefix='fileserver'))
+ data, salt.utils.event.tagify(["roots", "update"], prefix="fileserver")
+ )
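
update() persists the mtime map as one "path:mtime" line per file and detects changes by comparing the old map against a freshly generated one, with plain set arithmetic yielding the removed and added lists. A minimal sketch with made-up data:

old_map = {"/srv/salt/top.sls": "100.0", "/srv/salt/gone.sls": "50.0"}
new_map = {"/srv/salt/top.sls": "200.0", "/srv/salt/new.sls": "60.0"}

# new_map.get(path, mtime) defaults to the old mtime, so removed files
# are not also reported as changed.
changed = [p for p, m in old_map.items() if m != new_map.get(p, m)]
removed = sorted(set(old_map) - set(new_map))
added = sorted(set(new_map) - set(old_map))
# changed == ["/srv/salt/top.sls"]; removed == ["/srv/salt/gone.sls"];
# added == ["/srv/salt/new.sls"]
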
def file_hash(load, fnd):
- '''
+ """
Return a file hash, the hash type is set in the master config file
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'path' not in load or 'saltenv' not in load:
- return ''
- path = fnd['path']
- saltenv = load['saltenv']
- if saltenv not in __opts__['file_roots'] and '__env__' in __opts__['file_roots']:
- saltenv = '__env__'
+ if "path" not in load or "saltenv" not in load:
+ return ""
+ path = fnd["path"]
+ saltenv = load["saltenv"]
+ if saltenv not in __opts__["file_roots"] and "__env__" in __opts__["file_roots"]:
+ saltenv = "__env__"
ret = {}
# if the file doesn't exist, we can't get a hash
@@ -235,24 +233,29 @@ def file_hash(load, fnd):
return ret
# set the hash_type as it is determined by config, so the mechanism won't change it
- ret['hash_type'] = __opts__['hash_type']
+ ret["hash_type"] = __opts__["hash_type"]
# check if the hash is cached
# cache file's contents should be "hash:mtime"
- cache_path = os.path.join(__opts__['cachedir'],
- 'roots',
- 'hash',
- saltenv,
- '{0}.hash.{1}'.format(fnd['rel'],
- __opts__['hash_type']))
+ cache_path = os.path.join(
+ __opts__["cachedir"],
+ "roots",
+ "hash",
+ saltenv,
+ "{0}.hash.{1}".format(fnd["rel"], __opts__["hash_type"]),
+ )
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
try:
- with salt.utils.files.fopen(cache_path, 'rb') as fp_:
+ with salt.utils.files.fopen(cache_path, "rb") as fp_:
try:
- hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(':')
+ hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(
+ ":"
+ )
except ValueError:
- log.debug('Fileserver attempted to read incomplete cache file. Retrying.')
+ log.debug(
+ "Fileserver attempted to read incomplete cache file. Retrying."
+ )
# Delete the file since it's unusable (either corrupted or incomplete)
try:
os.unlink(cache_path)
@@ -261,9 +264,12 @@ def file_hash(load, fnd):
return file_hash(load, fnd)
if str(os.path.getmtime(path)) == mtime:
# check if mtime changed
- ret['hsum'] = hsum
+ ret["hsum"] = hsum
return ret
- except (os.error, IOError): # Can't use Python select() because we need Windows support
+ except (
+ os.error,
+ IOError,
+ ): # Can't use Python select() because we need Windows support
log.debug("Fileserver encountered lock when reading cache file. Retrying.")
# Delete the file since it's unusable (either corrupted or incomplete)
try:
@@ -273,7 +279,7 @@ def file_hash(load, fnd):
return file_hash(load, fnd)
# if we don't have a cache entry, let's make one
- ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
@@ -287,79 +293,79 @@ def file_hash(load, fnd):
else:
raise
# save the cache object "hash:mtime"
- cache_object = '{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))
- with salt.utils.files.flopen(cache_path, 'w') as fp_:
+ cache_object = "{0}:{1}".format(ret["hsum"], os.path.getmtime(path))
+ with salt.utils.files.flopen(cache_path, "w") as fp_:
fp_.write(cache_object)
return ret
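
file_hash() writes each cache entry as a single "hash:mtime" string and serves the cached hsum only while the file's current mtime still matches; anything else falls through to a fresh get_hash() call. A sketch of that invalidation rule (the helper name is hypothetical):

import os

def cached_hsum(cache_object, path):
    # cache_object is the "hash:mtime" string written by file_hash() above
    hsum, mtime = cache_object.split(":")
    if str(os.path.getmtime(path)) == mtime:
        return hsum  # cache is still fresh
    return None  # stale; the caller recomputes and rewrites the entry
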
def _file_lists(load, form):
- '''
+ """
Return a dict containing the file lists for files, dirs, empty_dirs and symlinks
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- saltenv = load['saltenv']
- if saltenv not in __opts__['file_roots']:
- if '__env__' in __opts__['file_roots']:
- log.debug("salt environment '%s' maps to __env__ file_roots directory", saltenv)
- saltenv = '__env__'
+ saltenv = load["saltenv"]
+ if saltenv not in __opts__["file_roots"]:
+ if "__env__" in __opts__["file_roots"]:
+ log.debug(
+ "salt environment '%s' maps to __env__ file_roots directory", saltenv
+ )
+ saltenv = "__env__"
else:
return []
- list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists', 'roots')
+ list_cachedir = os.path.join(__opts__["cachedir"], "file_lists", "roots")
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
- log.critical('Unable to make cachedir %s', list_cachedir)
+ log.critical("Unable to make cachedir %s", list_cachedir)
return []
- list_cache = os.path.join(list_cachedir, '{0}.p'.format(salt.utils.files.safe_filename_leaf(saltenv)))
- w_lock = os.path.join(list_cachedir, '.{0}.w'.format(salt.utils.files.safe_filename_leaf(saltenv)))
- cache_match, refresh_cache, save_cache = \
- salt.fileserver.check_file_list_cache(
- __opts__, form, list_cache, w_lock
- )
+ list_cache = os.path.join(
+ list_cachedir, "{0}.p".format(salt.utils.files.safe_filename_leaf(saltenv))
+ )
+ w_lock = os.path.join(
+ list_cachedir, ".{0}.w".format(salt.utils.files.safe_filename_leaf(saltenv))
+ )
+ cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
+ __opts__, form, list_cache, w_lock
+ )
if cache_match is not None:
return cache_match
if refresh_cache:
- ret = {
- 'files': set(),
- 'dirs': set(),
- 'empty_dirs': set(),
- 'links': {}
- }
+ ret = {"files": set(), "dirs": set(), "empty_dirs": set(), "links": {}}
def _add_to(tgt, fs_root, parent_dir, items):
- '''
+ """
Add the files to the target set
- '''
+ """
+
def _translate_sep(path):
- '''
+ """
Translate path separators for Windows masterless minions
- '''
- return path.replace('\\', '/') if os.path.sep == '\\' else path
+ """
+ return path.replace("\\", "/") if os.path.sep == "\\" else path
for item in items:
abs_path = os.path.join(parent_dir, item)
- log.trace('roots: Processing %s', abs_path)
+ log.trace("roots: Processing %s", abs_path)
is_link = salt.utils.path.islink(abs_path)
log.trace(
- 'roots: %s is %sa link',
- abs_path, 'not ' if not is_link else ''
+ "roots: %s is %sa link", abs_path, "not " if not is_link else ""
)
- if is_link and __opts__['fileserver_ignoresymlinks']:
+ if is_link and __opts__["fileserver_ignoresymlinks"]:
continue
rel_path = _translate_sep(os.path.relpath(abs_path, fs_root))
- log.trace('roots: %s relative path is %s', abs_path, rel_path)
+ log.trace("roots: %s relative path is %s", abs_path, rel_path)
if salt.fileserver.is_file_ignored(__opts__, rel_path):
continue
tgt.add(rel_path)
try:
if not os.listdir(abs_path):
- ret['empty_dirs'].add(rel_path)
+ ret["empty_dirs"].add(rel_path)
except Exception: # pylint: disable=broad-except
# Generic exception because running os.listdir() on a
# non-directory path raises an OSError on *NIX and a
@@ -368,57 +374,51 @@ def _file_lists(load, form):
if is_link:
link_dest = salt.utils.path.readlink(abs_path)
log.trace(
- 'roots: %s symlink destination is %s',
- abs_path, link_dest
+ "roots: %s symlink destination is %s", abs_path, link_dest
)
- if salt.utils.platform.is_windows() \
- and link_dest.startswith('\\\\'):
+ if salt.utils.platform.is_windows() and link_dest.startswith(
+ "\\\\"
+ ):
# Symlink points to a network path. Since you can't
# join UNC and non-UNC paths, just assume the original
# path.
log.trace(
- 'roots: %s is a UNC path, using %s instead',
- link_dest, abs_path
+ "roots: %s is a UNC path, using %s instead",
+ link_dest,
+ abs_path,
)
link_dest = abs_path
- if link_dest.startswith('..'):
+ if link_dest.startswith(".."):
joined = os.path.join(abs_path, link_dest)
else:
- joined = os.path.join(
- os.path.dirname(abs_path), link_dest
- )
+ joined = os.path.join(os.path.dirname(abs_path), link_dest)
rel_dest = _translate_sep(
os.path.relpath(
os.path.realpath(os.path.normpath(joined)),
- os.path.realpath(fs_root)
+ os.path.realpath(fs_root),
)
)
- log.trace(
- 'roots: %s relative path is %s',
- abs_path, rel_dest
- )
- if not rel_dest.startswith('..'):
+ log.trace("roots: %s relative path is %s", abs_path, rel_dest)
+ if not rel_dest.startswith(".."):
# Only count the link if it does not point
# outside of the root dir of the fileserver
# (i.e. the "path" variable)
- ret['links'][rel_path] = link_dest
+ ret["links"][rel_path] = link_dest
- for path in __opts__['file_roots'][saltenv]:
+ for path in __opts__["file_roots"][saltenv]:
for root, dirs, files in salt.utils.path.os_walk(
- path,
- followlinks=__opts__['fileserver_followsymlinks']):
- _add_to(ret['dirs'], path, root, dirs)
- _add_to(ret['files'], path, root, files)
+ path, followlinks=__opts__["fileserver_followsymlinks"]
+ ):
+ _add_to(ret["dirs"], path, root, dirs)
+ _add_to(ret["files"], path, root, files)
- ret['files'] = sorted(ret['files'])
- ret['dirs'] = sorted(ret['dirs'])
- ret['empty_dirs'] = sorted(ret['empty_dirs'])
+ ret["files"] = sorted(ret["files"])
+ ret["dirs"] = sorted(ret["dirs"])
+ ret["empty_dirs"] = sorted(ret["empty_dirs"])
if save_cache:
try:
- salt.fileserver.write_file_list_cache(
- __opts__, ret, list_cache, w_lock
- )
+ salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
except NameError:
# Catch msgpack error in salt-ssh
pass
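
The symlink branch above resolves a link's destination against its parent directory, canonicalizes the result, and records the link only when the canonical path still falls under the fileserver root. A condensed standalone sketch of that check:

import os

def link_stays_inside(abs_path, link_dest, fs_root):
    # Relative destinations are resolved the same way _add_to() does above,
    # then normalized so '..' components cannot escape undetected.
    if link_dest.startswith(".."):
        joined = os.path.join(abs_path, link_dest)
    else:
        joined = os.path.join(os.path.dirname(abs_path), link_dest)
    rel = os.path.relpath(
        os.path.realpath(os.path.normpath(joined)), os.path.realpath(fs_root)
    )
    return not rel.startswith("..")
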
@@ -428,45 +428,48 @@ def _file_lists(load, form):
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified
environment
- '''
- return _file_lists(load, 'files')
+ """
+ return _file_lists(load, "files")
def file_list_emptydirs(load):
- '''
+ """
Return a list of all empty directories on the master
- '''
- return _file_lists(load, 'empty_dirs')
+ """
+ return _file_lists(load, "empty_dirs")
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
- '''
- return _file_lists(load, 'dirs')
+ """
+ return _file_lists(load, "dirs")
def symlink_list(load):
- '''
+ """
Return a dict of all symlinks based on a given path on the Master
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = {}
- if load['saltenv'] not in __opts__['file_roots'] and '__env__' not in __opts__['file_roots']:
+ if (
+ load["saltenv"] not in __opts__["file_roots"]
+ and "__env__" not in __opts__["file_roots"]
+ ):
return ret
- if 'prefix' in load:
- prefix = load['prefix'].strip('/')
+ if "prefix" in load:
+ prefix = load["prefix"].strip("/")
else:
- prefix = ''
+ prefix = ""
- symlinks = _file_lists(load, 'links')
- return dict([(key, val)
- for key, val in six.iteritems(symlinks)
- if key.startswith(prefix)])
+ symlinks = _file_lists(load, "links")
+ return dict(
+ [(key, val) for key, val in six.iteritems(symlinks) if key.startswith(prefix)]
+ )
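
find_file(), _file_lists(), and symlink_list() all share the same fallback: a requested saltenv with no file_roots entry is remapped to the catch-all "__env__" root when one is configured, and rejected otherwise. A minimal sketch of that resolution:

def resolve_saltenv(saltenv, file_roots):
    # Mirrors the fallback used throughout roots.py above.
    if saltenv in file_roots:
        return saltenv
    if "__env__" in file_roots:
        return "__env__"
    return None  # unknown environment; callers return an empty result

# resolve_saltenv("dev", {"base": [], "__env__": []}) -> "__env__"
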
diff --git a/salt/fileserver/s3fs.py b/salt/fileserver/s3fs.py
index 09f1b4e227e..6b94b0c4d32 100644
--- a/salt/fileserver/s3fs.py
+++ b/salt/fileserver/s3fs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Amazon S3 Fileserver Backend
.. versionadded:: 0.16.0
@@ -76,15 +76,16 @@ structure::
More info here:
https://docs.aws.amazon.com/cli/latest/topic/s3-config.html
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import datetime
-import os
-import time
-import pickle
import logging
+import os
+import pickle
+import time
# Import salt libs
import salt.fileserver as fs
@@ -99,6 +100,7 @@ import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import filter
from salt.ext.six.moves.urllib.parse import quote as _quote
+
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
@@ -108,10 +110,10 @@ S3_SYNC_ON_UPDATE = True # sync cache on update rather than jit
def envs():
- '''
+ """
Return a list of directories within the bucket that can be
used as environments.
- '''
+ """
# update and grab the envs from the metadata keys
metadata = _init()
@@ -119,40 +121,43 @@ def envs():
def update():
- '''
+ """
Update the cache file for the bucket.
- '''
+ """
metadata = _init()
if S3_SYNC_ON_UPDATE:
# sync the buckets to the local cache
- log.info('Syncing local cache from S3...')
+ log.info("Syncing local cache from S3...")
for saltenv, env_meta in six.iteritems(metadata):
for bucket_files in _find_files(env_meta):
for bucket, files in six.iteritems(bucket_files):
for file_path in files:
- cached_file_path = _get_cached_file_name(bucket, saltenv, file_path)
- log.info('%s - %s : %s', bucket, saltenv, file_path)
+ cached_file_path = _get_cached_file_name(
+ bucket, saltenv, file_path
+ )
+ log.info("%s - %s : %s", bucket, saltenv, file_path)
# load the file from S3 if it's not in the cache or it's old
- _get_file_from_s3(metadata, saltenv, bucket, file_path, cached_file_path)
+ _get_file_from_s3(
+ metadata, saltenv, bucket, file_path, cached_file_path
+ )
- log.info('Sync local cache from S3 completed.')
+ log.info("Sync local cache from S3 completed.")
-def find_file(path, saltenv='base', **kwargs):
- '''
+def find_file(path, saltenv="base", **kwargs):
+ """
Look through the buckets cache file for a match.
If the file is found, it is retrieved from S3 only if its cached version
is missing, or if the MD5 does not match.
- '''
- if 'env' in kwargs:
+ """
+ if "env" in kwargs:
# "env" is not supported; Use "saltenv".
- kwargs.pop('env')
+ kwargs.pop("env")
- fnd = {'bucket': None,
- 'path': None}
+ fnd = {"bucket": None, "path": None}
metadata = _init()
if not metadata or saltenv not in metadata:
@@ -167,105 +172,102 @@ def find_file(path, saltenv='base', **kwargs):
for bucket in env_files:
for bucket_name, files in six.iteritems(bucket):
if path in files and not fs.is_file_ignored(__opts__, path):
- fnd['bucket'] = bucket_name
- fnd['path'] = path
+ fnd["bucket"] = bucket_name
+ fnd["path"] = path
break
else:
continue # only executes if we didn't break
break
- if not fnd['path'] or not fnd['bucket']:
+ if not fnd["path"] or not fnd["bucket"]:
return fnd
- cached_file_path = _get_cached_file_name(fnd['bucket'], saltenv, path)
+ cached_file_path = _get_cached_file_name(fnd["bucket"], saltenv, path)
# jit load the file from S3 if it's not in the cache or it's old
- _get_file_from_s3(metadata, saltenv, fnd['bucket'], path, cached_file_path)
+ _get_file_from_s3(metadata, saltenv, fnd["bucket"], path, cached_file_path)
return fnd
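
The "continue  # only executes if we didn't break" comment above is Python's for/else idiom: the else clause runs only when a loop completes without break, so it can restart the outer loop, while a successful inner break falls through to the outer break. A standalone illustration of the pattern:

def contains(rows, wanted):
    for row in rows:
        for item in row:
            if item == wanted:
                break  # found; skips this loop's else clause
        else:
            continue  # inner loop exhausted; check the next row
        break  # only reachable through the inner break
    else:
        return False  # every row exhausted without a match
    return True

# contains([[1, 2], [3, 4]], 3) -> True; contains([[1, 2]], 9) -> False
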
def file_hash(load, fnd):
- '''
+ """
Return an MD5 file hash
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = {}
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return ret
- if 'path' not in fnd or 'bucket' not in fnd or not fnd['path']:
+ if "path" not in fnd or "bucket" not in fnd or not fnd["path"]:
return ret
cached_file_path = _get_cached_file_name(
- fnd['bucket'],
- load['saltenv'],
- fnd['path'])
+ fnd["bucket"], load["saltenv"], fnd["path"]
+ )
if os.path.isfile(cached_file_path):
- ret['hsum'] = salt.utils.hashutils.get_hash(cached_file_path)
- ret['hash_type'] = 'md5'
+ ret["hsum"] = salt.utils.hashutils.get_hash(cached_file_path)
+ ret["hash_type"] = "md5"
return ret
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- ret = {'data': '',
- 'dest': ''}
+ ret = {"data": "", "dest": ""}
- if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
+ if "path" not in load or "loc" not in load or "saltenv" not in load:
return ret
- if 'path' not in fnd or 'bucket' not in fnd:
+ if "path" not in fnd or "bucket" not in fnd:
return ret
- gzip = load.get('gzip', None)
+ gzip = load.get("gzip", None)
# get the saltenv/path file from the cache
cached_file_path = _get_cached_file_name(
- fnd['bucket'],
- load['saltenv'],
- fnd['path'])
+ fnd["bucket"], load["saltenv"], fnd["path"]
+ )
- ret['dest'] = _trim_env_off_path([fnd['path']], load['saltenv'])[0]
+ ret["dest"] = _trim_env_off_path([fnd["path"]], load["saltenv"])[0]
- with salt.utils.files.fopen(cached_file_path, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ with salt.utils.files.fopen(cached_file_path, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if data and six.PY3 and not salt.utils.files.is_binary(cached_file_path):
data = data.decode(__salt_system_encoding__)
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = []
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return ret
- saltenv = load['saltenv']
+ saltenv = load["saltenv"]
metadata = _init()
if not metadata or saltenv not in metadata:
@@ -279,9 +281,9 @@ def file_list(load):
def file_list_emptydirs(load):
- '''
+ """
Return a list of all empty directories on the master
- '''
+ """
# TODO - implement this
_init()
@@ -289,19 +291,19 @@ def file_list_emptydirs(load):
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
ret = []
- if 'saltenv' not in load:
+ if "saltenv" not in load:
return ret
- saltenv = load['saltenv']
+ saltenv = load["saltenv"]
metadata = _init()
if not metadata or saltenv not in metadata:
@@ -319,37 +321,38 @@ def dir_list(load):
def _get_s3_key():
- '''
+ """
Get AWS keys from pillar or config
- '''
+ """
- key = __opts__['s3.key'] if 's3.key' in __opts__ else None
- keyid = __opts__['s3.keyid'] if 's3.keyid' in __opts__ else None
- service_url = __opts__['s3.service_url'] \
- if 's3.service_url' in __opts__ \
- else None
- verify_ssl = __opts__['s3.verify_ssl'] \
- if 's3.verify_ssl' in __opts__ \
- else None
- kms_keyid = __opts__['aws.kmw.keyid'] if 'aws.kms.keyid' in __opts__ else None
- location = __opts__['s3.location'] \
- if 's3.location' in __opts__ \
- else None
- path_style = __opts__['s3.path_style'] \
- if 's3.path_style' in __opts__ \
- else None
- https_enable = __opts__['s3.https_enable'] \
- if 's3.https_enable' in __opts__ \
- else None
+ key = __opts__["s3.key"] if "s3.key" in __opts__ else None
+ keyid = __opts__["s3.keyid"] if "s3.keyid" in __opts__ else None
+ service_url = __opts__["s3.service_url"] if "s3.service_url" in __opts__ else None
+ verify_ssl = __opts__["s3.verify_ssl"] if "s3.verify_ssl" in __opts__ else None
+ kms_keyid = __opts__["aws.kmw.keyid"] if "aws.kms.keyid" in __opts__ else None
+ location = __opts__["s3.location"] if "s3.location" in __opts__ else None
+ path_style = __opts__["s3.path_style"] if "s3.path_style" in __opts__ else None
+ https_enable = (
+ __opts__["s3.https_enable"] if "s3.https_enable" in __opts__ else None
+ )
- return key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable
+ return (
+ key,
+ keyid,
+ service_url,
+ verify_ssl,
+ kms_keyid,
+ location,
+ path_style,
+ https_enable,
+ )
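
Each lookup in _get_s3_key() is the spelled-out form of a dictionary default: __opts__["s3.key"] if "s3.key" in __opts__ else None behaves the same as __opts__.get("s3.key"). The reformat keeps the long form; the equivalence, for reference:

opts = {"s3.key": "AKIA...", "s3.verify_ssl": True}

key = opts["s3.key"] if "s3.key" in opts else None  # style used above
assert key == opts.get("s3.key")  # shorthand equivalent
assert opts.get("s3.location") is None  # missing key -> None
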
def _init():
- '''
+ """
Connect to S3 and download the metadata for each file in all buckets
specified and cache the data to disk.
- '''
+ """
cache_file = _get_buckets_cache_filename()
exp = time.time() - S3_CACHE_EXPIRE
@@ -369,18 +372,18 @@ def _init():
def _get_cache_dir():
- '''
+ """
Return the path to the s3cache dir
- '''
+ """
# Or is that making too many assumptions?
- return os.path.join(__opts__['cachedir'], 's3cache')
+ return os.path.join(__opts__["cachedir"], "s3cache")
def _get_cached_file_name(bucket_name, saltenv, path):
- '''
+ """
Return the cached file name for a bucket path file
- '''
+ """
file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)
@@ -392,53 +395,66 @@ def _get_cached_file_name(bucket_name, saltenv, path):
def _get_buckets_cache_filename():
- '''
+ """
Return the filename of the cache for bucket contents.
Create the path if it does not exist.
- '''
+ """
cache_dir = _get_cache_dir()
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
- return os.path.join(cache_dir, 'buckets_files.cache')
+ return os.path.join(cache_dir, "buckets_files.cache")
def _refresh_buckets_cache_file(cache_file):
- '''
+ """
Retrieve the content of all buckets and cache the metadata to the buckets
cache file
- '''
+ """
- log.debug('Refreshing buckets cache file')
+ log.debug("Refreshing buckets cache file")
- key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable = _get_s3_key()
+ (
+ key,
+ keyid,
+ service_url,
+ verify_ssl,
+ kms_keyid,
+ location,
+ path_style,
+ https_enable,
+ ) = _get_s3_key()
metadata = {}
# helper s3 query function
def __get_s3_meta(bucket, key=key, keyid=keyid):
- ret, marker = [], ''
+ ret, marker = [], ""
while True:
- tmp = __utils__['s3.query'](key=key,
- keyid=keyid,
- kms_keyid=keyid,
- bucket=bucket,
- service_url=service_url,
- verify_ssl=verify_ssl,
- location=location,
- return_bin=False,
- path_style=path_style,
- https_enable=https_enable,
- params={'marker': marker})
+ tmp = __utils__["s3.query"](
+ key=key,
+ keyid=keyid,
+ kms_keyid=keyid,
+ bucket=bucket,
+ service_url=service_url,
+ verify_ssl=verify_ssl,
+ location=location,
+ return_bin=False,
+ path_style=path_style,
+ https_enable=https_enable,
+ params={"marker": marker},
+ )
headers = []
for header in tmp:
- if 'Key' in header:
+ if "Key" in header:
break
headers.append(header)
ret.extend(tmp)
- if all([header.get('IsTruncated', 'false') == 'false' for header in headers]):
+ if all(
+ [header.get("IsTruncated", "false") == "false" for header in headers]
+ ):
break
- marker = tmp[-1]['Key']
+ marker = tmp[-1]["Key"]
return ret
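
__get_s3_meta pages through the bucket listing the way the S3 list API requires: issue a request, check IsTruncated, and reissue with marker set to the last key returned until the listing completes. A generic sketch of the loop, with list_page standing in for the s3.query utility:

def list_all_keys(list_page):
    # list_page(marker) -> (keys, is_truncated); a stand-in for s3.query.
    keys, marker = [], ""
    while True:
        page, truncated = list_page(marker)
        keys.extend(page)
        if not truncated:
            return keys
        marker = page[-1]  # resume the listing after the last key seen
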
if _is_env_per_bucket():
@@ -454,32 +470,37 @@ def _refresh_buckets_cache_file(cache_file):
continue
# grab only the files/dirs
- bucket_files[bucket_name] = [k for k in s3_meta if 'Key' in k]
+ bucket_files[bucket_name] = [k for k in s3_meta if "Key" in k]
bucket_files_list.append(bucket_files)
# check to see if we added any keys, otherwise investigate possible error conditions
if not bucket_files[bucket_name]:
meta_response = {}
for k in s3_meta:
- if 'Code' in k or 'Message' in k:
+ if "Code" in k or "Message" in k:
# assumes no duplicate keys, consistent with the current error response.
meta_response.update(k)
# attempt use of human readable output first.
try:
- log.warning("'%s' response for bucket '%s'", meta_response['Message'], bucket_name)
+ log.warning(
+ "'%s' response for bucket '%s'",
+ meta_response["Message"],
+ bucket_name,
+ )
continue
except KeyError:
# no human readable error message provided
- if 'Code' in meta_response:
+ if "Code" in meta_response:
log.warning(
"'%s' response for bucket '%s'",
- meta_response['Code'], bucket_name
+ meta_response["Code"],
+ bucket_name,
)
continue
else:
log.warning(
- 'S3 Error! Do you have any files '
- 'in your S3 bucket?')
+ "S3 Error! Do you have any files " "in your S3 bucket?"
+ )
return {}
metadata[saltenv] = bucket_files_list
@@ -494,40 +515,45 @@ def _refresh_buckets_cache_file(cache_file):
continue
# pull out the environment dirs (e.g. the root dirs)
- files = [k for k in s3_meta if 'Key' in k]
+ files = [k for k in s3_meta if "Key" in k]
# check to see if we added any keys, otherwise investigate possible error conditions
if not files:
meta_response = {}
for k in s3_meta:
- if 'Code' in k or 'Message' in k:
+ if "Code" in k or "Message" in k:
# assumes no duplicate keys, consistent with the current error response.
meta_response.update(k)
# attempt use of human readable output first.
try:
- log.warning("'%s' response for bucket '%s'", meta_response['Message'], bucket_name)
+ log.warning(
+ "'%s' response for bucket '%s'",
+ meta_response["Message"],
+ bucket_name,
+ )
continue
except KeyError:
# no human readable error message provided
- if 'Code' in meta_response:
+ if "Code" in meta_response:
log.warning(
"'%s' response for bucket '%s'",
- meta_response['Code'], bucket_name
+ meta_response["Code"],
+ bucket_name,
)
continue
else:
log.warning(
- 'S3 Error! Do you have any files '
- 'in your S3 bucket?')
+ "S3 Error! Do you have any files " "in your S3 bucket?"
+ )
return {}
- environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files]
+ environments = [(os.path.dirname(k["Key"]).split("/", 1))[0] for k in files]
environments = set(environments)
# pull out the files for the environment
for saltenv in environments:
# grab only files/dirs that match this saltenv
- env_files = [k for k in files if k['Key'].startswith(saltenv)]
+ env_files = [k for k in files if k["Key"].startswith(saltenv)]
if saltenv not in metadata:
metadata[saltenv] = []
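
In the environments-in-root layout, the saltenv is simply the first path component of each key, which is what the dirname/split expression above extracts. For example:

import os

keys = ["base/top.sls", "base/web/init.sls", "dev/top.sls"]
environments = {os.path.dirname(k).split("/", 1)[0] for k in keys}
# environments == {"base", "dev"}
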
@@ -545,43 +571,49 @@ def _refresh_buckets_cache_file(cache_file):
if os.path.isfile(cache_file):
os.remove(cache_file)
- log.debug('Writing buckets cache file')
+ log.debug("Writing buckets cache file")
- with salt.utils.files.fopen(cache_file, 'w') as fp_:
+ with salt.utils.files.fopen(cache_file, "w") as fp_:
pickle.dump(metadata, fp_)
return metadata
def _read_buckets_cache_file(cache_file):
- '''
+ """
Return the contents of the buckets cache file
- '''
+ """
- log.debug('Reading buckets cache file')
+ log.debug("Reading buckets cache file")
- with salt.utils.files.fopen(cache_file, 'rb') as fp_:
+ with salt.utils.files.fopen(cache_file, "rb") as fp_:
try:
data = pickle.load(fp_)
- except (pickle.UnpicklingError, AttributeError, EOFError, ImportError,
- IndexError, KeyError):
+ except (
+ pickle.UnpicklingError,
+ AttributeError,
+ EOFError,
+ ImportError,
+ IndexError,
+ KeyError,
+ ):
data = None
return data
def _find_files(metadata):
- '''
+ """
Looks for all the files in the S3 bucket cache metadata
- '''
+ """
ret = []
found = {}
for bucket_dict in metadata:
for bucket_name, data in six.iteritems(bucket_dict):
- filepaths = [k['Key'] for k in data]
- filepaths = [k for k in filepaths if not k.endswith('/')]
+ filepaths = [k["Key"] for k in data]
+ filepaths = [k for k in filepaths if not k.endswith("/")]
if bucket_name not in found:
found[bucket_name] = True
ret.append({bucket_name: filepaths})
@@ -594,12 +626,12 @@ def _find_files(metadata):
def _find_dirs(metadata):
- '''
+ """
Looks for all the directories in the S3 bucket cache metadata.
Supports trailing '/' keys (as created by S3 console) as well as
directories discovered in the path of file keys.
- '''
+ """
ret = []
found = {}
@@ -607,10 +639,10 @@ def _find_dirs(metadata):
for bucket_dict in metadata:
for bucket_name, data in six.iteritems(bucket_dict):
dirpaths = set()
- for path in [k['Key'] for k in data]:
- prefix = ''
- for part in path.split('/')[:-1]:
- directory = prefix + part + '/'
+ for path in [k["Key"] for k in data]:
+ prefix = ""
+ for part in path.split("/")[:-1]:
+ directory = prefix + part + "/"
dirpaths.add(directory)
prefix = directory
if bucket_name not in found:
@@ -626,50 +658,59 @@ def _find_dirs(metadata):
def _find_file_meta(metadata, bucket_name, saltenv, path):
- '''
+ """
Looks for a file's metadata in the S3 bucket cache file
- '''
+ """
env_meta = metadata[saltenv] if saltenv in metadata else {}
bucket_meta = {}
for bucket in env_meta:
if bucket_name in bucket:
bucket_meta = bucket[bucket_name]
- files_meta = list(list(filter((lambda k: 'Key' in k), bucket_meta)))
+ files_meta = list(list(filter((lambda k: "Key" in k), bucket_meta)))
for item_meta in files_meta:
- if 'Key' in item_meta and item_meta['Key'] == path:
+ if "Key" in item_meta and item_meta["Key"] == path:
try:
# Get rid of quotes surrounding md5
- item_meta['ETag'] = item_meta['ETag'].strip('"')
+ item_meta["ETag"] = item_meta["ETag"].strip('"')
except KeyError:
pass
return item_meta
def _get_buckets():
- '''
+ """
Return the configuration buckets
- '''
+ """
- return __opts__['s3.buckets'] if 's3.buckets' in __opts__ else {}
+ return __opts__["s3.buckets"] if "s3.buckets" in __opts__ else {}
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
- '''
+ """
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
- '''
- key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable = _get_s3_key()
+ """
+ (
+ key,
+ keyid,
+ service_url,
+ verify_ssl,
+ kms_keyid,
+ location,
+ path_style,
+ https_enable,
+ ) = _get_s3_key()
# check the local cache...
if os.path.isfile(cached_file_path):
file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
if file_meta:
- file_etag = file_meta['ETag']
+ file_etag = file_meta["ETag"]
- if file_etag.find('-') == -1:
+ if file_etag.find("-") == -1:
file_md5 = file_etag
- cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, 'md5')
+ cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, "md5")
# hashes match we have a cache hit
if cached_md5 == file_md5:
@@ -678,20 +719,26 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
cached_file_stat = os.stat(cached_file_path)
cached_file_size = cached_file_stat.st_size
cached_file_mtime = datetime.datetime.fromtimestamp(
- cached_file_stat.st_mtime)
+ cached_file_stat.st_mtime
+ )
cached_file_lastmod = datetime.datetime.strptime(
- file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
- if (cached_file_size == int(file_meta['Size']) and
- cached_file_mtime > cached_file_lastmod):
- log.debug('cached file size equal to metadata size and '
- 'cached file mtime later than metadata last '
- 'modification time.')
- ret = __utils__['s3.query'](
+ file_meta["LastModified"], "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ if (
+ cached_file_size == int(file_meta["Size"])
+ and cached_file_mtime > cached_file_lastmod
+ ):
+ log.debug(
+ "cached file size equal to metadata size and "
+ "cached file mtime later than metadata last "
+ "modification time."
+ )
+ ret = __utils__["s3.query"](
key=key,
keyid=keyid,
kms_keyid=keyid,
- method='HEAD',
+ method="HEAD",
bucket=bucket_name,
service_url=service_url,
verify_ssl=verify_ssl,
@@ -700,28 +747,33 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
local_file=cached_file_path,
full_headers=True,
path_style=path_style,
- https_enable=https_enable
+ https_enable=https_enable,
)
if ret is not None:
- for header_name, header_value in ret['headers'].items():
+ for header_name, header_value in ret["headers"].items():
name = header_name.strip()
value = header_value.strip()
- if six.text_type(name).lower() == 'last-modified':
+ if six.text_type(name).lower() == "last-modified":
s3_file_mtime = datetime.datetime.strptime(
- value, '%a, %d %b %Y %H:%M:%S %Z')
- elif six.text_type(name).lower() == 'content-length':
+ value, "%a, %d %b %Y %H:%M:%S %Z"
+ )
+ elif six.text_type(name).lower() == "content-length":
s3_file_size = int(value)
- if (cached_file_size == s3_file_size and
- cached_file_mtime > s3_file_mtime):
+ if (
+ cached_file_size == s3_file_size
+ and cached_file_mtime > s3_file_mtime
+ ):
log.info(
- '%s - %s : %s skipped download since cached file size '
- 'equal to and mtime after s3 values',
- bucket_name, saltenv, path
+ "%s - %s : %s skipped download since cached file size "
+ "equal to and mtime after s3 values",
+ bucket_name,
+ saltenv,
+ path,
)
return
# ... or get the file from S3
- __utils__['s3.query'](
+ __utils__["s3.query"](
key=key,
keyid=keyid,
kms_keyid=keyid,
@@ -737,9 +789,9 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
def _trim_env_off_path(paths, saltenv, trim_slash=False):
- '''
+ """
Return a list of file paths with the saltenv directory removed
- '''
+ """
env_len = None if _is_env_per_bucket() else len(saltenv) + 1
slash_len = -1 if trim_slash else None
@@ -747,10 +799,10 @@ def _trim_env_off_path(paths, saltenv, trim_slash=False):
def _is_env_per_bucket():
- '''
+ """
Return the configuration mode, either buckets per environment or a list of
buckets that have environment dirs in their root
- '''
+ """
buckets = _get_buckets()
if isinstance(buckets, dict):
@@ -758,4 +810,4 @@ def _is_env_per_bucket():
elif isinstance(buckets, list):
return False
else:
- raise ValueError('Incorrect s3.buckets type given in config')
+ raise ValueError("Incorrect s3.buckets type given in config")
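
The cache check in _get_file_from_s3() leans on an S3 detail visible above: a single-part upload's ETag is the object's MD5, while a multipart ETag contains a "-" and cannot be compared to a local MD5, so the code falls back to size and mtime comparisons. A sketch of that branch (the function name is hypothetical):

def md5_cache_hit(etag, local_md5):
    etag = etag.strip('"')  # ETags arrive wrapped in quotes
    if "-" in etag:
        return None  # multipart upload; MD5 comparison is impossible
    return etag == local_md5  # single-part; the ETag is the MD5
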
diff --git a/salt/fileserver/svnfs.py b/salt/fileserver/svnfs.py
index 19de8a844c1..fec88572a8d 100644
--- a/salt/fileserver/svnfs.py
+++ b/salt/fileserver/svnfs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Subversion Fileserver Backend
After enabling this backend, branches and tags in a remote subversion
@@ -29,10 +29,11 @@ This backend assumes a standard svn layout with directories for ``branches``,
:conf_master:`svnfs_mountpoint` was also added. Finally, support for
per-remote configuration parameters was added. See the
:conf_master:`documentation ` for more information.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import copy
import errno
import fnmatch
@@ -41,21 +42,8 @@ import logging
import os
import shutil
from datetime import datetime
-from salt.exceptions import FileserverConfigError
-PER_REMOTE_OVERRIDES = ('mountpoint', 'root', 'trunk', 'branches', 'tags')
-
-# Import third party libs
-from salt.ext import six
-# pylint: disable=import-error
-HAS_SVN = False
-try:
- import pysvn
- HAS_SVN = True
- CLIENT = pysvn.Client()
-except ImportError:
- pass
-# pylint: enable=import-error
+import salt.fileserver
# Import salt libs
import salt.utils.data
@@ -66,105 +54,128 @@ import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
-import salt.fileserver
+from salt.exceptions import FileserverConfigError
+
+# Import third party libs
+from salt.ext import six
from salt.utils.event import tagify
+PER_REMOTE_OVERRIDES = ("mountpoint", "root", "trunk", "branches", "tags")
+
+
+# pylint: disable=import-error
+HAS_SVN = False
+try:
+ import pysvn
+
+ HAS_SVN = True
+ CLIENT = pysvn.Client()
+except ImportError:
+ pass
+# pylint: enable=import-error
+
+
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'svn'
+__virtualname__ = "svn"
def __virtual__():
- '''
+ """
Only load if subversion is available
- '''
- if __virtualname__ not in __opts__['fileserver_backend']:
+ """
+ if __virtualname__ not in __opts__["fileserver_backend"]:
return False
if not HAS_SVN:
- log.error('Subversion fileserver backend is enabled in configuration '
- 'but could not be loaded, is pysvn installed?')
+ log.error(
+ "Subversion fileserver backend is enabled in configuration "
+ "but could not be loaded, is pysvn installed?"
+ )
return False
errors = []
- for param in ('svnfs_trunk', 'svnfs_branches', 'svnfs_tags'):
+ for param in ("svnfs_trunk", "svnfs_branches", "svnfs_tags"):
if os.path.isabs(__opts__[param]):
errors.append(
- 'Master configuration parameter \'{0}\' (value: {1}) cannot '
- 'be an absolute path'.format(param, __opts__[param])
+ "Master configuration parameter '{0}' (value: {1}) cannot "
+ "be an absolute path".format(param, __opts__[param])
)
if errors:
for error in errors:
log.error(error)
- log.error('Subversion fileserver backed will be disabled')
+ log.error("Subversion fileserver backed will be disabled")
return False
return __virtualname__
def _rev(repo):
- '''
+ """
Returns revision ID of repo
- '''
+ """
try:
- repo_info = dict(six.iteritems(CLIENT.info(repo['repo'])))
- except (pysvn._pysvn.ClientError, TypeError,
- KeyError, AttributeError) as exc:
+ repo_info = dict(six.iteritems(CLIENT.info(repo["repo"])))
+ except (pysvn._pysvn.ClientError, TypeError, KeyError, AttributeError) as exc:
log.error(
- 'Error retrieving revision ID for svnfs remote %s '
- '(cachedir: %s): %s',
- repo['url'], repo['repo'], exc
+ "Error retrieving revision ID for svnfs remote %s " "(cachedir: %s): %s",
+ repo["url"],
+ repo["repo"],
+ exc,
)
else:
- return repo_info['revision'].number
+ return repo_info["revision"].number
return None
def _failhard():
- '''
+ """
Fatal fileserver configuration issue, raise an exception
- '''
- raise FileserverConfigError(
- 'Failed to load svn fileserver backend'
- )
+ """
+ raise FileserverConfigError("Failed to load svn fileserver backend")
def init():
- '''
+ """
Return the list of svn remotes and their configuration information
- '''
- bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
+ """
+ bp_ = os.path.join(__opts__["cachedir"], "svnfs")
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
- per_remote_defaults[param] = \
- six.text_type(__opts__['svnfs_{0}'.format(param)])
+ per_remote_defaults[param] = six.text_type(__opts__["svnfs_{0}".format(param)])
- for remote in __opts__['svnfs_remotes']:
+ for remote in __opts__["svnfs_remotes"]:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
- [(key, six.text_type(val)) for key, val in
- six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
+ [
+ (key, six.text_type(val))
+ for key, val in six.iteritems(
+ salt.utils.data.repack_dictlist(remote[repo_url])
+ )
+ ]
)
if not per_remote_conf:
log.error(
- 'Invalid per-remote configuration for remote %s. If no '
- 'per-remote parameters are being specified, there may be '
- 'a trailing colon after the URL, which should be removed. '
- 'Check the master configuration file.', repo_url
+ "Invalid per-remote configuration for remote %s. If no "
+ "per-remote parameters are being specified, there may be "
+ "a trailing colon after the URL, which should be removed. "
+ "Check the master configuration file.",
+ repo_url,
)
_failhard()
per_remote_errors = False
- for param in (x for x in per_remote_conf
- if x not in PER_REMOTE_OVERRIDES):
+ for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES):
log.error(
- 'Invalid configuration parameter \'%s\' for remote %s. '
- 'Valid parameters are: %s. See the documentation for '
- 'further information.',
- param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
+ "Invalid configuration parameter '%s' for remote %s. "
+ "Valid parameters are: %s. See the documentation for "
+ "further information.",
+ param,
+ repo_url,
+ ", ".join(PER_REMOTE_OVERRIDES),
)
per_remote_errors = True
if per_remote_errors:
@@ -176,20 +187,21 @@ def init():
if not isinstance(repo_url, six.string_types):
log.error(
- 'Invalid svnfs remote %s. Remotes must be strings, you may '
- 'need to enclose the URL in quotes', repo_url
+ "Invalid svnfs remote %s. Remotes must be strings, you may "
+ "need to enclose the URL in quotes",
+ repo_url,
)
_failhard()
try:
- repo_conf['mountpoint'] = salt.utils.url.strip_proto(
- repo_conf['mountpoint']
+ repo_conf["mountpoint"] = salt.utils.url.strip_proto(
+ repo_conf["mountpoint"]
)
except TypeError:
# mountpoint not specified
pass
- hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
+ hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
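
Each svnfs remote is cached under a directory named for a hash of its URL, so distinct remotes never collide and the mapping survives restarts; remote_map.txt (written further down) records which hash belongs to which URL. A sketch of the derivation (the explicit encode is an addition for Python 3):

import hashlib
import os

def remote_cachedir(cachedir, repo_url, hash_type="md5"):
    repo_hash = getattr(hashlib, hash_type)(repo_url.encode()).hexdigest()
    return os.path.join(cachedir, "svnfs", repo_hash)

# remote_cachedir("/var/cache/salt/master", "svn://example.com/repo")
# -> "/var/cache/salt/master/svnfs/<md5 of the URL>"
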
@@ -202,10 +214,7 @@ def init():
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
- log.error(
- 'Failed to initialize svnfs remote \'%s\': %s',
- repo_url, exc
- )
+ log.error("Failed to initialize svnfs remote '%s': %s", repo_url, exc)
_failhard()
else:
# Confirm that there is an svn checkout at the necessary path by
@@ -214,49 +223,51 @@ def init():
CLIENT.status(rp_)
except pysvn._pysvn.ClientError as exc:
log.error(
- 'Cache path %s (corresponding remote: %s) exists but is '
- 'not a valid subversion checkout. You will need to '
- 'manually delete this directory on the master to continue '
- 'to use this svnfs remote.', rp_, repo_url
+ "Cache path %s (corresponding remote: %s) exists but is "
+ "not a valid subversion checkout. You will need to "
+ "manually delete this directory on the master to continue "
+ "to use this svnfs remote.",
+ rp_,
+ repo_url,
)
_failhard()
- repo_conf.update({
- 'repo': rp_,
- 'url': repo_url,
- 'hash': repo_hash,
- 'cachedir': rp_,
- 'lockfile': os.path.join(rp_, 'update.lk')
- })
+ repo_conf.update(
+ {
+ "repo": rp_,
+ "url": repo_url,
+ "hash": repo_hash,
+ "cachedir": rp_,
+ "lockfile": os.path.join(rp_, "update.lk"),
+ }
+ )
repos.append(repo_conf)
if new_remote:
- remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
+ remote_map = os.path.join(__opts__["cachedir"], "svnfs/remote_map.txt")
try:
- with salt.utils.files.fopen(remote_map, 'w+') as fp_:
- timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
- fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
+ with salt.utils.files.fopen(remote_map, "w+") as fp_:
+ timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
+ fp_.write("# svnfs_remote map as of {0}\n".format(timestamp))
for repo_conf in repos:
fp_.write(
salt.utils.stringutils.to_str(
- '{0} = {1}\n'.format(
- repo_conf['hash'], repo_conf['url']
- )
+ "{0} = {1}\n".format(repo_conf["hash"], repo_conf["url"])
)
)
except OSError:
pass
else:
- log.info('Wrote new svnfs_remote map to %s', remote_map)
+ log.info("Wrote new svnfs_remote map to %s", remote_map)
return repos
def _clear_old_remotes():
- '''
+ """
Remove cache directories for remotes no longer configured
- '''
- bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
+ """
+ bp_ = os.path.join(__opts__["cachedir"], "svnfs")
try:
cachedir_ls = os.listdir(bp_)
except OSError:
@@ -265,12 +276,12 @@ def _clear_old_remotes():
# Remove actively-used remotes from list
for repo in repos:
try:
- cachedir_ls.remove(repo['hash'])
+ cachedir_ls.remove(repo["hash"])
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
- if item in ('hash', 'refs'):
+ if item in ("hash", "refs"):
continue
path = os.path.join(bp_, item)
if os.path.isdir(path):
@@ -282,65 +293,67 @@ def _clear_old_remotes():
shutil.rmtree(rdir)
except OSError as exc:
log.error(
- 'Unable to remove old svnfs remote cachedir %s: %s',
- rdir, exc
+ "Unable to remove old svnfs remote cachedir %s: %s", rdir, exc
)
failed.append(rdir)
else:
- log.debug('svnfs removed old cachedir %s', rdir)
+ log.debug("svnfs removed old cachedir %s", rdir)
for fdir in failed:
to_remove.remove(fdir)
return bool(to_remove), repos
def clear_cache():
- '''
+ """
Completely clear svnfs cache
- '''
- fsb_cachedir = os.path.join(__opts__['cachedir'], 'svnfs')
- list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
+ """
+ fsb_cachedir = os.path.join(__opts__["cachedir"], "svnfs")
+ list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/svnfs")
errors = []
for rdir in (fsb_cachedir, list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
- errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
+ errors.append("Unable to delete {0}: {1}".format(rdir, exc))
return errors
def clear_lock(remote=None):
- '''
+ """
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
+
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
- msg = ('Unable to remove update lock for {0} ({1}): {2} '
- .format(repo['url'], repo['lockfile'], exc))
+ msg = "Unable to remove update lock for {0} ({1}): {2} ".format(
+ repo["url"], repo["lockfile"], exc
+ )
log.debug(msg)
errlist.append(msg)
+
success = []
failed = []
- if os.path.exists(repo['lockfile']):
+ if os.path.exists(repo["lockfile"]):
try:
- os.remove(repo['lockfile'])
+ os.remove(repo["lockfile"])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
- shutil.rmtree(repo['lockfile'])
+ shutil.rmtree(repo["lockfile"])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
- msg = 'Removed lock for {0}'.format(repo['url'])
+ msg = "Removed lock for {0}".format(repo["url"])
log.debug(msg)
success.append(msg)
return success, failed
@@ -353,11 +366,11 @@ def clear_lock(remote=None):
for repo in init():
if remote:
try:
- if remote not in repo['url']:
+ if remote not in repo["url"]:
continue
except TypeError:
# remote was non-string, try again
- if six.text_type(remote) not in repo['url']:
+ if six.text_type(remote) not in repo["url"]:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
@@ -366,27 +379,29 @@ def clear_lock(remote=None):
def lock(remote=None):
- '''
+ """
Place an update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
- '''
+ """
+
def _do_lock(repo):
success = []
failed = []
- if not os.path.exists(repo['lockfile']):
+ if not os.path.exists(repo["lockfile"]):
try:
- with salt.utils.files.fopen(repo['lockfile'], 'w+') as fp_:
- fp_.write('')
+ with salt.utils.files.fopen(repo["lockfile"], "w+") as fp_:
+ fp_.write("")
except (IOError, OSError) as exc:
- msg = ('Unable to set update lock for {0} ({1}): {2} '
- .format(repo['url'], repo['lockfile'], exc))
+ msg = "Unable to set update lock for {0} ({1}): {2} ".format(
+ repo["url"], repo["lockfile"], exc
+ )
log.debug(msg)
failed.append(msg)
else:
- msg = 'Set lock for {0}'.format(repo['url'])
+ msg = "Set lock for {0}".format(repo["url"])
log.debug(msg)
success.append(msg)
return success, failed
@@ -399,11 +414,11 @@ def lock(remote=None):
for repo in init():
if remote:
try:
- if not fnmatch.fnmatch(repo['url'], remote):
+ if not fnmatch.fnmatch(repo["url"], remote):
continue
except TypeError:
# remote was non-string, try again
- if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
+ if not fnmatch.fnmatch(repo["url"], six.text_type(remote)):
continue
success, failed = _do_lock(repo)
locked.extend(success)
@@ -413,40 +428,42 @@ def lock(remote=None):
def update():
- '''
+ """
Execute an svn update on all of the repos
- '''
+ """
# data for the fileserver event
- data = {'changed': False,
- 'backend': 'svnfs'}
+ data = {"changed": False, "backend": "svnfs"}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
- data['changed'], repos = _clear_old_remotes()
+ data["changed"], repos = _clear_old_remotes()
for repo in repos:
- if os.path.exists(repo['lockfile']):
+ if os.path.exists(repo["lockfile"]):
log.warning(
- 'Update lockfile is present for svnfs remote %s, skipping. '
- 'If this warning persists, it is possible that the update '
- 'process was interrupted. Removing %s or running '
- '\'salt-run fileserver.clear_lock svnfs\' will allow updates '
- 'to continue for this remote.', repo['url'], repo['lockfile']
+ "Update lockfile is present for svnfs remote %s, skipping. "
+ "If this warning persists, it is possible that the update "
+ "process was interrupted. Removing %s or running "
+ "'salt-run fileserver.clear_lock svnfs' will allow updates "
+ "to continue for this remote.",
+ repo["url"],
+ repo["lockfile"],
)
continue
_, errors = lock(repo)
if errors:
log.error(
- 'Unable to set update lock for svnfs remote %s, skipping.',
- repo['url']
+ "Unable to set update lock for svnfs remote %s, skipping.", repo["url"]
)
continue
- log.debug('svnfs is fetching from %s', repo['url'])
+ log.debug("svnfs is fetching from %s", repo["url"])
old_rev = _rev(repo)
try:
- CLIENT.update(repo['repo'])
+ CLIENT.update(repo["repo"])
except pysvn._pysvn.ClientError as exc:
log.error(
- 'Error updating svnfs remote %s (cachedir: %s): %s',
- repo['url'], repo['cachedir'], exc
+ "Error updating svnfs remote %s (cachedir: %s): %s",
+ repo["url"],
+ repo["cachedir"],
+ exc,
)
new_rev = _rev(repo)
@@ -454,34 +471,34 @@ def update():
# There were problems getting the revision ID
continue
if new_rev != old_rev:
- data['changed'] = True
+ data["changed"] = True
clear_lock(repo)
- env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
- if data.get('changed', False) is True or not os.path.isfile(env_cache):
+ env_cache = os.path.join(__opts__["cachedir"], "svnfs/envs.p")
+ if data.get("changed", False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
- with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
+ with salt.utils.files.fopen(env_cache, "wb+") as fp_:
fp_.write(serial.dumps(new_envs))
- log.trace('Wrote env cache data to %s', env_cache)
+ log.trace("Wrote env cache data to %s", env_cache)
# if there is a change, fire an event
- if __opts__.get('fileserver_events', False):
+ if __opts__.get("fileserver_events", False):
with salt.utils.event.get_event(
- 'master',
- __opts__['sock_dir'],
- __opts__['transport'],
- opts=__opts__,
- listen=False) as event:
- event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
+ "master",
+ __opts__["sock_dir"],
+ __opts__["transport"],
+ opts=__opts__,
+ listen=False,
+ ) as event:
+ event.fire_event(data, tagify(["svnfs", "update"], prefix="fileserver"))
try:
salt.fileserver.reap_fileserver_cache_dir(
- os.path.join(__opts__['cachedir'], 'svnfs/hash'),
- find_file
+ os.path.join(__opts__["cachedir"], "svnfs/hash"), find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
@@ -489,93 +506,95 @@ def update():
def _env_is_exposed(env):
- '''
+ """
Check if an environment is exposed by comparing it against a whitelist and
blacklist.
- '''
+ """
return salt.utils.stringutils.check_whitelist_blacklist(
env,
- whitelist=__opts__['svnfs_saltenv_whitelist'],
- blacklist=__opts__['svnfs_saltenv_blacklist'],
+ whitelist=__opts__["svnfs_saltenv_whitelist"],
+ blacklist=__opts__["svnfs_saltenv_blacklist"],
)
def envs(ignore_cache=False):
- '''
+ """
Return a list of refs that can be used as environments
- '''
+ """
if not ignore_cache:
- env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
+ env_cache = os.path.join(__opts__["cachedir"], "svnfs/envs.p")
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
- trunk = os.path.join(repo['repo'], repo['trunk'])
+ trunk = os.path.join(repo["repo"], repo["trunk"])
if os.path.isdir(trunk):
# Add base as the env for trunk
- ret.add('base')
+ ret.add("base")
else:
log.error(
- 'svnfs trunk path \'%s\' does not exist in repo %s, no base '
- 'environment will be provided by this remote',
- repo['trunk'], repo['url']
+ "svnfs trunk path '%s' does not exist in repo %s, no base "
+ "environment will be provided by this remote",
+ repo["trunk"],
+ repo["url"],
)
- branches = os.path.join(repo['repo'], repo['branches'])
+ branches = os.path.join(repo["repo"], repo["branches"])
if os.path.isdir(branches):
ret.update(os.listdir(branches))
else:
log.error(
- 'svnfs branches path \'%s\' does not exist in repo %s',
- repo['branches'], repo['url']
+ "svnfs branches path '%s' does not exist in repo %s",
+ repo["branches"],
+ repo["url"],
)
- tags = os.path.join(repo['repo'], repo['tags'])
+ tags = os.path.join(repo["repo"], repo["tags"])
if os.path.isdir(tags):
ret.update(os.listdir(tags))
else:
log.error(
- 'svnfs tags path \'%s\' does not exist in repo %s',
- repo['tags'], repo['url']
+ "svnfs tags path '%s' does not exist in repo %s",
+ repo["tags"],
+ repo["url"],
)
return [x for x in sorted(ret) if _env_is_exposed(x)]
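
envs() maps the standard svn layout onto salt environments: trunk provides base, and every directory under branches/ or tags/ contributes an environment named after itself. A standalone sketch against a hypothetical checkout path:

import os

def svn_envs(checkout):
    found = set()
    if os.path.isdir(os.path.join(checkout, "trunk")):
        found.add("base")  # trunk is always exposed as the base env
    for kind in ("branches", "tags"):
        path = os.path.join(checkout, kind)
        if os.path.isdir(path):
            found.update(os.listdir(path))
    return sorted(found)
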
def _env_root(repo, saltenv):
- '''
+ """
Return the root of the directory corresponding to the desired environment,
or None if the environment was not found.
- '''
+ """
# If 'base' is desired, look for the trunk
- if saltenv == 'base':
- trunk = os.path.join(repo['repo'], repo['trunk'])
+ if saltenv == "base":
+ trunk = os.path.join(repo["repo"], repo["trunk"])
if os.path.isdir(trunk):
return trunk
else:
return None
# Check branches
- branches = os.path.join(repo['repo'], repo['branches'])
+ branches = os.path.join(repo["repo"], repo["branches"])
if os.path.isdir(branches) and saltenv in os.listdir(branches):
return os.path.join(branches, saltenv)
# Check tags
- tags = os.path.join(repo['repo'], repo['tags'])
+ tags = os.path.join(repo["repo"], repo["tags"])
if os.path.isdir(tags) and saltenv in os.listdir(tags):
return os.path.join(tags, saltenv)
return None
-def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
- '''
+def find_file(path, tgt_env="base", **kwargs): # pylint: disable=W0613
+ """
Find the first file to match the path and ref. This operates similarly to
    the roots file server but with assumptions of the directory structure
    based on standard SVN practices.
- '''
- fnd = {'path': '',
- 'rel': ''}
+ """
+ fnd = {"path": "", "rel": ""}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
@@ -584,17 +603,16 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
if env_root is None:
# Environment not found, try the next repo
continue
- if repo['mountpoint'] \
- and not path.startswith(repo['mountpoint'] + os.path.sep):
+ if repo["mountpoint"] and not path.startswith(repo["mountpoint"] + os.path.sep):
continue
- repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
- if repo['root']:
- repo_path = os.path.join(repo['root'], repo_path)
+ repo_path = path[len(repo["mountpoint"]) :].lstrip(os.path.sep)
+ if repo["root"]:
+ repo_path = os.path.join(repo["root"], repo_path)
full = os.path.join(env_root, repo_path)
if os.path.isfile(full):
- fnd['rel'] = path
- fnd['path'] = full
+ fnd["rel"] = path
+ fnd["path"] = full
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
@@ -608,7 +626,7 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
- fnd['stat'] = list(os.stat(full))
+ fnd["stat"] = list(os.stat(full))
except Exception: # pylint: disable=broad-except
pass
return fnd
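
The path arithmetic above, stripping the per-remote mountpoint from the requested path and prepending the per-remote root inside the checkout, reduces to a small sketch (resolve is a hypothetical helper):

import os

def resolve(path, env_root, mountpoint="", root=""):
    if mountpoint and not path.startswith(mountpoint + os.path.sep):
        return None  # the request is outside this remote's mountpoint
    repo_path = path[len(mountpoint):].lstrip(os.path.sep)
    if root:
        repo_path = os.path.join(root, repo_path)  # descend into the subtree
    return os.path.join(env_root, repo_path)
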
@@ -616,176 +634,165 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
def serve_file(load, fnd):
- '''
+ """
Return a chunk from a file based on the data received
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- ret = {'data': '',
- 'dest': ''}
- if not all(x in load for x in ('path', 'loc', 'saltenv')):
+ ret = {"data": "", "dest": ""}
+ if not all(x in load for x in ("path", "loc", "saltenv")):
return ret
- if not fnd['path']:
+ if not fnd["path"]:
return ret
- ret['dest'] = fnd['rel']
- gzip = load.get('gzip', None)
- fpath = os.path.normpath(fnd['path'])
- with salt.utils.files.fopen(fpath, 'rb') as fp_:
- fp_.seek(load['loc'])
- data = fp_.read(__opts__['file_buffer_size'])
+ ret["dest"] = fnd["rel"]
+ gzip = load.get("gzip", None)
+ fpath = os.path.normpath(fnd["path"])
+ with salt.utils.files.fopen(fpath, "rb") as fp_:
+ fp_.seek(load["loc"])
+ data = fp_.read(__opts__["file_buffer_size"])
if data and six.PY3 and not salt.utils.files.is_binary(fpath):
data = data.decode(__salt_system_encoding__)
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
- ret['gzip'] = gzip
- ret['data'] = data
+ ret["gzip"] = gzip
+ ret["data"] = data
return ret
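
serve_file is the server half of a simple chunked transfer: the client keeps requesting with an advancing loc offset until it gets back an empty chunk. A hedged sketch of the client side, assuming a serve callable with the same load/return shape and raw bytes chunks:

def fetch_all(serve, path, saltenv="base"):
    data, loc = b"", 0
    while True:
        chunk = serve({"path": path, "loc": loc, "saltenv": saltenv})["data"]
        if not chunk:
            return data  # an empty chunk signals end of file
        data += chunk
        loc += len(chunk)  # the next request starts where this chunk ended
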
def file_hash(load, fnd):
- '''
+ """
Return a file hash, the hash type is set in the master config file
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if not all(x in load for x in ('path', 'saltenv')):
- return ''
- saltenv = load['saltenv']
- if saltenv == 'base':
- saltenv = 'trunk'
+ if not all(x in load for x in ("path", "saltenv")):
+ return ""
+ saltenv = load["saltenv"]
+ if saltenv == "base":
+ saltenv = "trunk"
ret = {}
- relpath = fnd['rel']
- path = fnd['path']
+ relpath = fnd["rel"]
+ path = fnd["path"]
# If the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# Set the hash_type as it is determined by config
- ret['hash_type'] = __opts__['hash_type']
+ ret["hash_type"] = __opts__["hash_type"]
# Check if the hash is cached
# Cache file's contents should be "hash:mtime"
- cache_path = os.path.join(__opts__['cachedir'],
- 'svnfs',
- 'hash',
- saltenv,
- '{0}.hash.{1}'.format(relpath,
- __opts__['hash_type']))
+ cache_path = os.path.join(
+ __opts__["cachedir"],
+ "svnfs",
+ "hash",
+ saltenv,
+ "{0}.hash.{1}".format(relpath, __opts__["hash_type"]),
+ )
# If we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
- with salt.utils.files.fopen(cache_path, 'rb') as fp_:
- hsum, mtime = fp_.read().split(':')
+ with salt.utils.files.fopen(cache_path, "rb") as fp_:
+ hsum, mtime = fp_.read().split(":")
if os.path.getmtime(path) == mtime:
# check if mtime changed
- ret['hsum'] = hsum
+ ret["hsum"] = hsum
return ret
    # If we don't have a cache entry, let's make one
- ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
+ ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
- with salt.utils.files.fopen(cache_path, 'w') as fp_:
- fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path)))
+ with salt.utils.files.fopen(cache_path, "w") as fp_:
+ fp_.write("{0}:{1}".format(ret["hsum"], os.path.getmtime(path)))
return ret
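
The cache scheme is a one-line sidecar file holding hash:mtime, so the digest is recomputed only when the file's mtime changes. The same idea as a self-contained sketch (cached_hash is hypothetical); note that it stores and compares the mtime as a string, keeping both sides of the staleness check the same type:

import hashlib
import os

def cached_hash(path, cache_path, hash_type="sha256"):
    mtime = str(os.path.getmtime(path))
    if os.path.exists(cache_path):
        with open(cache_path) as fp_:
            hsum, cached_mtime = fp_.read().rsplit(":", 1)
        if cached_mtime == mtime:
            return hsum  # cache hit: file unchanged since last hashed
    digest = hashlib.new(hash_type)
    with open(path, "rb") as fp_:
        for block in iter(lambda: fp_.read(65536), b""):
            digest.update(block)
    hsum = digest.hexdigest()
    with open(cache_path, "w") as fp_:
        fp_.write("{0}:{1}".format(hsum, mtime))  # refresh the sidecar
    return hsum
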
def _file_lists(load, form):
- '''
+ """
    Return the requested file list (files, dirs, or empty_dirs) for the given environment
- '''
- if 'env' in load:
+ """
+ if "env" in load:
# "env" is not supported; Use "saltenv".
- load.pop('env')
+ load.pop("env")
- if 'saltenv' not in load or load['saltenv'] not in envs():
+ if "saltenv" not in load or load["saltenv"] not in envs():
return []
- list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
+ list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/svnfs")
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
- log.critical('Unable to make cachedir %s', list_cachedir)
+ log.critical("Unable to make cachedir %s", list_cachedir)
return []
- list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
- w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
- cache_match, refresh_cache, save_cache = \
- salt.fileserver.check_file_list_cache(
- __opts__, form, list_cache, w_lock
- )
+ list_cache = os.path.join(list_cachedir, "{0}.p".format(load["saltenv"]))
+ w_lock = os.path.join(list_cachedir, ".{0}.w".format(load["saltenv"]))
+ cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
+ __opts__, form, list_cache, w_lock
+ )
if cache_match is not None:
return cache_match
if refresh_cache:
- ret = {
- 'files': set(),
- 'dirs': set(),
- 'empty_dirs': set()
- }
+ ret = {"files": set(), "dirs": set(), "empty_dirs": set()}
for repo in init():
- env_root = _env_root(repo, load['saltenv'])
+ env_root = _env_root(repo, load["saltenv"])
if env_root is None:
# Environment not found, try the next repo
continue
- if repo['root']:
- env_root = \
- os.path.join(env_root, repo['root']).rstrip(os.path.sep)
+ if repo["root"]:
+ env_root = os.path.join(env_root, repo["root"]).rstrip(os.path.sep)
if not os.path.isdir(env_root):
# svnfs root (global or per-remote) does not exist in env
continue
for root, dirs, files in salt.utils.path.os_walk(env_root):
relpath = os.path.relpath(root, env_root)
- dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
- if relpath != '.':
- ret['dirs'].add(dir_rel_fn)
+ dir_rel_fn = os.path.join(repo["mountpoint"], relpath)
+ if relpath != ".":
+ ret["dirs"].add(dir_rel_fn)
if not dirs and not files:
- ret['empty_dirs'].add(dir_rel_fn)
+ ret["empty_dirs"].add(dir_rel_fn)
for fname in files:
- rel_fn = os.path.relpath(
- os.path.join(root, fname),
- env_root
- )
- ret['files'].add(os.path.join(repo['mountpoint'], rel_fn))
- if repo['mountpoint']:
- ret['dirs'].add(repo['mountpoint'])
+ rel_fn = os.path.relpath(os.path.join(root, fname), env_root)
+ ret["files"].add(os.path.join(repo["mountpoint"], rel_fn))
+ if repo["mountpoint"]:
+ ret["dirs"].add(repo["mountpoint"])
# Convert all compiled sets to lists
for key in ret:
ret[key] = sorted(ret[key])
if save_cache:
- salt.fileserver.write_file_list_cache(
- __opts__, ret, list_cache, w_lock
- )
+ salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return []
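
The refresh branch walks the environment root once and collects three parallel lists: every file, every directory, and every directory with no children. Reduced to a standalone sketch (walk_lists is hypothetical, with os.walk standing in for salt.utils.path.os_walk):

import os

def walk_lists(env_root, mountpoint=""):
    ret = {"files": set(), "dirs": set(), "empty_dirs": set()}
    for root, dirs, files in os.walk(env_root):
        rel = os.path.relpath(root, env_root)
        if rel != ".":
            ret["dirs"].add(os.path.join(mountpoint, rel))
            if not dirs and not files:
                ret["empty_dirs"].add(os.path.join(mountpoint, rel))
        for fname in files:
            rel_fn = os.path.relpath(os.path.join(root, fname), env_root)
            ret["files"].add(os.path.join(mountpoint, rel_fn))
    return {key: sorted(val) for key, val in ret.items()}
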
def file_list(load):
- '''
+ """
Return a list of all files on the file server in a specified
environment
- '''
- return _file_lists(load, 'files')
+ """
+ return _file_lists(load, "files")
def file_list_emptydirs(load):
- '''
+ """
Return a list of all empty directories on the master
- '''
- return _file_lists(load, 'empty_dirs')
+ """
+ return _file_lists(load, "empty_dirs")
def dir_list(load):
- '''
+ """
Return a list of all directories on the master
- '''
- return _file_lists(load, 'dirs')
+ """
+ return _file_lists(load, "dirs")
diff --git a/salt/grains/__init__.py b/salt/grains/__init__.py
index 7a1d4801a91..cf708331ef7 100644
--- a/salt/grains/__init__.py
+++ b/salt/grains/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains plugin directory
-'''
+"""
diff --git a/salt/grains/chronos.py b/salt/grains/chronos.py
index 3b5add6895b..60478a76938 100644
--- a/salt/grains/chronos.py
+++ b/salt/grains/chronos.py
@@ -1,39 +1,39 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate chronos proxy minion grains.
.. versionadded:: 2015.8.2
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-
# Import Salt libs
import salt.utils.http
import salt.utils.platform
-__proxyenabled__ = ['chronos']
-__virtualname__ = 'chronos'
+
+__proxyenabled__ = ["chronos"]
+__virtualname__ = "chronos"
def __virtual__():
- if not salt.utils.platform.is_proxy() or 'proxy' not in __opts__:
+ if not salt.utils.platform.is_proxy() or "proxy" not in __opts__:
return False
else:
return __virtualname__
def kernel():
- return {'kernel': 'chronos'}
+ return {"kernel": "chronos"}
def os():
- return {'os': 'chronos'}
+ return {"os": "chronos"}
def os_family():
- return {'os_family': 'chronos'}
+ return {"os_family": "chronos"}
def os_data():
- return {'os_data': 'chronos'}
+ return {"os_data": "chronos"}
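
Grains modules like this one depend on the loader calling each public function and merging the returned dicts into the grains dictionary. A hedged illustration of that contract (collect_grains is a stand-in; the real loader is considerably more involved):

import types

def collect_grains(module):
    grains = {}
    for name in dir(module):
        func = getattr(module, name)
        # public module-level functions only; names like __virtual__ are skipped
        if isinstance(func, types.FunctionType) and not name.startswith("_"):
            result = func()
            if isinstance(result, dict):
                grains.update(result)
    return grains

Applied to the module above, this would yield {"kernel": "chronos", "os": "chronos", "os_family": "chronos", "os_data": "chronos"}.
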
diff --git a/salt/grains/cimc.py b/salt/grains/cimc.py
index 62f891ebee9..080b124a6b6 100644
--- a/salt/grains/cimc.py
+++ b/salt/grains/cimc.py
@@ -1,28 +1,30 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains for cimc hosts.
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.proxy.cimc
+
# Import Salt Libs
import salt.utils.platform
-import salt.proxy.cimc
-__proxyenabled__ = ['cimc']
-__virtualname__ = 'cimc'
+__proxyenabled__ = ["cimc"]
+__virtualname__ = "cimc"
log = logging.getLogger(__file__)
-GRAINS_CACHE = {'os_family': 'Cisco UCS'}
+GRAINS_CACHE = {"os_family": "Cisco UCS"}
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'cimc':
+ if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "cimc":
return __virtualname__
except KeyError:
pass
@@ -33,6 +35,6 @@ def __virtual__():
def cimc(proxy=None):
if not proxy:
return {}
- if proxy['cimc.initialized']() is False:
+ if proxy["cimc.initialized"]() is False:
return {}
- return {'cimc': proxy['cimc.grains']()}
+ return {"cimc": proxy["cimc.grains"]()}
diff --git a/salt/grains/core.py b/salt/grains/core.py
index 9b244def9c9..e7b9a1f564f 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
@@ -8,53 +8,36 @@ return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+import datetime
+import locale
+import logging
import os
+import platform
+import re
import socket
import sys
-import re
-import platform
-import logging
-import locale
-import uuid
-from errno import EACCES, EPERM
-import datetime
-import warnings
import time
-
-# pylint: disable=import-error
-try:
- import dateutil.tz
- _DATEUTIL_TZ = True
-except ImportError:
- _DATEUTIL_TZ = False
-
-__proxyenabled__ = ['*']
-__FQDN__ = None
+import uuid
+import warnings
+from errno import EACCES, EPERM
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of linux_distribution()
from platform import _supported_dists
-_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
- 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
-
-# linux_distribution deprecated in py3.7
-try:
- from platform import linux_distribution as _deprecated_linux_distribution
-
- def linux_distribution(**kwargs):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- return _deprecated_linux_distribution(**kwargs)
-except ImportError:
- from distro import linux_distribution
# Import salt libs
import salt.exceptions
import salt.log
+
+# Solve the chicken-and-egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmdmod
+import salt.modules.smbios
import salt.utils.dns
import salt.utils.files
import salt.utils.network
@@ -65,20 +48,56 @@ import salt.utils.stringutils
from salt.ext import six
from salt.ext.six.moves import range
+# pylint: disable=import-error
+try:
+ import dateutil.tz
+
+ _DATEUTIL_TZ = True
+except ImportError:
+ _DATEUTIL_TZ = False
+
+__proxyenabled__ = ["*"]
+__FQDN__ = None
+
+
+_supported_dists += (
+ "arch",
+ "mageia",
+ "meego",
+ "vmware",
+ "bluewhite64",
+ "slamd64",
+ "ovs",
+ "system",
+ "mint",
+ "oracle",
+ "void",
+)
+
+# linux_distribution deprecated in py3.7
+try:
+ from platform import linux_distribution as _deprecated_linux_distribution
+
+ def linux_distribution(**kwargs):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ return _deprecated_linux_distribution(**kwargs)
+
+
+except ImportError:
+ from distro import linux_distribution
+
+
if salt.utils.platform.is_windows():
import salt.utils.win_osinfo
-# Solve the Chicken and egg problem where grains need to run before any
-# of the modules are loaded and are generally available for any usage.
-import salt.modules.cmdmod
-import salt.modules.smbios
__salt__ = {
- 'cmd.run': salt.modules.cmdmod._run_quiet,
- 'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
- 'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
- 'smbios.records': salt.modules.smbios.records,
- 'smbios.get': salt.modules.smbios.get,
+ "cmd.run": salt.modules.cmdmod._run_quiet,
+ "cmd.retcode": salt.modules.cmdmod._retcode_quiet,
+ "cmd.run_all": salt.modules.cmdmod._run_all_quiet,
+ "smbios.records": salt.modules.smbios.records,
+ "smbios.get": salt.modules.smbios.get,
}
log = logging.getLogger(__name__)
@@ -91,15 +110,15 @@ if salt.utils.platform.is_windows():
import salt.utils.winapi
import win32api
import salt.utils.win_reg
+
HAS_WMI = True
except ImportError:
log.exception(
- 'Unable to import Python wmi module, some core grains '
- 'will be missing'
+            "Unable to import Python wmi module, some core grains will be missing"
)
HAS_UNAME = True
-if not hasattr(os, 'uname'):
+if not hasattr(os, "uname"):
HAS_UNAME = False
_INTERFACES = {}
@@ -110,61 +129,62 @@ NO_DATA = 4
def _windows_cpudata():
- '''
+ """
Return some CPU information on Windows minions
- '''
+ """
# Provides:
# num_cpus
# cpu_model
grains = {}
- if 'NUMBER_OF_PROCESSORS' in os.environ:
+ if "NUMBER_OF_PROCESSORS" in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
- grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
+ grains["num_cpus"] = int(os.environ["NUMBER_OF_PROCESSORS"])
except ValueError:
- grains['num_cpus'] = 1
- grains['cpu_model'] = salt.utils.win_reg.read_value(
+ grains["num_cpus"] = 1
+ grains["cpu_model"] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
- vname="ProcessorNameString").get('vdata')
+ vname="ProcessorNameString",
+ ).get("vdata")
return grains
def _linux_cpudata():
- '''
+ """
Return some CPU information for Linux minions
- '''
+ """
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
- cpuinfo = '/proc/cpuinfo'
+ cpuinfo = "/proc/cpuinfo"
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
- with salt.utils.files.fopen(cpuinfo, 'r') as _fp:
+ with salt.utils.files.fopen(cpuinfo, "r") as _fp:
for line in _fp:
- comps = line.split(':')
+ comps = line.split(":")
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
- if key == 'processor':
- grains['num_cpus'] = int(val) + 1
+ if key == "processor":
+ grains["num_cpus"] = int(val) + 1
# head -2 /proc/cpuinfo
# vendor_id : IBM/S390
# # processors : 2
- elif key == '# processors':
- grains['num_cpus'] = int(val)
- elif key == 'vendor_id':
- grains['cpu_model'] = val
- elif key == 'model name':
- grains['cpu_model'] = val
- elif key == 'flags':
- grains['cpu_flags'] = val.split()
- elif key == 'Features':
- grains['cpu_flags'] = val.split()
+ elif key == "# processors":
+ grains["num_cpus"] = int(val)
+ elif key == "vendor_id":
+ grains["cpu_model"] = val
+ elif key == "model name":
+ grains["cpu_model"] = val
+ elif key == "flags":
+ grains["cpu_flags"] = val.split()
+ elif key == "Features":
+ grains["cpu_flags"] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
@@ -179,588 +199,626 @@ def _linux_cpudata():
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
- elif key == 'Processor':
- grains['cpu_model'] = val.split('-')[0]
- grains['num_cpus'] = 1
- if 'num_cpus' not in grains:
- grains['num_cpus'] = 0
- if 'cpu_model' not in grains:
- grains['cpu_model'] = 'Unknown'
- if 'cpu_flags' not in grains:
- grains['cpu_flags'] = []
+ elif key == "Processor":
+ grains["cpu_model"] = val.split("-")[0]
+ grains["num_cpus"] = 1
+ if "num_cpus" not in grains:
+ grains["num_cpus"] = 0
+ if "cpu_model" not in grains:
+ grains["cpu_model"] = "Unknown"
+ if "cpu_flags" not in grains:
+ grains["cpu_flags"] = []
return grains
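
The parser is line oriented: each "key : value" pair from /proc/cpuinfo is dispatched on its key, with "processor" entries counted as zero-indexed. A compact standalone equivalent for the common x86 layout (parse_cpuinfo is hypothetical):

def parse_cpuinfo(text):
    grains = {"num_cpus": 0, "cpu_model": "Unknown", "cpu_flags": []}
    for line in text.splitlines():
        key, sep, val = line.partition(":")
        if not sep:
            continue  # skip blank lines and anything without a colon
        key, val = key.strip(), val.strip()
        if key == "processor":
            grains["num_cpus"] = int(val) + 1  # entries are zero-indexed
        elif key == "model name":
            grains["cpu_model"] = val
        elif key == "flags":
            grains["cpu_flags"] = val.split()
    return grains
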
def _linux_gpu_data():
- '''
+ """
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
- '''
- if __opts__.get('enable_lspci', True) is False:
+ """
+ if __opts__.get("enable_lspci", True) is False:
return {}
- if __opts__.get('enable_gpu_grains', True) is False:
+ if __opts__.get("enable_gpu_grains", True) is False:
return {}
- lspci = salt.utils.path.which('lspci')
+ lspci = salt.utils.path.which("lspci")
if not lspci:
log.debug(
- 'The `lspci` binary is not available on the system. GPU grains '
- 'will not be available.'
+ "The `lspci` binary is not available on the system. GPU grains "
+ "will not be available."
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
- known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
- gpu_classes = ('vga compatible controller', '3d controller')
+ known_vendors = [
+ "nvidia",
+ "amd",
+ "ati",
+ "intel",
+ "cirrus logic",
+ "vmware",
+ "matrox",
+ "aspeed",
+ ]
+ gpu_classes = ("vga compatible controller", "3d controller")
devs = []
try:
- lspci_out = __salt__['cmd.run']('{0} -vmm'.format(lspci))
+ lspci_out = __salt__["cmd.run"]("{0} -vmm".format(lspci))
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
        # otherwise the last device is not evaluated as a cur_dev and is ignored.
lspci_list = lspci_out.splitlines()
- lspci_list.append('')
+ lspci_list.append("")
for line in lspci_list:
# check for record-separating empty lines
- if line == '':
- if cur_dev.get('Class', '').lower() in gpu_classes:
+ if line == "":
+ if cur_dev.get("Class", "").lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
- if re.match(r'^\w+:\s+.*', line):
- key, val = line.split(':', 1)
+ if re.match(r"^\w+:\s+.*", line):
+ key, val = line.split(":", 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
- log.debug('Unexpected lspci output: \'%s\'', line)
+ log.debug("Unexpected lspci output: '%s'", line)
if error:
log.warning(
- 'Error loading grains, unexpected linux_gpu_data output, '
- 'check that you have a valid shell configured and '
- 'permissions to run lspci command'
+ "Error loading grains, unexpected linux_gpu_data output, "
+ "check that you have a valid shell configured and "
+ "permissions to run lspci command"
)
except OSError:
pass
gpus = []
for gpu in devs:
- vendor_strings = gpu['Vendor'].lower().split()
+ vendor_strings = gpu["Vendor"].lower().split()
# default vendor to 'unknown', overwrite if we match a known one
- vendor = 'unknown'
+ vendor = "unknown"
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
- gpus.append({'vendor': vendor, 'model': gpu['Device']})
+ gpus.append({"vendor": vendor, "model": gpu["Device"]})
grains = {}
- grains['num_gpus'] = len(gpus)
- grains['gpus'] = gpus
+ grains["num_gpus"] = len(gpus)
+ grains["gpus"] = gpus
return grains
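
The lspci -vmm output parsed above is a series of blank-line-separated records of "Key:\tValue" pairs, which is why an empty element is appended to flush the final record. The record splitting in isolation (parse_lspci_vmm is hypothetical):

def parse_lspci_vmm(output):
    devices, cur = [], {}
    for line in output.splitlines() + [""]:  # trailing "" flushes the last record
        if line == "":
            if cur:
                devices.append(cur)
            cur = {}
            continue
        if ":" not in line:
            continue  # the real parser logs these as unexpected output
        key, _, val = line.partition(":")
        cur[key.strip()] = val.strip()
    return devices
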
def _netbsd_gpu_data():
- '''
+ """
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
- '''
- known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
+ """
+ known_vendors = [
+ "nvidia",
+ "amd",
+ "ati",
+ "intel",
+ "cirrus logic",
+ "vmware",
+ "matrox",
+ "aspeed",
+ ]
gpus = []
try:
- pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
+ pcictl_out = __salt__["cmd.run"]("pcictl pci0 list")
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
- r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
- line,
- re.IGNORECASE
+ r"[0-9:]+ ({0}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE
)
if vendor_match:
- gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
+ gpus.append(
+ {
+ "vendor": vendor_match.group(1),
+ "model": vendor_match.group(2),
+ }
+ )
except OSError:
pass
grains = {}
- grains['num_gpus'] = len(gpus)
- grains['gpus'] = gpus
+ grains["num_gpus"] = len(gpus)
+ grains["gpus"] = gpus
return grains
def _osx_gpudata():
- '''
+ """
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
- '''
+ """
gpus = []
try:
- pcictl_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
+ pcictl_out = __salt__["cmd.run"]("system_profiler SPDisplaysDataType")
for line in pcictl_out.splitlines():
- fieldname, _, fieldval = line.partition(': ')
+ fieldname, _, fieldval = line.partition(": ")
if fieldname.strip() == "Chipset Model":
- vendor, _, model = fieldval.partition(' ')
+ vendor, _, model = fieldval.partition(" ")
vendor = vendor.lower()
- gpus.append({'vendor': vendor, 'model': model})
+ gpus.append({"vendor": vendor, "model": model})
except OSError:
pass
grains = {}
- grains['num_gpus'] = len(gpus)
- grains['gpus'] = gpus
+ grains["num_gpus"] = len(gpus)
+ grains["gpus"] = gpus
return grains
def _bsd_cpudata(osdata):
- '''
+ """
Return CPU information for BSD-like systems
- '''
+ """
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
- sysctl = salt.utils.path.which('sysctl')
- arch = salt.utils.path.which('arch')
+ sysctl = salt.utils.path.which("sysctl")
+ arch = salt.utils.path.which("arch")
cmds = {}
if sysctl:
- cmds.update({
- 'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
- 'cpuarch': '{0} -n hw.machine'.format(sysctl),
- 'cpu_model': '{0} -n hw.model'.format(sysctl),
- })
+ cmds.update(
+ {
+ "num_cpus": "{0} -n hw.ncpu".format(sysctl),
+ "cpuarch": "{0} -n hw.machine".format(sysctl),
+ "cpu_model": "{0} -n hw.model".format(sysctl),
+ }
+ )
- if arch and osdata['kernel'] == 'OpenBSD':
- cmds['cpuarch'] = '{0} -s'.format(arch)
+ if arch and osdata["kernel"] == "OpenBSD":
+ cmds["cpuarch"] = "{0} -s".format(arch)
- if osdata['kernel'] == 'Darwin':
- cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
- cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
+ if osdata["kernel"] == "Darwin":
+ cmds["cpu_model"] = "{0} -n machdep.cpu.brand_string".format(sysctl)
+ cmds["cpu_flags"] = "{0} -n machdep.cpu.features".format(sysctl)
- grains = dict([(k, __salt__['cmd.run'](v)) for k, v in six.iteritems(cmds)])
+ grains = dict([(k, __salt__["cmd.run"](v)) for k, v in six.iteritems(cmds)])
- if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], six.string_types):
- grains['cpu_flags'] = grains['cpu_flags'].split(' ')
+ if "cpu_flags" in grains and isinstance(grains["cpu_flags"], six.string_types):
+ grains["cpu_flags"] = grains["cpu_flags"].split(" ")
- if osdata['kernel'] == 'NetBSD':
- grains['cpu_flags'] = []
- for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
- cpu_match = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
+ if osdata["kernel"] == "NetBSD":
+ grains["cpu_flags"] = []
+ for line in __salt__["cmd.run"]("cpuctl identify 0").splitlines():
+ cpu_match = re.match(r"cpu[0-9]:\ features[0-9]?\ .+<(.+)>", line)
if cpu_match:
- flag = cpu_match.group(1).split(',')
- grains['cpu_flags'].extend(flag)
+ flag = cpu_match.group(1).split(",")
+ grains["cpu_flags"].extend(flag)
- if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
- grains['cpu_flags'] = []
+ if osdata["kernel"] == "FreeBSD" and os.path.isfile("/var/run/dmesg.boot"):
+ grains["cpu_flags"] = []
        # TODO: at least it needs to be tested on BSDs other than FreeBSD
- with salt.utils.files.fopen('/var/run/dmesg.boot', 'r') as _fp:
+ with salt.utils.files.fopen("/var/run/dmesg.boot", "r") as _fp:
cpu_here = False
for line in _fp:
- if line.startswith('CPU: '):
+ if line.startswith("CPU: "):
cpu_here = True # starts CPU descr
continue
if cpu_here:
- if not line.startswith(' '):
+ if not line.startswith(" "):
break # game over
- if 'Features' in line:
- start = line.find('<')
- end = line.find('>')
+ if "Features" in line:
+ start = line.find("<")
+ end = line.find(">")
if start > 0 and end > 0:
- flag = line[start + 1:end].split(',')
- grains['cpu_flags'].extend(flag)
+ flag = line[start + 1 : end].split(",")
+ grains["cpu_flags"].extend(flag)
try:
- grains['num_cpus'] = int(grains['num_cpus'])
+ grains["num_cpus"] = int(grains["num_cpus"])
except ValueError:
- grains['num_cpus'] = 1
+ grains["num_cpus"] = 1
return grains
def _sunos_cpudata():
- '''
+ """
Return the CPU information for Solaris-like systems
- '''
+ """
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
- grains['cpu_flags'] = []
+ grains["cpu_flags"] = []
- grains['cpuarch'] = __salt__['cmd.run']('isainfo -k')
- psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
- grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo, python_shell=True).splitlines())
- kstat_info = 'kstat -p cpu_info:*:*:brand'
- for line in __salt__['cmd.run'](kstat_info).splitlines():
- match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
+ grains["cpuarch"] = __salt__["cmd.run"]("isainfo -k")
+ psrinfo = "/usr/sbin/psrinfo 2>/dev/null"
+ grains["num_cpus"] = len(
+ __salt__["cmd.run"](psrinfo, python_shell=True).splitlines()
+ )
+ kstat_info = "kstat -p cpu_info:*:*:brand"
+ for line in __salt__["cmd.run"](kstat_info).splitlines():
+ match = re.match(r"(\w+:\d+:\w+\d+:\w+)\s+(.+)", line)
if match:
- grains['cpu_model'] = match.group(2)
- isainfo = 'isainfo -n -v'
- for line in __salt__['cmd.run'](isainfo).splitlines():
- match = re.match(r'^\s+(.+)', line)
+ grains["cpu_model"] = match.group(2)
+ isainfo = "isainfo -n -v"
+ for line in __salt__["cmd.run"](isainfo).splitlines():
+ match = re.match(r"^\s+(.+)", line)
if match:
cpu_flags = match.group(1).split()
- grains['cpu_flags'].extend(cpu_flags)
+ grains["cpu_flags"].extend(cpu_flags)
return grains
def _aix_cpudata():
- '''
+ """
Return CPU information for AIX systems
- '''
+ """
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
- cmd = salt.utils.path.which('prtconf')
+ cmd = salt.utils.path.which("prtconf")
if cmd:
- data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
- for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
- ('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
- ('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
- ('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
+ data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
+ for dest, regstring in (
+ ("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
+ ("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
+ ("cpu_model", r"(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)"),
+ ("num_cpus", r"(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)"),
+ ):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains[dest] = res.group(1).strip().replace("'", '')
+ grains[dest] = res.group(1).strip().replace("'", "")
else:
- log.error('The \'prtconf\' binary was not found in $PATH.')
+ log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def _linux_memdata():
- '''
+ """
Return the memory information for Linux-like systems
- '''
- grains = {'mem_total': 0, 'swap_total': 0}
+ """
+ grains = {"mem_total": 0, "swap_total": 0}
- meminfo = '/proc/meminfo'
+ meminfo = "/proc/meminfo"
if os.path.isfile(meminfo):
- with salt.utils.files.fopen(meminfo, 'r') as ifile:
+ with salt.utils.files.fopen(meminfo, "r") as ifile:
for line in ifile:
- comps = line.rstrip('\n').split(':')
+ comps = line.rstrip("\n").split(":")
if not len(comps) > 1:
continue
- if comps[0].strip() == 'MemTotal':
+ if comps[0].strip() == "MemTotal":
# Use floor division to force output to be an integer
- grains['mem_total'] = int(comps[1].split()[0]) // 1024
- if comps[0].strip() == 'SwapTotal':
+ grains["mem_total"] = int(comps[1].split()[0]) // 1024
+ if comps[0].strip() == "SwapTotal":
# Use floor division to force output to be an integer
- grains['swap_total'] = int(comps[1].split()[0]) // 1024
+ grains["swap_total"] = int(comps[1].split()[0]) // 1024
return grains
def _osx_memdata():
- '''
+ """
Return the memory information for BSD-like systems
- '''
- grains = {'mem_total': 0, 'swap_total': 0}
+ """
+ grains = {"mem_total": 0, "swap_total": 0}
- sysctl = salt.utils.path.which('sysctl')
+ sysctl = salt.utils.path.which("sysctl")
if sysctl:
- mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
- swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2].replace(',', '.')
- if swap_total.endswith('K'):
- _power = 2**10
- elif swap_total.endswith('M'):
- _power = 2**20
- elif swap_total.endswith('G'):
- _power = 2**30
+ mem = __salt__["cmd.run"]("{0} -n hw.memsize".format(sysctl))
+ swap_total = (
+ __salt__["cmd.run"]("{0} -n vm.swapusage".format(sysctl))
+ .split()[2]
+ .replace(",", ".")
+ )
+ if swap_total.endswith("K"):
+ _power = 2 ** 10
+ elif swap_total.endswith("M"):
+ _power = 2 ** 20
+ elif swap_total.endswith("G"):
+ _power = 2 ** 30
swap_total = float(swap_total[:-1]) * _power
- grains['mem_total'] = int(mem) // 1024 // 1024
- grains['swap_total'] = int(swap_total) // 1024 // 1024
+ grains["mem_total"] = int(mem) // 1024 // 1024
+ grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
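
vm.swapusage reports sizes such as 1024,00M or 2.00G, so the branch above normalizes the decimal separator and multiplies by a power of two chosen by the suffix. A sketch of that conversion which also tolerates a missing suffix (parse_size is hypothetical):

def parse_size(value):
    powers = {"K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30}
    value = value.replace(",", ".")  # some locales print a comma separator
    if value and value[-1] in powers:
        return float(value[:-1]) * powers[value[-1]]
    return float(value)  # no suffix: assume the value is already in bytes
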
def _bsd_memdata(osdata):
- '''
+ """
Return the memory information for BSD-like systems
- '''
- grains = {'mem_total': 0, 'swap_total': 0}
+ """
+ grains = {"mem_total": 0, "swap_total": 0}
- sysctl = salt.utils.path.which('sysctl')
+ sysctl = salt.utils.path.which("sysctl")
if sysctl:
- mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
- if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
- mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
- grains['mem_total'] = int(mem) // 1024 // 1024
+ mem = __salt__["cmd.run"]("{0} -n hw.physmem".format(sysctl))
+ if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
+ mem = __salt__["cmd.run"]("{0} -n hw.physmem64".format(sysctl))
+ grains["mem_total"] = int(mem) // 1024 // 1024
- if osdata['kernel'] in ['OpenBSD', 'NetBSD']:
- swapctl = salt.utils.path.which('swapctl')
- swap_data = __salt__['cmd.run']('{0} -sk'.format(swapctl))
- if swap_data == 'no swap devices configured':
+ if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
+ swapctl = salt.utils.path.which("swapctl")
+ swap_data = __salt__["cmd.run"]("{0} -sk".format(swapctl))
+ if swap_data == "no swap devices configured":
swap_total = 0
else:
- swap_total = swap_data.split(' ')[1]
+ swap_total = swap_data.split(" ")[1]
else:
- swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl))
- grains['swap_total'] = int(swap_total) // 1024 // 1024
+ swap_total = __salt__["cmd.run"]("{0} -n vm.swap_total".format(sysctl))
+ grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
def _sunos_memdata():
- '''
+ """
Return the memory information for SunOS-like systems
- '''
- grains = {'mem_total': 0, 'swap_total': 0}
+ """
+ grains = {"mem_total": 0, "swap_total": 0}
- prtconf = '/usr/sbin/prtconf 2>/dev/null'
- for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
- comps = line.split(' ')
- if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
- grains['mem_total'] = int(comps[2].strip())
+ prtconf = "/usr/sbin/prtconf 2>/dev/null"
+ for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
+ comps = line.split(" ")
+ if comps[0].strip() == "Memory" and comps[1].strip() == "size:":
+ grains["mem_total"] = int(comps[2].strip())
- swap_cmd = salt.utils.path.which('swap')
- swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
+ swap_cmd = salt.utils.path.which("swap")
+ swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1])
swap_total = (swap_avail + swap_used) // 1024
except ValueError:
swap_total = None
- grains['swap_total'] = swap_total
+ grains["swap_total"] = swap_total
return grains
def _aix_memdata():
- '''
+ """
Return the memory information for AIX systems
- '''
- grains = {'mem_total': 0, 'swap_total': 0}
- prtconf = salt.utils.path.which('prtconf')
+ """
+ grains = {"mem_total": 0, "swap_total": 0}
+ prtconf = salt.utils.path.which("prtconf")
if prtconf:
- for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
- comps = [x for x in line.strip().split(' ') if x]
- if len(comps) > 2 and 'Memory' in comps[0] and 'Size' in comps[1]:
- grains['mem_total'] = int(comps[2])
+ for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
+ comps = [x for x in line.strip().split(" ") if x]
+ if len(comps) > 2 and "Memory" in comps[0] and "Size" in comps[1]:
+ grains["mem_total"] = int(comps[2])
break
else:
- log.error('The \'prtconf\' binary was not found in $PATH.')
+ log.error("The 'prtconf' binary was not found in $PATH.")
- swap_cmd = salt.utils.path.which('swap')
+ swap_cmd = salt.utils.path.which("swap")
if swap_cmd:
- swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
+ swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
- grains['swap_total'] = swap_total
+ grains["swap_total"] = swap_total
else:
- log.error('The \'swap\' binary was not found in $PATH.')
+ log.error("The 'swap' binary was not found in $PATH.")
return grains
def _windows_memdata():
- '''
+ """
Return the memory information for Windows systems
- '''
- grains = {'mem_total': 0}
+ """
+ grains = {"mem_total": 0}
# get the Total Physical memory as reported by msinfo32
- tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys']
+ tot_bytes = win32api.GlobalMemoryStatusEx()["TotalPhys"]
    # return memory info in megabytes
- grains['mem_total'] = int(tot_bytes / (1024 ** 2))
+ grains["mem_total"] = int(tot_bytes / (1024 ** 2))
return grains
def _memdata(osdata):
- '''
+ """
Gather information about the system memory
- '''
+ """
# Provides:
# mem_total
# swap_total, for supported systems.
- grains = {'mem_total': 0}
- if osdata['kernel'] == 'Linux':
+ grains = {"mem_total": 0}
+ if osdata["kernel"] == "Linux":
grains.update(_linux_memdata())
- elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
+ elif osdata["kernel"] in ("FreeBSD", "OpenBSD", "NetBSD"):
grains.update(_bsd_memdata(osdata))
- elif osdata['kernel'] == 'Darwin':
+ elif osdata["kernel"] == "Darwin":
grains.update(_osx_memdata())
- elif osdata['kernel'] == 'SunOS':
+ elif osdata["kernel"] == "SunOS":
grains.update(_sunos_memdata())
- elif osdata['kernel'] == 'AIX':
+ elif osdata["kernel"] == "AIX":
grains.update(_aix_memdata())
- elif osdata['kernel'] == 'Windows' and HAS_WMI:
+ elif osdata["kernel"] == "Windows" and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
- '''
+ """
Parse the output of lsattr -El sys0 for os_uuid
- '''
+ """
grains = {}
- cmd = salt.utils.path.which('lsattr')
+ cmd = salt.utils.path.which("lsattr")
if cmd:
- data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
- uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
+ data = __salt__["cmd.run"]("{0} -El sys0".format(cmd)) + os.linesep
+ uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains['machine_id'] = res.group(1).strip()
+ grains["machine_id"] = res.group(1).strip()
break
else:
- log.error('The \'lsattr\' binary was not found in $PATH.')
+ log.error("The 'lsattr' binary was not found in $PATH.")
return grains
def _windows_virtual(osdata):
- '''
+ """
    Returns what type of virtual hardware is under the hood (kvm, physical, VMware, etc.)
- '''
+ """
# Provides:
# virtual
# virtual_subtype
grains = dict()
- if osdata['kernel'] != 'Windows':
+ if osdata["kernel"] != "Windows":
return grains
- grains['virtual'] = osdata.get('virtual', 'physical')
+ grains["virtual"] = osdata.get("virtual", "physical")
# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
- manufacturer = osdata.get('manufacturer', '')
+ manufacturer = osdata.get("manufacturer", "")
if manufacturer is None:
- manufacturer = ''
- productname = osdata.get('productname', '')
+ manufacturer = ""
+ productname = osdata.get("productname", "")
if productname is None:
- productname = ''
+ productname = ""
- if 'QEMU' in manufacturer:
+ if "QEMU" in manufacturer:
        # FIXME: Make this distinguish between kvm and qemu
- grains['virtual'] = 'kvm'
- if 'Bochs' in manufacturer:
- grains['virtual'] = 'kvm'
+ grains["virtual"] = "kvm"
+ if "Bochs" in manufacturer:
+ grains["virtual"] = "kvm"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
- elif 'oVirt' in productname:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'oVirt'
+ elif "oVirt" in productname:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "oVirt"
# Red Hat Enterprise Virtualization
- elif 'RHEV Hypervisor' in productname:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'rhev'
+ elif "RHEV Hypervisor" in productname:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "rhev"
# Product Name: VirtualBox
- elif 'VirtualBox' in productname:
- grains['virtual'] = 'VirtualBox'
+ elif "VirtualBox" in productname:
+ grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
- elif 'VMware Virtual Platform' in productname:
- grains['virtual'] = 'VMware'
+ elif "VMware Virtual Platform" in productname:
+ grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
- elif 'Microsoft' in manufacturer and \
- 'Virtual Machine' in productname:
- grains['virtual'] = 'VirtualPC'
+ elif "Microsoft" in manufacturer and "Virtual Machine" in productname:
+ grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
- elif 'Parallels Software' in manufacturer:
- grains['virtual'] = 'Parallels'
+ elif "Parallels Software" in manufacturer:
+ grains["virtual"] = "Parallels"
# Apache CloudStack
- elif 'CloudStack KVM Hypervisor' in productname:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'cloudstack'
+ elif "CloudStack KVM Hypervisor" in productname:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "cloudstack"
return grains
def _virtual(osdata):
- '''
+ """
    Returns what type of virtual hardware is under the hood (kvm, physical, VMware, etc.)
- '''
+ """
    # This is going to be a monster; if you are running a VM, please test
    # this grain and submit patches!
# Provides:
# virtual
# virtual_subtype
- grains = {'virtual': osdata.get('virtual', 'physical')}
+ grains = {"virtual": osdata.get("virtual", "physical")}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
- skip_cmds = ('AIX',)
+ skip_cmds = ("AIX",)
# list of commands to be executed to determine the 'virtual' grain
- _cmds = ['systemd-detect-virt', 'virt-what', 'dmidecode']
+ _cmds = ["systemd-detect-virt", "virt-what", "dmidecode"]
# test first for virt-what, which covers most of the desired functionality
# on most platforms
- if not salt.utils.platform.is_windows() and osdata['kernel'] not in skip_cmds:
- if salt.utils.path.which('virt-what'):
- _cmds = ['virt-what']
+ if not salt.utils.platform.is_windows() and osdata["kernel"] not in skip_cmds:
+ if salt.utils.path.which("virt-what"):
+ _cmds = ["virt-what"]
# Check if enable_lspci is True or False
- if __opts__.get('enable_lspci', True) is True:
+ if __opts__.get("enable_lspci", True) is True:
            # /proc/bus/pci does not exist, lspci will fail
- if os.path.exists('/proc/bus/pci'):
- _cmds += ['lspci']
+ if os.path.exists("/proc/bus/pci"):
+ _cmds += ["lspci"]
# Add additional last resort commands
- if osdata['kernel'] in skip_cmds:
+ if osdata["kernel"] in skip_cmds:
_cmds = ()
# Quick backout for BrandZ (Solaris LX Branded zones)
# Don't waste time trying other commands to detect the virtual grain
- if HAS_UNAME and osdata['kernel'] == 'Linux' and 'BrandZ virtual linux' in os.uname():
- grains['virtual'] = 'zone'
+ if (
+ HAS_UNAME
+ and osdata["kernel"] == "Linux"
+ and "BrandZ virtual linux" in os.uname()
+ ):
+ grains["virtual"] = "zone"
return grains
failed_commands = set()
for command in _cmds:
args = []
- if osdata['kernel'] == 'Darwin':
- command = 'system_profiler'
- args = ['SPDisplaysDataType']
- elif osdata['kernel'] == 'SunOS':
- virtinfo = salt.utils.path.which('virtinfo')
+ if osdata["kernel"] == "Darwin":
+ command = "system_profiler"
+ args = ["SPDisplaysDataType"]
+ elif osdata["kernel"] == "SunOS":
+ virtinfo = salt.utils.path.which("virtinfo")
if virtinfo:
try:
- ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo))
+ ret = __salt__["cmd.run_all"]("{0} -a".format(virtinfo))
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
failed_commands.add(virtinfo)
else:
- if ret['stdout'].endswith('not supported'):
- command = 'prtdiag'
+ if ret["stdout"].endswith("not supported"):
+ command = "prtdiag"
else:
- command = 'virtinfo'
+ command = "virtinfo"
else:
- command = 'prtdiag'
+ command = "prtdiag"
cmd = salt.utils.path.which(command)
if not cmd:
continue
- cmd = '{0} {1}'.format(cmd, ' '.join(args))
+ cmd = "{0} {1}".format(cmd, " ".join(args))
try:
- ret = __salt__['cmd.run_all'](cmd)
+ ret = __salt__["cmd.run_all"](cmd)
- if ret['retcode'] > 0:
+ if ret["retcode"] > 0:
if salt.log.is_logging_configured():
# systemd-detect-virt always returns > 0 on non-virtualized
# systems
# prtdiag only works in the global zone, skip if it fails
- if salt.utils.platform.is_windows() or 'systemd-detect-virt' in cmd or 'prtdiag' in cmd:
+ if (
+ salt.utils.platform.is_windows()
+ or "systemd-detect-virt" in cmd
+ or "prtdiag" in cmd
+ ):
continue
failed_commands.add(command)
continue
@@ -771,329 +829,345 @@ def _virtual(osdata):
failed_commands.add(command)
continue
- output = ret['stdout']
+ output = ret["stdout"]
if command == "system_profiler":
macoutput = output.lower()
- if '0x1ab8' in macoutput:
- grains['virtual'] = 'Parallels'
- if 'parallels' in macoutput:
- grains['virtual'] = 'Parallels'
- if 'vmware' in macoutput:
- grains['virtual'] = 'VMware'
- if '0x15ad' in macoutput:
- grains['virtual'] = 'VMware'
- if 'virtualbox' in macoutput:
- grains['virtual'] = 'VirtualBox'
+ if "0x1ab8" in macoutput:
+ grains["virtual"] = "Parallels"
+ if "parallels" in macoutput:
+ grains["virtual"] = "Parallels"
+ if "vmware" in macoutput:
+ grains["virtual"] = "VMware"
+ if "0x15ad" in macoutput:
+ grains["virtual"] = "VMware"
+ if "virtualbox" in macoutput:
+ grains["virtual"] = "VirtualBox"
# Break out of the loop so the next log message is not issued
break
- elif command == 'systemd-detect-virt':
- if output in ('qemu', 'kvm', 'oracle', 'xen', 'bochs', 'chroot', 'uml', 'systemd-nspawn'):
- grains['virtual'] = output
+ elif command == "systemd-detect-virt":
+ if output in (
+ "qemu",
+ "kvm",
+ "oracle",
+ "xen",
+ "bochs",
+ "chroot",
+ "uml",
+ "systemd-nspawn",
+ ):
+ grains["virtual"] = output
break
- elif 'vmware' in output:
- grains['virtual'] = 'VMware'
+ elif "vmware" in output:
+ grains["virtual"] = "VMware"
break
- elif 'microsoft' in output:
- grains['virtual'] = 'VirtualPC'
+ elif "microsoft" in output:
+ grains["virtual"] = "VirtualPC"
break
- elif 'lxc' in output:
- grains['virtual'] = 'LXC'
+ elif "lxc" in output:
+ grains["virtual"] = "LXC"
break
- elif 'systemd-nspawn' in output:
- grains['virtual'] = 'LXC'
+ elif "systemd-nspawn" in output:
+ grains["virtual"] = "LXC"
break
- elif command == 'virt-what':
+ elif command == "virt-what":
try:
output = output.splitlines()[-1]
except IndexError:
pass
- if output in ('kvm', 'qemu', 'uml', 'xen', 'lxc'):
- grains['virtual'] = output
+ if output in ("kvm", "qemu", "uml", "xen", "lxc"):
+ grains["virtual"] = output
break
- elif 'vmware' in output:
- grains['virtual'] = 'VMware'
+ elif "vmware" in output:
+ grains["virtual"] = "VMware"
break
- elif 'parallels' in output:
- grains['virtual'] = 'Parallels'
+ elif "parallels" in output:
+ grains["virtual"] = "Parallels"
break
- elif 'hyperv' in output:
- grains['virtual'] = 'HyperV'
+ elif "hyperv" in output:
+ grains["virtual"] = "HyperV"
break
- elif command == 'dmidecode':
+ elif command == "dmidecode":
# Product Name: VirtualBox
- if 'Vendor: QEMU' in output:
+ if "Vendor: QEMU" in output:
                # FIXME: Make this distinguish between kvm and qemu
- grains['virtual'] = 'kvm'
- if 'Manufacturer: QEMU' in output:
- grains['virtual'] = 'kvm'
- if 'Vendor: Bochs' in output:
- grains['virtual'] = 'kvm'
- if 'Manufacturer: Bochs' in output:
- grains['virtual'] = 'kvm'
- if 'BHYVE' in output:
- grains['virtual'] = 'bhyve'
+ grains["virtual"] = "kvm"
+ if "Manufacturer: QEMU" in output:
+ grains["virtual"] = "kvm"
+ if "Vendor: Bochs" in output:
+ grains["virtual"] = "kvm"
+ if "Manufacturer: Bochs" in output:
+ grains["virtual"] = "kvm"
+ if "BHYVE" in output:
+ grains["virtual"] = "bhyve"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
- elif 'Manufacturer: oVirt' in output:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'ovirt'
+ elif "Manufacturer: oVirt" in output:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "ovirt"
# Red Hat Enterprise Virtualization
- elif 'Product Name: RHEV Hypervisor' in output:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'rhev'
- elif 'VirtualBox' in output:
- grains['virtual'] = 'VirtualBox'
+ elif "Product Name: RHEV Hypervisor" in output:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "rhev"
+ elif "VirtualBox" in output:
+ grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
- elif 'VMware' in output:
- grains['virtual'] = 'VMware'
+ elif "VMware" in output:
+ grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
- elif ': Microsoft' in output and 'Virtual Machine' in output:
- grains['virtual'] = 'VirtualPC'
+ elif ": Microsoft" in output and "Virtual Machine" in output:
+ grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
- elif 'Parallels Software' in output:
- grains['virtual'] = 'Parallels'
- elif 'Manufacturer: Google' in output:
- grains['virtual'] = 'kvm'
+ elif "Parallels Software" in output:
+ grains["virtual"] = "Parallels"
+ elif "Manufacturer: Google" in output:
+ grains["virtual"] = "kvm"
# Proxmox KVM
- elif 'Vendor: SeaBIOS' in output:
- grains['virtual'] = 'kvm'
+ elif "Vendor: SeaBIOS" in output:
+ grains["virtual"] = "kvm"
# Break out of the loop, lspci parsing is not necessary
break
- elif command == 'lspci':
+ elif command == "lspci":
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
- if 'vmware' in model:
- grains['virtual'] = 'VMware'
+ if "vmware" in model:
+ grains["virtual"] = "VMware"
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
- elif 'virtualbox' in model:
- grains['virtual'] = 'VirtualBox'
- elif 'qemu' in model:
- grains['virtual'] = 'kvm'
- elif 'virtio' in model:
- grains['virtual'] = 'kvm'
+ elif "virtualbox" in model:
+ grains["virtual"] = "VirtualBox"
+ elif "qemu" in model:
+ grains["virtual"] = "kvm"
+ elif "virtio" in model:
+ grains["virtual"] = "kvm"
# Break out of the loop so the next log message is not issued
break
- elif command == 'prtdiag':
+ elif command == "prtdiag":
model = output.lower().split("\n")[0]
- if 'vmware' in model:
- grains['virtual'] = 'VMware'
- elif 'virtualbox' in model:
- grains['virtual'] = 'VirtualBox'
- elif 'qemu' in model:
- grains['virtual'] = 'kvm'
- elif 'joyent smartdc hvm' in model:
- grains['virtual'] = 'kvm'
+ if "vmware" in model:
+ grains["virtual"] = "VMware"
+ elif "virtualbox" in model:
+ grains["virtual"] = "VirtualBox"
+ elif "qemu" in model:
+ grains["virtual"] = "kvm"
+ elif "joyent smartdc hvm" in model:
+ grains["virtual"] = "kvm"
break
- elif command == 'virtinfo':
- grains['virtual'] = 'LDOM'
+ elif command == "virtinfo":
+ grains["virtual"] = "LDOM"
break
- choices = ('Linux', 'HP-UX')
+ choices = ("Linux", "HP-UX")
isdir = os.path.isdir
- sysctl = salt.utils.path.which('sysctl')
- if osdata['kernel'] in choices:
- if os.path.isdir('/proc'):
+ sysctl = salt.utils.path.which("sysctl")
+ if osdata["kernel"] in choices:
+ if os.path.isdir("/proc"):
try:
- self_root = os.stat('/')
- init_root = os.stat('/proc/1/root/.')
+ self_root = os.stat("/")
+ init_root = os.stat("/proc/1/root/.")
if self_root != init_root:
- grains['virtual_subtype'] = 'chroot'
+ grains["virtual_subtype"] = "chroot"
except (IOError, OSError):
pass
- if isdir('/proc/vz'):
- if os.path.isfile('/proc/vz/version'):
- grains['virtual'] = 'openvzhn'
- elif os.path.isfile('/proc/vz/veinfo'):
- grains['virtual'] = 'openvzve'
+ if isdir("/proc/vz"):
+ if os.path.isfile("/proc/vz/version"):
+ grains["virtual"] = "openvzhn"
+ elif os.path.isfile("/proc/vz/veinfo"):
+ grains["virtual"] = "openvzve"
                # a posteriori, these commands are expected to have failed:
- failed_commands.discard('lspci')
- failed_commands.discard('dmidecode')
+ failed_commands.discard("lspci")
+ failed_commands.discard("dmidecode")
# Provide additional detection for OpenVZ
- if os.path.isfile('/proc/self/status'):
- with salt.utils.files.fopen('/proc/self/status') as status_file:
- vz_re = re.compile(r'^envID:\s+(\d+)$')
+ if os.path.isfile("/proc/self/status"):
+ with salt.utils.files.fopen("/proc/self/status") as status_file:
+ vz_re = re.compile(r"^envID:\s+(\d+)$")
for line in status_file:
- vz_match = vz_re.match(line.rstrip('\n'))
+ vz_match = vz_re.match(line.rstrip("\n"))
if vz_match and int(vz_match.groups()[0]) != 0:
- grains['virtual'] = 'openvzve'
+ grains["virtual"] = "openvzve"
elif vz_match and int(vz_match.groups()[0]) == 0:
- grains['virtual'] = 'openvzhn'
- if isdir('/proc/sys/xen') or \
- isdir('/sys/bus/xen') or isdir('/proc/xen'):
- if os.path.isfile('/proc/xen/xsd_kva'):
+ grains["virtual"] = "openvzhn"
+ if isdir("/proc/sys/xen") or isdir("/sys/bus/xen") or isdir("/proc/xen"):
+ if os.path.isfile("/proc/xen/xsd_kva"):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
- grains['virtual_subtype'] = 'Xen Dom0'
+ grains["virtual_subtype"] = "Xen Dom0"
else:
- if osdata.get('productname', '') == 'HVM domU':
+ if osdata.get("productname", "") == "HVM domU":
# Requires dmidecode!
- grains['virtual_subtype'] = 'Xen HVM DomU'
- elif os.path.isfile('/proc/xen/capabilities') and \
- os.access('/proc/xen/capabilities', os.R_OK):
- with salt.utils.files.fopen('/proc/xen/capabilities') as fhr:
- if 'control_d' not in fhr.read():
+ grains["virtual_subtype"] = "Xen HVM DomU"
+ elif os.path.isfile("/proc/xen/capabilities") and os.access(
+ "/proc/xen/capabilities", os.R_OK
+ ):
+ with salt.utils.files.fopen("/proc/xen/capabilities") as fhr:
+ if "control_d" not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
- grains['virtual_subtype'] = 'Xen PV DomU'
+ grains["virtual_subtype"] = "Xen PV DomU"
else:
# Shouldn't get to this, but just in case
- grains['virtual_subtype'] = 'Xen Dom0'
+ grains["virtual_subtype"] = "Xen Dom0"
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
- elif isdir('/sys/bus/xen'):
- if 'xen:' in __salt__['cmd.run']('dmesg').lower():
- grains['virtual_subtype'] = 'Xen PV DomU'
- elif os.path.isfile('/sys/bus/xen/drivers/xenconsole'):
+ elif isdir("/sys/bus/xen"):
+ if "xen:" in __salt__["cmd.run"]("dmesg").lower():
+ grains["virtual_subtype"] = "Xen PV DomU"
+ elif os.path.isfile("/sys/bus/xen/drivers/xenconsole"):
# An actual DomU will have the xenconsole driver
- grains['virtual_subtype'] = 'Xen PV DomU'
+ grains["virtual_subtype"] = "Xen PV DomU"
# If a Dom0 or DomU was detected, obviously this is xen
- if 'dom' in grains.get('virtual_subtype', '').lower():
- grains['virtual'] = 'xen'
+ if "dom" in grains.get("virtual_subtype", "").lower():
+ grains["virtual"] = "xen"
        # Check container type after hypervisors, to avoid overwriting the
        # grain for containers running in a virtual environment.
- if os.path.isfile('/proc/1/cgroup'):
+ if os.path.isfile("/proc/1/cgroup"):
try:
- with salt.utils.files.fopen('/proc/1/cgroup', 'r') as fhr:
+ with salt.utils.files.fopen("/proc/1/cgroup", "r") as fhr:
fhr_contents = fhr.read()
- if ':/lxc/' in fhr_contents:
- grains['virtual'] = 'container'
- grains['virtual_subtype'] = 'LXC'
- elif ':/kubepods/' in fhr_contents:
- grains['virtual_subtype'] = 'kubernetes'
- elif ':/libpod_parent/' in fhr_contents:
- grains['virtual_subtype'] = 'libpod'
+ if ":/lxc/" in fhr_contents:
+ grains["virtual"] = "container"
+ grains["virtual_subtype"] = "LXC"
+ elif ":/kubepods/" in fhr_contents:
+ grains["virtual_subtype"] = "kubernetes"
+ elif ":/libpod_parent/" in fhr_contents:
+ grains["virtual_subtype"] = "libpod"
else:
- if any(x in fhr_contents
- for x in (':/system.slice/docker', ':/docker/',
- ':/docker-ce/')):
- grains['virtual'] = 'container'
- grains['virtual_subtype'] = 'Docker'
+ if any(
+ x in fhr_contents
+ for x in (":/system.slice/docker", ":/docker/", ":/docker-ce/")
+ ):
+ grains["virtual"] = "container"
+ grains["virtual_subtype"] = "Docker"
except IOError:
pass
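
The cgroup probe works because the controller paths in /proc/1/cgroup name the runtime that created them. The same decision table in isolation (container_subtype is hypothetical):

def container_subtype(cgroup_text):
    if ":/lxc/" in cgroup_text:
        return "LXC"
    if ":/kubepods/" in cgroup_text:
        return "kubernetes"
    if ":/libpod_parent/" in cgroup_text:
        return "libpod"
    docker_markers = (":/system.slice/docker", ":/docker/", ":/docker-ce/")
    if any(marker in cgroup_text for marker in docker_markers):
        return "Docker"
    return None  # no container runtime detected
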
- if os.path.isfile('/proc/cpuinfo'):
- with salt.utils.files.fopen('/proc/cpuinfo', 'r') as fhr:
- if 'QEMU Virtual CPU' in fhr.read():
- grains['virtual'] = 'kvm'
- if os.path.isfile('/sys/devices/virtual/dmi/id/product_name'):
+ if os.path.isfile("/proc/cpuinfo"):
+ with salt.utils.files.fopen("/proc/cpuinfo", "r") as fhr:
+ if "QEMU Virtual CPU" in fhr.read():
+ grains["virtual"] = "kvm"
+ if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
try:
- with salt.utils.files.fopen('/sys/devices/virtual/dmi/id/product_name', 'r') as fhr:
- output = salt.utils.stringutils.to_unicode(fhr.read(), errors='replace')
- if 'VirtualBox' in output:
- grains['virtual'] = 'VirtualBox'
- elif 'RHEV Hypervisor' in output:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'rhev'
- elif 'oVirt Node' in output:
- grains['virtual'] = 'kvm'
- grains['virtual_subtype'] = 'ovirt'
- elif 'Google' in output:
- grains['virtual'] = 'gce'
- elif 'BHYVE' in output:
- grains['virtual'] = 'bhyve'
+ with salt.utils.files.fopen(
+ "/sys/devices/virtual/dmi/id/product_name", "r"
+ ) as fhr:
+ output = salt.utils.stringutils.to_unicode(
+ fhr.read(), errors="replace"
+ )
+ if "VirtualBox" in output:
+ grains["virtual"] = "VirtualBox"
+ elif "RHEV Hypervisor" in output:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "rhev"
+ elif "oVirt Node" in output:
+ grains["virtual"] = "kvm"
+ grains["virtual_subtype"] = "ovirt"
+ elif "Google" in output:
+ grains["virtual"] = "gce"
+ elif "BHYVE" in output:
+ grains["virtual"] = "bhyve"
except IOError:
pass
- elif osdata['kernel'] == 'FreeBSD':
- kenv = salt.utils.path.which('kenv')
+ elif osdata["kernel"] == "FreeBSD":
+ kenv = salt.utils.path.which("kenv")
if kenv:
- product = __salt__['cmd.run'](
- '{0} smbios.system.product'.format(kenv)
- )
- maker = __salt__['cmd.run'](
- '{0} smbios.system.maker'.format(kenv)
- )
- if product.startswith('VMware'):
- grains['virtual'] = 'VMware'
- if product.startswith('VirtualBox'):
- grains['virtual'] = 'VirtualBox'
- if maker.startswith('Xen'):
- grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
- grains['virtual'] = 'xen'
- if maker.startswith('Microsoft') and product.startswith('Virtual'):
- grains['virtual'] = 'VirtualPC'
- if maker.startswith('OpenStack'):
- grains['virtual'] = 'OpenStack'
- if maker.startswith('Bochs'):
- grains['virtual'] = 'kvm'
+ product = __salt__["cmd.run"]("{0} smbios.system.product".format(kenv))
+ maker = __salt__["cmd.run"]("{0} smbios.system.maker".format(kenv))
+ if product.startswith("VMware"):
+ grains["virtual"] = "VMware"
+ if product.startswith("VirtualBox"):
+ grains["virtual"] = "VirtualBox"
+ if maker.startswith("Xen"):
+ grains["virtual_subtype"] = "{0} {1}".format(maker, product)
+ grains["virtual"] = "xen"
+ if maker.startswith("Microsoft") and product.startswith("Virtual"):
+ grains["virtual"] = "VirtualPC"
+ if maker.startswith("OpenStack"):
+ grains["virtual"] = "OpenStack"
+ if maker.startswith("Bochs"):
+ grains["virtual"] = "kvm"
if sysctl:
- hv_vendor = __salt__['cmd.run']('{0} -n hw.hv_vendor'.format(sysctl))
- model = __salt__['cmd.run']('{0} -n hw.model'.format(sysctl))
- jail = __salt__['cmd.run'](
- '{0} -n security.jail.jailed'.format(sysctl)
- )
- if 'bhyve' in hv_vendor:
- grains['virtual'] = 'bhyve'
- elif 'QEMU Virtual CPU' in model:
- grains['virtual'] = 'kvm'
- if jail == '1':
- grains['virtual_subtype'] = 'jail'
- elif osdata['kernel'] == 'OpenBSD':
- if 'manufacturer' in osdata:
- if osdata['manufacturer'] in ['QEMU', 'Red Hat', 'Joyent']:
- grains['virtual'] = 'kvm'
- if osdata['manufacturer'] == 'OpenBSD':
- grains['virtual'] = 'vmm'
- elif osdata['kernel'] == 'SunOS':
- if grains['virtual'] == 'LDOM':
+ hv_vendor = __salt__["cmd.run"]("{0} -n hw.hv_vendor".format(sysctl))
+ model = __salt__["cmd.run"]("{0} -n hw.model".format(sysctl))
+ jail = __salt__["cmd.run"]("{0} -n security.jail.jailed".format(sysctl))
+ if "bhyve" in hv_vendor:
+ grains["virtual"] = "bhyve"
+ elif "QEMU Virtual CPU" in model:
+ grains["virtual"] = "kvm"
+ if jail == "1":
+ grains["virtual_subtype"] = "jail"
+ elif osdata["kernel"] == "OpenBSD":
+ if "manufacturer" in osdata:
+ if osdata["manufacturer"] in ["QEMU", "Red Hat", "Joyent"]:
+ grains["virtual"] = "kvm"
+ if osdata["manufacturer"] == "OpenBSD":
+ grains["virtual"] = "vmm"
+ elif osdata["kernel"] == "SunOS":
+ if grains["virtual"] == "LDOM":
roles = []
- for role in ('control', 'io', 'root', 'service'):
- subtype_cmd = '{0} -c current get -H -o value {1}-role'.format(cmd, role)
- ret = __salt__['cmd.run_all']('{0}'.format(subtype_cmd))
- if ret['stdout'] == 'true':
+ for role in ("control", "io", "root", "service"):
+ subtype_cmd = "{0} -c current get -H -o value {1}-role".format(
+ cmd, role
+ )
+ ret = __salt__["cmd.run_all"]("{0}".format(subtype_cmd))
+ if ret["stdout"] == "true":
roles.append(role)
if roles:
- grains['virtual_subtype'] = roles
+ grains["virtual_subtype"] = roles
else:
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
- zonename = salt.utils.path.which('zonename')
+ zonename = salt.utils.path.which("zonename")
if zonename:
- zone = __salt__['cmd.run']('{0}'.format(zonename))
- if zone != 'global':
- grains['virtual'] = 'zone'
+ zone = __salt__["cmd.run"]("{0}".format(zonename))
+ if zone != "global":
+ grains["virtual"] = "zone"
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
- if isdir('/.SUNWnative'):
- grains['virtual'] = 'zone'
- elif osdata['kernel'] == 'NetBSD':
+ if isdir("/.SUNWnative"):
+ grains["virtual"] = "zone"
+ elif osdata["kernel"] == "NetBSD":
if sysctl:
- if 'QEMU Virtual CPU' in __salt__['cmd.run'](
- '{0} -n machdep.cpu_brand'.format(sysctl)):
- grains['virtual'] = 'kvm'
- elif 'invalid' not in __salt__['cmd.run'](
- '{0} -n machdep.xen.suspend'.format(sysctl)):
- grains['virtual'] = 'Xen PV DomU'
- elif 'VMware' in __salt__['cmd.run'](
- '{0} -n machdep.dmi.system-vendor'.format(sysctl)):
- grains['virtual'] = 'VMware'
+ if "QEMU Virtual CPU" in __salt__["cmd.run"](
+ "{0} -n machdep.cpu_brand".format(sysctl)
+ ):
+ grains["virtual"] = "kvm"
+ elif "invalid" not in __salt__["cmd.run"](
+ "{0} -n machdep.xen.suspend".format(sysctl)
+ ):
+ grains["virtual"] = "Xen PV DomU"
+ elif "VMware" in __salt__["cmd.run"](
+ "{0} -n machdep.dmi.system-vendor".format(sysctl)
+ ):
+ grains["virtual"] = "VMware"
# NetBSD has Xen dom0 support
- elif __salt__['cmd.run'](
- '{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
- if os.path.isfile('/var/run/xenconsoled.pid'):
- grains['virtual_subtype'] = 'Xen Dom0'
+ elif (
+ __salt__["cmd.run"]("{0} -n machdep.idle-mechanism".format(sysctl))
+ == "xen"
+ ):
+ if os.path.isfile("/var/run/xenconsoled.pid"):
+ grains["virtual_subtype"] = "Xen Dom0"
# If we have a virtual_subtype, we're virtual, but maybe we couldn't
# figure out what specific virtual type we were?
- if grains.get('virtual_subtype') and grains['virtual'] == 'physical':
- grains['virtual'] = 'virtual'
+ if grains.get("virtual_subtype") and grains["virtual"] == "physical":
+ grains["virtual"] = "virtual"
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
- 'cannot execute it. Grains output might not be '
- 'accurate.', command
+ "cannot execute it. Grains output might not be "
+ "accurate.",
+ command,
)
return grains
def _virtual_hv(osdata):
- '''
+ """
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
- '''
+ """
grains = {}
# Bail early if we're not running on Xen
try:
- if 'xen' not in osdata['virtual']:
+ if "xen" not in osdata["virtual"]:
return grains
except KeyError:
return grains
@@ -1101,40 +1175,50 @@ def _virtual_hv(osdata):
# Try to get the exact hypervisor version from sysfs
try:
version = {}
- for fn in ('major', 'minor', 'extra'):
- with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
+ for fn in ("major", "minor", "extra"):
+ with salt.utils.files.fopen(
+ "/sys/hypervisor/version/{}".format(fn), "r"
+ ) as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
- grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
- grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
+ grains["virtual_hv_version"] = "{}.{}{}".format(
+ version["major"], version["minor"], version["extra"]
+ )
+ grains["virtual_hv_version_info"] = [
+ version["major"],
+ version["minor"],
+ version["extra"],
+ ]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
- xen_feature_table = {0: 'writable_page_tables',
- 1: 'writable_descriptor_tables',
- 2: 'auto_translated_physmap',
- 3: 'supervisor_mode_kernel',
- 4: 'pae_pgdir_above_4gb',
- 5: 'mmu_pt_update_preserve_ad',
- 7: 'gnttab_map_avail_bits',
- 8: 'hvm_callback_vector',
- 9: 'hvm_safe_pvclock',
- 10: 'hvm_pirqs',
- 11: 'dom0',
- 12: 'grant_map_identity',
- 13: 'memory_op_vnode_supported',
- 14: 'ARM_SMCCC_supported'}
+ xen_feature_table = {
+ 0: "writable_page_tables",
+ 1: "writable_descriptor_tables",
+ 2: "auto_translated_physmap",
+ 3: "supervisor_mode_kernel",
+ 4: "pae_pgdir_above_4gb",
+ 5: "mmu_pt_update_preserve_ad",
+ 7: "gnttab_map_avail_bits",
+ 8: "hvm_callback_vector",
+ 9: "hvm_safe_pvclock",
+ 10: "hvm_pirqs",
+ 11: "dom0",
+ 12: "grant_map_identity",
+ 13: "memory_op_vnode_supported",
+ 14: "ARM_SMCCC_supported",
+ }
try:
- with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
+ with salt.utils.files.fopen("/sys/hypervisor/properties/features", "r") as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
- grains['virtual_hv_features'] = features
- grains['virtual_hv_features_list'] = enabled_features
+ grains["virtual_hv_features"] = features
+ grains["virtual_hv_features_list"] = enabled_features
except (IOError, OSError, KeyError):
pass
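# Editorial sketch, not part of the patch: decoding a hypothetical features
# mask with the xen_feature_table above. 0x801 has bits 0 and 11 set.
example_features = "0x801"
example_enabled = [
    feat
    for bit, feat in sorted(xen_feature_table.items())
    if int(example_features, 16) & (1 << bit)
]
assert example_enabled == ["writable_page_tables", "dom0"]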
@@ -1142,45 +1226,44 @@ def _virtual_hv(osdata):
def _ps(osdata):
- '''
+ """
Return the ps grain
- '''
+ """
grains = {}
- bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
- if osdata['os'] in bsd_choices:
- grains['ps'] = 'ps auxwww'
- elif osdata['os_family'] == 'Solaris':
- grains['ps'] = '/usr/ucb/ps auxwww'
- elif osdata['os'] == 'Windows':
- grains['ps'] = 'tasklist.exe'
- elif osdata.get('virtual', '') == 'openvzhn':
- grains['ps'] = (
- 'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" '
- '/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
- '| awk \'{ $7=\"\"; print }\''
+ bsd_choices = ("FreeBSD", "NetBSD", "OpenBSD", "MacOS")
+ if osdata["os"] in bsd_choices:
+ grains["ps"] = "ps auxwww"
+ elif osdata["os_family"] == "Solaris":
+ grains["ps"] = "/usr/ucb/ps auxwww"
+ elif osdata["os"] == "Windows":
+ grains["ps"] = "tasklist.exe"
+ elif osdata.get("virtual", "") == "openvzhn":
+ grains["ps"] = (
+ 'ps -fH -p $(grep -l "^envID:[[:space:]]*0\\$" '
+ '/proc/[0-9]*/status | sed -e "s=/proc/\\([0-9]*\\)/.*=\\1=") '
+ "| awk '{ $7=\"\"; print }'"
)
- elif osdata['os_family'] == 'AIX':
- grains['ps'] = '/usr/bin/ps auxww'
- elif osdata['os_family'] == 'NILinuxRT':
- grains['ps'] = 'ps -o user,pid,ppid,tty,time,comm'
+ elif osdata["os_family"] == "AIX":
+ grains["ps"] = "/usr/bin/ps auxww"
+ elif osdata["os_family"] == "NILinuxRT":
+ grains["ps"] = "ps -o user,pid,ppid,tty,time,comm"
else:
- grains['ps'] = 'ps -efHww'
+ grains["ps"] = "ps -efHww"
return grains
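# Editorial sketch, not part of the patch: _ps() keyed off minimal,
# hypothetical osdata dicts.
assert _ps({"os": "FreeBSD", "os_family": "FreeBSD"})["ps"] == "ps auxwww"
assert _ps({"os": "Gentoo", "os_family": "Gentoo"})["ps"] == "ps -efHww"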
def _clean_value(key, val):
- '''
+ """
Clean out well-known bogus values.
If it isn't clean (for example has value 'None'), return None.
Otherwise, return the original value.
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
- '''
- if (val is None or not val or
- re.match('none', val, flags=re.IGNORECASE)):
+ """
+ if val is None or not val or re.match("none", val, flags=re.IGNORECASE):
return None
- elif 'uuid' in key:
+ elif "uuid" in key:
        # Try each version (1-4) of RFC4122 to check if it's actually a UUID
for uuidver in range(1, 5):
try:
@@ -1188,69 +1271,83 @@ def _clean_value(key, val):
return val
except ValueError:
continue
- log.trace('HW %s value %s is an invalid UUID', key, val.replace('\n', ' '))
+ log.trace("HW %s value %s is an invalid UUID", key, val.replace("\n", " "))
return None
- elif re.search('serial|part|version', key):
+ elif re.search("serial|part|version", key):
        # 'To be filled by O.E.M.'
# 'Not applicable' etc.
# 'Not specified' etc.
# 0000000, 1234567 etc.
# begone!
- if (re.match(r'^[0]+$', val) or
- re.match(r'[0]?1234567[8]?[9]?[0]?', val) or
- re.search(r'sernum|part[_-]?number|specified|filled|applicable', val, flags=re.IGNORECASE)):
+ if (
+ re.match(r"^[0]+$", val)
+ or re.match(r"[0]?1234567[8]?[9]?[0]?", val)
+ or re.search(
+ r"sernum|part[_-]?number|specified|filled|applicable",
+ val,
+ flags=re.IGNORECASE,
+ )
+ ):
return None
- elif re.search('asset|manufacturer', key):
+ elif re.search("asset|manufacturer", key):
# AssetTag0. Manufacturer04. Begone.
- if re.search(r'manufacturer|to be filled|available|asset|^no(ne|t)', val, flags=re.IGNORECASE):
+ if re.search(
+ r"manufacturer|to be filled|available|asset|^no(ne|t)",
+ val,
+ flags=re.IGNORECASE,
+ ):
return None
else:
# map unspecified, undefined, unknown & whatever to None
- if (re.search(r'to be filled', val, flags=re.IGNORECASE) or
- re.search(r'un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)',
- val, flags=re.IGNORECASE)):
+ if re.search(r"to be filled", val, flags=re.IGNORECASE) or re.search(
+ r"un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)",
+ val,
+ flags=re.IGNORECASE,
+ ):
return None
return val
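# Editorial sketch, not part of the patch: typical firmware placeholder
# values are dropped, real ones pass through ("PowerEdge R740" is made up).
assert _clean_value("serialnumber", "To be filled by O.E.M.") is None
assert _clean_value("serialnumber", "0000000") is None
assert _clean_value("productname", "PowerEdge R740") == "PowerEdge R740"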
def _windows_os_release_grain(caption, product_type):
- '''
+ """
    Helper function for getting the osrelease grain.
- '''
+ """
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
- version = 'Unknown'
- release = ''
- if 'Server' in caption:
- for item in caption.split(' '):
+ version = "Unknown"
+ release = ""
+ if "Server" in caption:
+ for item in caption.split(" "):
# If it's all digits, then it's version
- if re.match(r'\d+', item):
+ if re.match(r"\d+", item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
- if re.match(r'^R\d+$', item):
+ if re.match(r"^R\d+$", item):
release = item
- os_release = '{0}Server{1}'.format(version, release)
+ os_release = "{0}Server{1}".format(version, release)
else:
- for item in caption.split(' '):
+ for item in caption.split(" "):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
- if re.match(r'^(\d+(\.\d+)?)|Thin|Vista|XP$', item):
+ if re.match(r"^(\d+(\.\d+)?)|Thin|Vista|XP$", item):
version = item
os_release = version
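    # Editorial worked example, not part of the patch, with hypothetical captions:
    #   "Microsoft Windows Server 2012 R2 Standard" -> version "2012", release "R2"
    #                                                -> os_release "2012ServerR2"
    #   "Microsoft Windows 10 Pro"                   -> os_release "10"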
    # If the version is still Unknown, fall back to the old way of getting
# the os_release
# https://github.com/saltstack/salt/issues/52339
- if os_release in ['Unknown']:
+ if os_release in ["Unknown"]:
os_release = platform.release()
- server = {'Vista': '2008Server',
- '7': '2008ServerR2',
- '8': '2012Server',
- '8.1': '2012ServerR2',
- '10': '2016Server'}
+ server = {
+ "Vista": "2008Server",
+ "7": "2008ServerR2",
+ "8": "2012Server",
+ "8.1": "2012ServerR2",
+ "10": "2016Server",
+ }
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()`
# function started reporting the Desktop version instead of the
@@ -1265,9 +1362,9 @@ def _windows_os_release_grain(caption, product_type):
def _windows_platform_data():
- '''
+ """
Use the platform module for as much as we can.
- '''
+ """
# Provides:
# kernelrelease
# kernelversion
@@ -1302,145 +1399,151 @@ def _windows_platform_data():
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
- motherboard = {'product': None,
- 'serial': None}
+ motherboard = {"product": None, "serial": None}
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
- motherboard['product'] = motherboardinfo.Product
- motherboard['serial'] = motherboardinfo.SerialNumber
+ motherboard["product"] = motherboardinfo.Product
+ motherboard["serial"] = motherboardinfo.SerialNumber
except IndexError:
- log.debug('Motherboard info not available on this system')
+ log.debug("Motherboard info not available on this system")
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
net_info = salt.utils.win_osinfo.get_join_info()
service_pack = None
- if info['ServicePackMajor'] > 0:
- service_pack = ''.join(['SP', six.text_type(info['ServicePackMajor'])])
+ if info["ServicePackMajor"] > 0:
+ service_pack = "".join(["SP", six.text_type(info["ServicePackMajor"])])
- os_release = _windows_os_release_grain(caption=osinfo.Caption,
- product_type=osinfo.ProductType)
+ os_release = _windows_os_release_grain(
+ caption=osinfo.Caption, product_type=osinfo.ProductType
+ )
grains = {
- 'kernelrelease': _clean_value('kernelrelease', osinfo.Version),
- 'kernelversion': _clean_value('kernelversion', kernel_version),
- 'osversion': _clean_value('osversion', osinfo.Version),
- 'osrelease': _clean_value('osrelease', os_release),
- 'osservicepack': _clean_value('osservicepack', service_pack),
- 'osmanufacturer': _clean_value('osmanufacturer', osinfo.Manufacturer),
- 'manufacturer': _clean_value('manufacturer', systeminfo.Manufacturer),
- 'productname': _clean_value('productname', systeminfo.Model),
+ "kernelrelease": _clean_value("kernelrelease", osinfo.Version),
+ "kernelversion": _clean_value("kernelversion", kernel_version),
+ "osversion": _clean_value("osversion", osinfo.Version),
+ "osrelease": _clean_value("osrelease", os_release),
+ "osservicepack": _clean_value("osservicepack", service_pack),
+ "osmanufacturer": _clean_value("osmanufacturer", osinfo.Manufacturer),
+ "manufacturer": _clean_value("manufacturer", systeminfo.Manufacturer),
+ "productname": _clean_value("productname", systeminfo.Model),
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
- 'biosversion': _clean_value('biosversion', biosinfo.Name.strip()),
- 'serialnumber': _clean_value('serialnumber', biosinfo.SerialNumber),
- 'osfullname': _clean_value('osfullname', osinfo.Caption),
- 'timezone': _clean_value('timezone', timeinfo.Description),
- 'windowsdomain': _clean_value('windowsdomain', net_info['Domain']),
- 'windowsdomaintype': _clean_value('windowsdomaintype', net_info['DomainType']),
- 'motherboard': {
- 'productname': _clean_value('motherboard.productname', motherboard['product']),
- 'serialnumber': _clean_value('motherboard.serialnumber', motherboard['serial']),
- }
+ "biosversion": _clean_value("biosversion", biosinfo.Name.strip()),
+ "serialnumber": _clean_value("serialnumber", biosinfo.SerialNumber),
+ "osfullname": _clean_value("osfullname", osinfo.Caption),
+ "timezone": _clean_value("timezone", timeinfo.Description),
+ "windowsdomain": _clean_value("windowsdomain", net_info["Domain"]),
+ "windowsdomaintype": _clean_value(
+ "windowsdomaintype", net_info["DomainType"]
+ ),
+ "motherboard": {
+ "productname": _clean_value(
+ "motherboard.productname", motherboard["product"]
+ ),
+ "serialnumber": _clean_value(
+ "motherboard.serialnumber", motherboard["serial"]
+ ),
+ },
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
- if 'VRTUAL' in biosinfo.Version: # (not a typo)
- grains['virtual'] = 'HyperV'
- elif 'A M I' in biosinfo.Version:
- grains['virtual'] = 'VirtualPC'
- elif 'VMware' in systeminfo.Model:
- grains['virtual'] = 'VMware'
- elif 'VirtualBox' in systeminfo.Model:
- grains['virtual'] = 'VirtualBox'
- elif 'Xen' in biosinfo.Version:
- grains['virtual'] = 'Xen'
- if 'HVM domU' in systeminfo.Model:
- grains['virtual_subtype'] = 'HVM domU'
- elif 'OpenStack' in systeminfo.Model:
- grains['virtual'] = 'OpenStack'
- elif 'AMAZON' in biosinfo.Version:
- grains['virtual'] = 'EC2'
+ if "VRTUAL" in biosinfo.Version: # (not a typo)
+ grains["virtual"] = "HyperV"
+ elif "A M I" in biosinfo.Version:
+ grains["virtual"] = "VirtualPC"
+ elif "VMware" in systeminfo.Model:
+ grains["virtual"] = "VMware"
+ elif "VirtualBox" in systeminfo.Model:
+ grains["virtual"] = "VirtualBox"
+ elif "Xen" in biosinfo.Version:
+ grains["virtual"] = "Xen"
+ if "HVM domU" in systeminfo.Model:
+ grains["virtual_subtype"] = "HVM domU"
+ elif "OpenStack" in systeminfo.Model:
+ grains["virtual"] = "OpenStack"
+ elif "AMAZON" in biosinfo.Version:
+ grains["virtual"] = "EC2"
return grains
def _osx_platform_data():
- '''
+ """
Additional data for macOS systems
Returns: A dictionary containing values for the following:
- model_name
- boot_rom_version
- smc_version
- system_serialnumber
- '''
- cmd = 'system_profiler SPHardwareDataType'
- hardware = __salt__['cmd.run'](cmd)
+ """
+ cmd = "system_profiler SPHardwareDataType"
+ hardware = __salt__["cmd.run"](cmd)
grains = {}
for line in hardware.splitlines():
- field_name, _, field_val = line.partition(': ')
+ field_name, _, field_val = line.partition(": ")
if field_name.strip() == "Model Name":
- key = 'model_name'
+ key = "model_name"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Boot ROM Version":
- key = 'boot_rom_version'
+ key = "boot_rom_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "SMC Version (system)":
- key = 'smc_version'
+ key = "smc_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Serial Number (system)":
- key = 'system_serialnumber'
+ key = "system_serialnumber"
grains[key] = _clean_value(key, field_val)
return grains
def id_():
- '''
+ """
Return the id
- '''
- return {'id': __opts__.get('id', '')}
+ """
+ return {"id": __opts__.get("id", "")}
-_REPLACE_LINUX_RE = re.compile(r'\W(?:gnu/)?linux', re.IGNORECASE)
+_REPLACE_LINUX_RE = re.compile(r"\W(?:gnu/)?linux", re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
- 'redhatente': 'RedHat',
- 'gentoobase': 'Gentoo',
- 'archarm': 'Arch ARM',
- 'arch': 'Arch',
- 'debian': 'Debian',
- 'raspbian': 'Raspbian',
- 'fedoraremi': 'Fedora',
- 'chapeau': 'Chapeau',
- 'korora': 'Korora',
- 'amazonami': 'Amazon',
- 'alt': 'ALT',
- 'enterprise': 'OEL',
- 'oracleserv': 'OEL',
- 'cloudserve': 'CloudLinux',
- 'cloudlinux': 'CloudLinux',
- 'pidora': 'Fedora',
- 'scientific': 'ScientificLinux',
- 'synology': 'Synology',
- 'nilrt': 'NILinuxRT',
- 'poky': 'Poky',
- 'manjaro': 'Manjaro',
- 'manjarolin': 'Manjaro',
- 'univention': 'Univention',
- 'antergos': 'Antergos',
- 'sles': 'SUSE',
- 'void': 'Void',
- 'slesexpand': 'RES',
- 'linuxmint': 'Mint',
- 'neon': 'KDE neon',
+ "redhatente": "RedHat",
+ "gentoobase": "Gentoo",
+ "archarm": "Arch ARM",
+ "arch": "Arch",
+ "debian": "Debian",
+ "raspbian": "Raspbian",
+ "fedoraremi": "Fedora",
+ "chapeau": "Chapeau",
+ "korora": "Korora",
+ "amazonami": "Amazon",
+ "alt": "ALT",
+ "enterprise": "OEL",
+ "oracleserv": "OEL",
+ "cloudserve": "CloudLinux",
+ "cloudlinux": "CloudLinux",
+ "pidora": "Fedora",
+ "scientific": "ScientificLinux",
+ "synology": "Synology",
+ "nilrt": "NILinuxRT",
+ "poky": "Poky",
+ "manjaro": "Manjaro",
+ "manjarolin": "Manjaro",
+ "univention": "Univention",
+ "antergos": "Antergos",
+ "sles": "SUSE",
+ "void": "Void",
+ "slesexpand": "RES",
+ "linuxmint": "Mint",
+ "neon": "KDE neon",
}
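# Editorial sketch, not part of the patch: how os_data() builds the lookup
# key, traced for one hypothetical osfullname.
_example_name = _REPLACE_LINUX_RE.sub("", "Red Hat Enterprise Linux Server").strip()
_example_short = _example_name.replace(" ", "").lower()[:10]
assert _example_short == "redhatente"
assert _OS_NAME_MAP.get(_example_short) == "RedHat"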
# Map the 'os' grain to the 'os_family' grain
@@ -1448,71 +1551,71 @@ _OS_NAME_MAP = {
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
- 'Ubuntu': 'Debian',
- 'Fedora': 'RedHat',
- 'Chapeau': 'RedHat',
- 'Korora': 'RedHat',
- 'FedBerry': 'RedHat',
- 'CentOS': 'RedHat',
- 'GoOSe': 'RedHat',
- 'Scientific': 'RedHat',
- 'Amazon': 'RedHat',
- 'CloudLinux': 'RedHat',
- 'OVS': 'RedHat',
- 'OEL': 'RedHat',
- 'XCP': 'RedHat',
- 'XCP-ng': 'RedHat',
- 'XenServer': 'RedHat',
- 'RES': 'RedHat',
- 'Sangoma': 'RedHat',
- 'Mandrake': 'Mandriva',
- 'ESXi': 'VMware',
- 'Mint': 'Debian',
- 'VMwareESX': 'VMware',
- 'Bluewhite64': 'Bluewhite',
- 'Slamd64': 'Slackware',
- 'SLES': 'Suse',
- 'SUSE Enterprise Server': 'Suse',
- 'SUSE Enterprise Server': 'Suse',
- 'SLED': 'Suse',
- 'openSUSE': 'Suse',
- 'SUSE': 'Suse',
- 'openSUSE Leap': 'Suse',
- 'openSUSE Tumbleweed': 'Suse',
- 'SLES_SAP': 'Suse',
- 'Solaris': 'Solaris',
- 'SmartOS': 'Solaris',
- 'OmniOS': 'Solaris',
- 'OpenIndiana Development': 'Solaris',
- 'OpenIndiana': 'Solaris',
- 'OpenSolaris Development': 'Solaris',
- 'OpenSolaris': 'Solaris',
- 'Oracle Solaris': 'Solaris',
- 'Arch ARM': 'Arch',
- 'Manjaro': 'Arch',
- 'Antergos': 'Arch',
- 'ALT': 'RedHat',
- 'Trisquel': 'Debian',
- 'GCEL': 'Debian',
- 'Linaro': 'Debian',
- 'elementary OS': 'Debian',
- 'elementary': 'Debian',
- 'Univention': 'Debian',
- 'ScientificLinux': 'RedHat',
- 'Raspbian': 'Debian',
- 'Devuan': 'Debian',
- 'antiX': 'Debian',
- 'Kali': 'Debian',
- 'neon': 'Debian',
- 'Cumulus': 'Debian',
- 'Deepin': 'Debian',
- 'NILinuxRT': 'NILinuxRT',
- 'KDE neon': 'Debian',
- 'Void': 'Void',
- 'IDMS': 'Debian',
- 'Funtoo': 'Gentoo',
- 'AIX': 'AIX',
- 'TurnKey': 'Debian',
+ "Ubuntu": "Debian",
+ "Fedora": "RedHat",
+ "Chapeau": "RedHat",
+ "Korora": "RedHat",
+ "FedBerry": "RedHat",
+ "CentOS": "RedHat",
+ "GoOSe": "RedHat",
+ "Scientific": "RedHat",
+ "Amazon": "RedHat",
+ "CloudLinux": "RedHat",
+ "OVS": "RedHat",
+ "OEL": "RedHat",
+ "XCP": "RedHat",
+ "XCP-ng": "RedHat",
+ "XenServer": "RedHat",
+ "RES": "RedHat",
+ "Sangoma": "RedHat",
+ "Mandrake": "Mandriva",
+ "ESXi": "VMware",
+ "Mint": "Debian",
+ "VMwareESX": "VMware",
+ "Bluewhite64": "Bluewhite",
+ "Slamd64": "Slackware",
+ "SLES": "Suse",
+ "SUSE Enterprise Server": "Suse",
+ "SUSE Enterprise Server": "Suse",
+ "SLED": "Suse",
+ "openSUSE": "Suse",
+ "SUSE": "Suse",
+ "openSUSE Leap": "Suse",
+ "openSUSE Tumbleweed": "Suse",
+ "SLES_SAP": "Suse",
+ "Solaris": "Solaris",
+ "SmartOS": "Solaris",
+ "OmniOS": "Solaris",
+ "OpenIndiana Development": "Solaris",
+ "OpenIndiana": "Solaris",
+ "OpenSolaris Development": "Solaris",
+ "OpenSolaris": "Solaris",
+ "Oracle Solaris": "Solaris",
+ "Arch ARM": "Arch",
+ "Manjaro": "Arch",
+ "Antergos": "Arch",
+ "ALT": "RedHat",
+ "Trisquel": "Debian",
+ "GCEL": "Debian",
+ "Linaro": "Debian",
+ "elementary OS": "Debian",
+ "elementary": "Debian",
+ "Univention": "Debian",
+ "ScientificLinux": "RedHat",
+ "Raspbian": "Debian",
+ "Devuan": "Debian",
+ "antiX": "Debian",
+ "Kali": "Debian",
+ "neon": "Debian",
+ "Cumulus": "Debian",
+ "Deepin": "Debian",
+ "NILinuxRT": "NILinuxRT",
+ "KDE neon": "Debian",
+ "Void": "Void",
+ "IDMS": "Debian",
+ "Funtoo": "Gentoo",
+ "AIX": "AIX",
+ "TurnKey": "Debian",
}
# Matches any possible format:
@@ -1522,36 +1625,41 @@ _OS_FAMILY_MAP = {
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
-_LSB_REGEX = re.compile((
- '^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
- '([\\w\\s\\.\\-_]+)(?:\'|")?'
-))
+_LSB_REGEX = re.compile(
+ (
+ "^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:'|\")?"
+ "([\\w\\s\\.\\-_]+)(?:'|\")?"
+ )
+)
def _linux_bin_exists(binary):
- '''
+ """
Does a binary exist in linux (depends on which, type, or whereis)
- '''
- for search_cmd in ('which', 'type -ap'):
+ """
+ for search_cmd in ("which", "type -ap"):
try:
- return __salt__['cmd.retcode'](
- '{0} {1}'.format(search_cmd, binary)
- ) == 0
+ return __salt__["cmd.retcode"]("{0} {1}".format(search_cmd, binary)) == 0
except salt.exceptions.CommandExecutionError:
pass
try:
- return len(__salt__['cmd.run_all'](
- 'whereis -b {0}'.format(binary)
- )['stdout'].split()) > 1
+ return (
+ len(
+ __salt__["cmd.run_all"]("whereis -b {0}".format(binary))[
+ "stdout"
+ ].split()
+ )
+ > 1
+ )
except salt.exceptions.CommandExecutionError:
return False
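# Editorial sketch, not part of the patch: the whereis fallback above counts
# whitespace-separated fields; a bare "name:" means the binary was not found.
assert len("getenforce: /usr/sbin/getenforce".split()) > 1  # found
assert not len("getenforce:".split()) > 1  # not found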
def _get_interfaces():
- '''
+ """
Provide a dict of the connected interfaces and their ip addresses
- '''
+ """
global _INTERFACES
if not _INTERFACES:
@@ -1562,40 +1670,40 @@ def _get_interfaces():
def _parse_lsb_release():
ret = {}
try:
- log.trace('Attempting to parse /etc/lsb-release')
- with salt.utils.files.fopen('/etc/lsb-release') as ifile:
+ log.trace("Attempting to parse /etc/lsb-release")
+ with salt.utils.files.fopen("/etc/lsb-release") as ifile:
for line in ifile:
try:
- key, value = _LSB_REGEX.match(line.rstrip('\n')).groups()[:2]
+ key, value = _LSB_REGEX.match(line.rstrip("\n")).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
- ret['lsb_{0}'.format(key.lower())] = value.rstrip()
+ ret["lsb_{0}".format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
- log.trace('Failed to parse /etc/lsb-release: %s', exc)
+ log.trace("Failed to parse /etc/lsb-release: %s", exc)
return ret
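# Editorial sketch, not part of the patch: _LSB_REGEX against one
# hypothetical /etc/lsb-release line.
assert _LSB_REGEX.match('DISTRIB_ID="Ubuntu"').groups() == ("DISTRIB_ID", "Ubuntu")
# ...which _parse_lsb_release() stores as {"lsb_distrib_id": "Ubuntu"}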
def _parse_os_release(*os_release_files):
- '''
+ """
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
- '''
+ """
ret = {}
for filename in os_release_files:
try:
with salt.utils.files.fopen(filename) as ifile:
- regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
+ regex = re.compile("^([\\w]+)=(?:'|\")?(.*?)(?:'|\")?$")
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
- r'\\([$"\'\\`])', r'\1', match.group(2)
+ r'\\([$"\'\\`])', r"\1", match.group(2)
)
break
except (IOError, OSError):
@@ -1605,7 +1713,7 @@ def _parse_os_release(*os_release_files):
def _parse_cpe_name(cpe):
- '''
+ """
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
@@ -1616,38 +1724,42 @@ def _parse_cpe_name(cpe):
:param cpe:
:return:
- '''
+ """
part = {
- 'o': 'operating system',
- 'h': 'hardware',
- 'a': 'application',
+ "o": "operating system",
+ "h": "hardware",
+ "a": "application",
}
ret = {}
- cpe = (cpe or '').split(':')
- if len(cpe) > 4 and cpe[0] == 'cpe':
- if cpe[1].startswith('/'): # WFN to URI
- ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
- ret['phase'] = cpe[5] if len(cpe) > 5 else None
- ret['part'] = part.get(cpe[1][1:])
- elif len(cpe) == 6 and cpe[1] == '2.3': # WFN to a string
- ret['vendor'], ret['product'], ret['version'] = [x if x != '*' else None for x in cpe[3:6]]
- ret['phase'] = None
- ret['part'] = part.get(cpe[2])
- elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == '2.3': # WFN to a string
- ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
- ret['part'] = part.get(cpe[2])
+ cpe = (cpe or "").split(":")
+ if len(cpe) > 4 and cpe[0] == "cpe":
+ if cpe[1].startswith("/"): # WFN to URI
+ ret["vendor"], ret["product"], ret["version"] = cpe[2:5]
+ ret["phase"] = cpe[5] if len(cpe) > 5 else None
+ ret["part"] = part.get(cpe[1][1:])
+ elif len(cpe) == 6 and cpe[1] == "2.3": # WFN to a string
+ ret["vendor"], ret["product"], ret["version"] = [
+ x if x != "*" else None for x in cpe[3:6]
+ ]
+ ret["phase"] = None
+ ret["part"] = part.get(cpe[2])
+ elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == "2.3": # WFN to a string
+ ret["vendor"], ret["product"], ret["version"], ret["phase"] = [
+ x if x != "*" else None for x in cpe[3:7]
+ ]
+ ret["part"] = part.get(cpe[2])
return ret
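# Editorial sketch, not part of the patch: the two CPE formats handled above,
# with hypothetical inputs.
assert _parse_cpe_name("cpe:/o:opensuse:leap:15.0") == {
    "vendor": "opensuse",
    "product": "leap",
    "version": "15.0",
    "phase": None,
    "part": "operating system",
}
assert _parse_cpe_name("cpe:2.3:o:amazon:amazon_linux:2")["vendor"] == "amazon"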
def os_data():
- '''
+ """
Return grains pertaining to the operating system
- '''
+ """
grains = {
- 'num_gpus': 0,
- 'gpus': [],
- }
+ "num_gpus": 0,
+ "gpus": [],
+ }
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
@@ -1657,79 +1769,83 @@ def os_data():
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
- (grains['kernel'], grains['nodename'],
- grains['kernelrelease'], grains['kernelversion'], grains['cpuarch'], _) = platform.uname()
+ (
+ grains["kernel"],
+ grains["nodename"],
+ grains["kernelrelease"],
+ grains["kernelversion"],
+ grains["cpuarch"],
+ _,
+ ) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.platform.is_proxy():
- grains['kernel'] = 'proxy'
- grains['kernelrelease'] = 'proxy'
- grains['kernelversion'] = 'proxy'
- grains['osrelease'] = 'proxy'
- grains['os'] = 'proxy'
- grains['os_family'] = 'proxy'
- grains['osfullname'] = 'proxy'
+ grains["kernel"] = "proxy"
+ grains["kernelrelease"] = "proxy"
+ grains["kernelversion"] = "proxy"
+ grains["osrelease"] = "proxy"
+ grains["os"] = "proxy"
+ grains["os_family"] = "proxy"
+ grains["osfullname"] = "proxy"
elif salt.utils.platform.is_windows():
- grains['os'] = 'Windows'
- grains['os_family'] = 'Windows'
+ grains["os"] = "Windows"
+ grains["os_family"] = "Windows"
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
- if 'Server' in grains['osrelease']:
- osrelease_info = grains['osrelease'].split('Server', 1)
- osrelease_info[1] = osrelease_info[1].lstrip('R')
+ if "Server" in grains["osrelease"]:
+ osrelease_info = grains["osrelease"].split("Server", 1)
+ osrelease_info[1] = osrelease_info[1].lstrip("R")
else:
- osrelease_info = grains['osrelease'].split('.')
+ osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
- grains['osrelease_info'] = tuple(osrelease_info)
+ grains["osrelease_info"] = tuple(osrelease_info)
- grains['osfinger'] = '{os}-{ver}'.format(
- os=grains['os'],
- ver=grains['osrelease'])
+ grains["osfinger"] = "{os}-{ver}".format(
+ os=grains["os"], ver=grains["osrelease"]
+ )
- grains['init'] = 'Windows'
+ grains["init"] = "Windows"
return grains
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
- if _linux_bin_exists('selinuxenabled'):
- log.trace('Adding selinux grains')
- grains['selinux'] = {}
- grains['selinux']['enabled'] = __salt__['cmd.retcode'](
- 'selinuxenabled'
- ) == 0
- if _linux_bin_exists('getenforce'):
- grains['selinux']['enforced'] = __salt__['cmd.run'](
- 'getenforce'
+ if _linux_bin_exists("selinuxenabled"):
+ log.trace("Adding selinux grains")
+ grains["selinux"] = {}
+ grains["selinux"]["enabled"] = (
+ __salt__["cmd.retcode"]("selinuxenabled") == 0
+ )
+ if _linux_bin_exists("getenforce"):
+ grains["selinux"]["enforced"] = __salt__["cmd.run"](
+ "getenforce"
).strip()
# Add systemd grain, if you have it
- if _linux_bin_exists('systemctl') and _linux_bin_exists('localectl'):
- log.trace('Adding systemd grains')
- grains['systemd'] = {}
- systemd_info = __salt__['cmd.run'](
- 'systemctl --version'
- ).splitlines()
- grains['systemd']['version'] = systemd_info[0].split()[1]
- grains['systemd']['features'] = systemd_info[1]
+ if _linux_bin_exists("systemctl") and _linux_bin_exists("localectl"):
+ log.trace("Adding systemd grains")
+ grains["systemd"] = {}
+ systemd_info = __salt__["cmd.run"]("systemctl --version").splitlines()
+ grains["systemd"]["version"] = systemd_info[0].split()[1]
+ grains["systemd"]["features"] = systemd_info[1]
# Add init grain
- grains['init'] = 'unknown'
- log.trace('Adding init grain')
+ grains["init"] = "unknown"
+ log.trace("Adding init grain")
try:
- os.stat('/run/systemd/system')
- grains['init'] = 'systemd'
+ os.stat("/run/systemd/system")
+ grains["init"] = "systemd"
except (OSError, IOError):
try:
- with salt.utils.files.fopen('/proc/1/cmdline') as fhr:
- init_cmdline = fhr.read().replace('\x00', ' ').split()
+ with salt.utils.files.fopen("/proc/1/cmdline") as fhr:
+ init_cmdline = fhr.read().replace("\x00", " ").split()
except (IOError, OSError):
pass
else:
@@ -1738,53 +1854,52 @@ def os_data():
except IndexError:
                    # Empty init_cmdline
init_bin = None
- log.warning('Unable to fetch data from /proc/1/cmdline')
- if init_bin is not None and init_bin.endswith('bin/init'):
- supported_inits = (b'upstart', b'sysvinit', b'systemd')
+ log.warning("Unable to fetch data from /proc/1/cmdline")
+ if init_bin is not None and init_bin.endswith("bin/init"):
+ supported_inits = (b"upstart", b"sysvinit", b"systemd")
edge_len = max(len(x) for x in supported_inits) - 1
try:
- buf_size = __opts__['file_buffer_size']
+ buf_size = __opts__["file_buffer_size"]
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
- with salt.utils.files.fopen(init_bin, 'rb') as fp_:
- edge = b''
+ with salt.utils.files.fopen(init_bin, "rb") as fp_:
+ edge = b""
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
- item = item.decode('utf-8')
- grains['init'] = item
- buf = b''
+ item = item.decode("utf-8")
+ grains["init"] = item
+ buf = b""
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
- 'Unable to read from init_bin (%s): %s',
- init_bin, exc
+ "Unable to read from init_bin (%s): %s", init_bin, exc
)
- elif salt.utils.path.which('supervisord') in init_cmdline:
- grains['init'] = 'supervisord'
- elif salt.utils.path.which('dumb-init') in init_cmdline:
+ elif salt.utils.path.which("supervisord") in init_cmdline:
+ grains["init"] = "supervisord"
+ elif salt.utils.path.which("dumb-init") in init_cmdline:
# https://github.com/Yelp/dumb-init
- grains['init'] = 'dumb-init'
- elif salt.utils.path.which('tini') in init_cmdline:
+ grains["init"] = "dumb-init"
+ elif salt.utils.path.which("tini") in init_cmdline:
# https://github.com/krallin/tini
- grains['init'] = 'tini'
- elif init_cmdline == ['runit']:
- grains['init'] = 'runit'
- elif '/sbin/my_init' in init_cmdline:
+ grains["init"] = "tini"
+ elif init_cmdline == ["runit"]:
+ grains["init"] = "runit"
+ elif "/sbin/my_init" in init_cmdline:
# Phusion Base docker container use runit for srv mgmt, but
# my_init as pid1
- grains['init'] = 'runit'
+ grains["init"] = "runit"
else:
log.debug(
- 'Could not determine init system from command line: (%s)',
- ' '.join(init_cmdline)
+ "Could not determine init system from command line: (%s)",
+ " ".join(init_cmdline),
)
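# Editorial sketch, not part of the patch: the buffered init_bin scan above
# carries the last edge_len bytes between reads so a marker split across a
# chunk boundary is still found. Standalone reimplementation:
import io
def _scan_for_marker(stream, markers, buf_size=8):
    edge_len = max(len(m) for m in markers) - 1
    edge = b""
    buf = stream.read(buf_size).lower()
    while buf:
        buf = edge + buf
        for marker in markers:
            if marker in buf:
                return marker
        edge = buf[-edge_len:]
        buf = stream.read(buf_size).lower()
    return None
# "systemd" straddles the first 8-byte chunk here, yet is still detected.
assert _scan_for_marker(
    io.BytesIO(b"...systemd..."), (b"upstart", b"sysvinit", b"systemd")
) == b"systemd"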
# Add lsb grains on any distro with lsb-release. Note that this import
@@ -1792,14 +1907,14 @@ def os_data():
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
- log.trace('Getting lsb_release distro information')
+ log.trace("Getting lsb_release distro information")
import lsb_release # pylint: disable=import-error
+
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
- lsb_param = 'lsb_{0}{1}'.format(
- '' if key.startswith('distrib_') else 'distrib_',
- key
+ lsb_param = "lsb_{0}{1}".format(
+ "" if key.startswith("distrib_") else "distrib_", key
)
grains[lsb_param] = value
# Catch a NameError to workaround possible breakage in lsb_release
@@ -1807,76 +1922,81 @@ def os_data():
except (ImportError, NameError):
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
- log.trace('lsb_release python bindings not available')
+ log.trace("lsb_release python bindings not available")
grains.update(_parse_lsb_release())
- if grains.get('lsb_distrib_description', '').lower().startswith('antergos'):
+ if grains.get("lsb_distrib_description", "").lower().startswith("antergos"):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
- grains['osfullname'] = 'Antergos Linux'
- elif 'lsb_distrib_id' not in grains:
- log.trace(
- 'Failed to get lsb_distrib_id, trying to parse os-release'
- )
- os_release = _parse_os_release('/etc/os-release', '/usr/lib/os-release')
+ grains["osfullname"] = "Antergos Linux"
+ elif "lsb_distrib_id" not in grains:
+ log.trace("Failed to get lsb_distrib_id, trying to parse os-release")
+ os_release = _parse_os_release("/etc/os-release", "/usr/lib/os-release")
if os_release:
- if 'NAME' in os_release:
- grains['lsb_distrib_id'] = os_release['NAME'].strip()
- if 'VERSION_ID' in os_release:
- grains['lsb_distrib_release'] = os_release['VERSION_ID']
- if 'VERSION_CODENAME' in os_release:
- grains['lsb_distrib_codename'] = os_release['VERSION_CODENAME']
- elif 'PRETTY_NAME' in os_release:
- codename = os_release['PRETTY_NAME']
+ if "NAME" in os_release:
+ grains["lsb_distrib_id"] = os_release["NAME"].strip()
+ if "VERSION_ID" in os_release:
+ grains["lsb_distrib_release"] = os_release["VERSION_ID"]
+ if "VERSION_CODENAME" in os_release:
+ grains["lsb_distrib_codename"] = os_release["VERSION_CODENAME"]
+ elif "PRETTY_NAME" in os_release:
+ codename = os_release["PRETTY_NAME"]
# https://github.com/saltstack/salt/issues/44108
- if os_release['ID'] == 'debian':
- codename_match = re.search(r'\((\w+)\)$', codename)
+ if os_release["ID"] == "debian":
+ codename_match = re.search(r"\((\w+)\)$", codename)
if codename_match:
codename = codename_match.group(1)
- grains['lsb_distrib_codename'] = codename
- if 'CPE_NAME' in os_release:
- cpe = _parse_cpe_name(os_release['CPE_NAME'])
+ grains["lsb_distrib_codename"] = codename
+ if "CPE_NAME" in os_release:
+ cpe = _parse_cpe_name(os_release["CPE_NAME"])
if not cpe:
- log.error('Broken CPE_NAME format in /etc/os-release!')
- elif cpe.get('vendor', '').lower() in ['suse', 'opensuse']:
- grains['os'] = "SUSE"
+ log.error("Broken CPE_NAME format in /etc/os-release!")
+ elif cpe.get("vendor", "").lower() in ["suse", "opensuse"]:
+ grains["os"] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
- grains['osfullname'] = "Leap"
+ grains["osfullname"] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
- grains['osfullname'] = os_release["VERSION"]
+ grains["osfullname"] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
- if cpe.get('version') and cpe.get('vendor') == 'opensuse': # Keep VERSION_ID for SLES
- grains['lsb_distrib_release'] = cpe['version']
+ if (
+ cpe.get("version") and cpe.get("vendor") == "opensuse"
+ ): # Keep VERSION_ID for SLES
+ grains["lsb_distrib_release"] = cpe["version"]
- elif os.path.isfile('/etc/SuSE-release'):
- log.trace('Parsing distrib info from /etc/SuSE-release')
- grains['lsb_distrib_id'] = 'SUSE'
- version = ''
- patch = ''
- with salt.utils.files.fopen('/etc/SuSE-release') as fhr:
+ elif os.path.isfile("/etc/SuSE-release"):
+ log.trace("Parsing distrib info from /etc/SuSE-release")
+ grains["lsb_distrib_id"] = "SUSE"
+ version = ""
+ patch = ""
+ with salt.utils.files.fopen("/etc/SuSE-release") as fhr:
for line in fhr:
- if 'enterprise' in line.lower():
- grains['lsb_distrib_id'] = 'SLES'
- grains['lsb_distrib_codename'] = re.sub(r'\(.+\)', '', line).strip()
- elif 'version' in line.lower():
- version = re.sub(r'[^0-9]', '', line)
- elif 'patchlevel' in line.lower():
- patch = re.sub(r'[^0-9]', '', line)
- grains['lsb_distrib_release'] = version
+ if "enterprise" in line.lower():
+ grains["lsb_distrib_id"] = "SLES"
+ grains["lsb_distrib_codename"] = re.sub(
+ r"\(.+\)", "", line
+ ).strip()
+ elif "version" in line.lower():
+ version = re.sub(r"[^0-9]", "", line)
+ elif "patchlevel" in line.lower():
+ patch = re.sub(r"[^0-9]", "", line)
+ grains["lsb_distrib_release"] = version
if patch:
- grains['lsb_distrib_release'] += '.' + patch
- patchstr = 'SP' + patch
- if grains['lsb_distrib_codename'] and patchstr not in grains['lsb_distrib_codename']:
- grains['lsb_distrib_codename'] += ' ' + patchstr
- if not grains.get('lsb_distrib_codename'):
- grains['lsb_distrib_codename'] = 'n.a'
- elif os.path.isfile('/etc/altlinux-release'):
- log.trace('Parsing distrib info from /etc/altlinux-release')
+ grains["lsb_distrib_release"] += "." + patch
+ patchstr = "SP" + patch
+ if (
+ grains["lsb_distrib_codename"]
+ and patchstr not in grains["lsb_distrib_codename"]
+ ):
+ grains["lsb_distrib_codename"] += " " + patchstr
+ if not grains.get("lsb_distrib_codename"):
+ grains["lsb_distrib_codename"] = "n.a"
+ elif os.path.isfile("/etc/altlinux-release"):
+ log.trace("Parsing distrib info from /etc/altlinux-release")
# ALT Linux
- grains['lsb_distrib_id'] = 'altlinux'
- with salt.utils.files.fopen('/etc/altlinux-release') as ifile:
+ grains["lsb_distrib_id"] = "altlinux"
+ with salt.utils.files.fopen("/etc/altlinux-release") as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
@@ -1884,221 +2004,236 @@ def os_data():
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
- if comps[0] == 'ALT':
- grains['lsb_distrib_release'] = comps[2]
- grains['lsb_distrib_codename'] = \
- comps[3].replace('(', '').replace(')', '')
- elif os.path.isfile('/etc/centos-release'):
- log.trace('Parsing distrib info from /etc/centos-release')
+ if comps[0] == "ALT":
+ grains["lsb_distrib_release"] = comps[2]
+ grains["lsb_distrib_codename"] = (
+ comps[3].replace("(", "").replace(")", "")
+ )
+ elif os.path.isfile("/etc/centos-release"):
+ log.trace("Parsing distrib info from /etc/centos-release")
# CentOS Linux
- grains['lsb_distrib_id'] = 'CentOS'
- with salt.utils.files.fopen('/etc/centos-release') as ifile:
+ grains["lsb_distrib_id"] = "CentOS"
+ with salt.utils.files.fopen("/etc/centos-release") as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
- find_release = re.compile(r'\d+\.\d+')
- find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
+ find_release = re.compile(r"\d+\.\d+")
+ find_codename = re.compile(r"(?<=\()(.*?)(?=\))")
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
- grains['lsb_distrib_release'] = release.group()
+ grains["lsb_distrib_release"] = release.group()
if codename is not None:
- grains['lsb_distrib_codename'] = codename.group()
- elif os.path.isfile('/etc.defaults/VERSION') \
- and os.path.isfile('/etc.defaults/synoinfo.conf'):
- grains['osfullname'] = 'Synology'
+ grains["lsb_distrib_codename"] = codename.group()
+ elif os.path.isfile("/etc.defaults/VERSION") and os.path.isfile(
+ "/etc.defaults/synoinfo.conf"
+ ):
+ grains["osfullname"] = "Synology"
log.trace(
- 'Parsing Synology distrib info from /etc/.defaults/VERSION'
+ "Parsing Synology distrib info from /etc/.defaults/VERSION"
)
- with salt.utils.files.fopen('/etc.defaults/VERSION', 'r') as fp_:
+ with salt.utils.files.fopen("/etc.defaults/VERSION", "r") as fp_:
synoinfo = {}
for line in fp_:
try:
- key, val = line.rstrip('\n').split('=')
+ key, val = line.rstrip("\n").split("=")
except ValueError:
continue
- if key in ('majorversion', 'minorversion',
- 'buildnumber'):
+ if key in ("majorversion", "minorversion", "buildnumber"):
synoinfo[key] = val.strip('"')
if len(synoinfo) != 3:
log.warning(
- 'Unable to determine Synology version info. '
- 'Please report this, as it is likely a bug.'
+ "Unable to determine Synology version info. "
+ "Please report this, as it is likely a bug."
)
else:
- grains['osrelease'] = (
- '{majorversion}.{minorversion}-{buildnumber}'
- .format(**synoinfo)
+ grains[
+ "osrelease"
+ ] = "{majorversion}.{minorversion}-{buildnumber}".format(
+ **synoinfo
)
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
- 'Getting OS name, release, and codename from '
- 'platform.linux_distribution()'
+ "Getting OS name, release, and codename from "
+ "platform.linux_distribution()"
)
- (osname, osrelease, oscodename) = \
- [x.strip('"').strip("'") for x in
- linux_distribution(supported_dists=_supported_dists)]
+ (osname, osrelease, oscodename) = [
+ x.strip('"').strip("'")
+ for x in linux_distribution(supported_dists=_supported_dists)
+ ]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that linux_distribution() does the /etc/lsb-release parsing, but
# we do it anyway here for the sake for full portability.
- if 'osfullname' not in grains:
+ if "osfullname" not in grains:
# If NI Linux RT distribution, set the grains['osfullname'] to 'nilrt'
- if grains.get('lsb_distrib_id', '').lower().startswith('nilrt'):
- grains['osfullname'] = 'nilrt'
+ if grains.get("lsb_distrib_id", "").lower().startswith("nilrt"):
+ grains["osfullname"] = "nilrt"
else:
- grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
- if 'osrelease' not in grains:
+ grains["osfullname"] = grains.get("lsb_distrib_id", osname).strip()
+ if "osrelease" not in grains:
# NOTE: This is a workaround for CentOS 7 os-release bug
# https://bugs.centos.org/view.php?id=8359
# /etc/os-release contains no minor distro release number so we fall back to parse
# /etc/centos-release file instead.
        # Commit introducing this comment should be reverted after the upstream fix is released.
- if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
- grains.pop('lsb_distrib_release', None)
- grains['osrelease'] = grains.get('lsb_distrib_release', osrelease).strip()
- grains['oscodename'] = grains.get('lsb_distrib_codename', '').strip() or oscodename
- if 'Red Hat' in grains['oscodename']:
- grains['oscodename'] = oscodename
- distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
+ if "CentOS Linux 7" in grains.get("lsb_distrib_codename", ""):
+ grains.pop("lsb_distrib_release", None)
+ grains["osrelease"] = grains.get("lsb_distrib_release", osrelease).strip()
+ grains["oscodename"] = (
+ grains.get("lsb_distrib_codename", "").strip() or oscodename
+ )
+ if "Red Hat" in grains["oscodename"]:
+ grains["oscodename"] = oscodename
+ distroname = _REPLACE_LINUX_RE.sub("", grains["osfullname"]).strip()
# return the first ten characters with no spaces, lowercased
- shortname = distroname.replace(' ', '').lower()[:10]
+ shortname = distroname.replace(" ", "").lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
- if 'os' not in grains:
- grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
+ if "os" not in grains:
+ grains["os"] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
- elif grains['kernel'] == 'SunOS':
+ elif grains["kernel"] == "SunOS":
if salt.utils.platform.is_smartos():
# See https://github.com/joyent/smartos-live/issues/224
if HAS_UNAME:
uname_v = os.uname()[3] # format: joyent_20161101T004406Z
else:
uname_v = os.name
- uname_v = uname_v[uname_v.index('_')+1:]
- grains['os'] = grains['osfullname'] = 'SmartOS'
+ uname_v = uname_v[uname_v.index("_") + 1 :]
+ grains["os"] = grains["osfullname"] = "SmartOS"
# store a parsed version of YYYY.MM.DD as osrelease
- grains['osrelease'] = ".".join([
- uname_v.split('T')[0][0:4],
- uname_v.split('T')[0][4:6],
- uname_v.split('T')[0][6:8],
- ])
+ grains["osrelease"] = ".".join(
+ [
+ uname_v.split("T")[0][0:4],
+ uname_v.split("T")[0][4:6],
+ uname_v.split("T")[0][6:8],
+ ]
+ )
        # store an untouched copy of the timestamp in osrelease_stamp
- grains['osrelease_stamp'] = uname_v
- elif os.path.isfile('/etc/release'):
- with salt.utils.files.fopen('/etc/release', 'r') as fp_:
+ grains["osrelease_stamp"] = uname_v
+ elif os.path.isfile("/etc/release"):
+ with salt.utils.files.fopen("/etc/release", "r") as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
- r'((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?'
- r'\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?'
+ r"((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?"
+ r"\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?"
)
- osname, development, osmajorrelease, osminorrelease = release_re.search(rel_data).groups()
+ (
+ osname,
+ development,
+ osmajorrelease,
+ osminorrelease,
+ ) = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
- grains['os'] = grains['osfullname'] = 'Solaris'
- grains['osrelease'] = ''
+ grains["os"] = grains["osfullname"] = "Solaris"
+ grains["osrelease"] = ""
else:
if development is not None:
- osname = ' '.join((osname, development))
+ osname = " ".join((osname, development))
if HAS_UNAME:
uname_v = os.uname()[3]
else:
uname_v = os.name
- grains['os'] = grains['osfullname'] = osname
- if osname in ['Oracle Solaris'] and uname_v.startswith(osmajorrelease):
+ grains["os"] = grains["osfullname"] = osname
+ if osname in ["Oracle Solaris"] and uname_v.startswith(
+ osmajorrelease
+ ):
                    # Oracle Solaris 11 and up have minor version in uname
- grains['osrelease'] = uname_v
- elif osname in ['OmniOS']:
+ grains["osrelease"] = uname_v
+ elif osname in ["OmniOS"]:
# OmniOS
osrelease = []
osrelease.append(osmajorrelease[1:])
osrelease.append(osminorrelease[1:])
- grains['osrelease'] = ".".join(osrelease)
- grains['osrelease_stamp'] = uname_v
+ grains["osrelease"] = ".".join(osrelease)
+ grains["osrelease_stamp"] = uname_v
else:
# Sun Solaris 10 and earlier/comparable
osrelease = []
osrelease.append(osmajorrelease)
if osminorrelease:
osrelease.append(osminorrelease)
- grains['osrelease'] = ".".join(osrelease)
- grains['osrelease_stamp'] = uname_v
+ grains["osrelease"] = ".".join(osrelease)
+ grains["osrelease_stamp"] = uname_v
grains.update(_sunos_cpudata())
- elif grains['kernel'] == 'VMkernel':
- grains['os'] = 'ESXi'
- elif grains['kernel'] == 'Darwin':
- osrelease = __salt__['cmd.run']('sw_vers -productVersion')
- osname = __salt__['cmd.run']('sw_vers -productName')
- osbuild = __salt__['cmd.run']('sw_vers -buildVersion')
- grains['os'] = 'MacOS'
- grains['os_family'] = 'MacOS'
- grains['osfullname'] = "{0} {1}".format(osname, osrelease)
- grains['osrelease'] = osrelease
- grains['osbuild'] = osbuild
- grains['init'] = 'launchd'
+ elif grains["kernel"] == "VMkernel":
+ grains["os"] = "ESXi"
+ elif grains["kernel"] == "Darwin":
+ osrelease = __salt__["cmd.run"]("sw_vers -productVersion")
+ osname = __salt__["cmd.run"]("sw_vers -productName")
+ osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
+ grains["os"] = "MacOS"
+ grains["os_family"] = "MacOS"
+ grains["osfullname"] = "{0} {1}".format(osname, osrelease)
+ grains["osrelease"] = osrelease
+ grains["osbuild"] = osbuild
+ grains["init"] = "launchd"
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
- elif grains['kernel'] == 'AIX':
- osrelease = __salt__['cmd.run']('oslevel')
- osrelease_techlevel = __salt__['cmd.run']('oslevel -r')
- osname = __salt__['cmd.run']('uname')
- grains['os'] = 'AIX'
- grains['osfullname'] = osname
- grains['osrelease'] = osrelease
- grains['osrelease_techlevel'] = osrelease_techlevel
+ elif grains["kernel"] == "AIX":
+ osrelease = __salt__["cmd.run"]("oslevel")
+ osrelease_techlevel = __salt__["cmd.run"]("oslevel -r")
+ osname = __salt__["cmd.run"]("uname")
+ grains["os"] = "AIX"
+ grains["osfullname"] = osname
+ grains["osrelease"] = osrelease
+ grains["osrelease_techlevel"] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
- grains['os'] = grains['kernel']
- if grains['kernel'] == 'FreeBSD':
- grains['osfullname'] = grains['os']
+ grains["os"] = grains["kernel"]
+ if grains["kernel"] == "FreeBSD":
+ grains["osfullname"] = grains["os"]
try:
- grains['osrelease'] = __salt__['cmd.run']('freebsd-version -u').split('-')[0]
+ grains["osrelease"] = __salt__["cmd.run"]("freebsd-version -u").split("-")[
+ 0
+ ]
except salt.exceptions.CommandExecutionError:
# freebsd-version was introduced in 10.0.
# derive osrelease from kernelversion prior to that
- grains['osrelease'] = grains['kernelrelease'].split('-')[0]
+ grains["osrelease"] = grains["kernelrelease"].split("-")[0]
grains.update(_bsd_cpudata(grains))
- if grains['kernel'] in ('OpenBSD', 'NetBSD'):
+ if grains["kernel"] in ("OpenBSD", "NetBSD"):
grains.update(_bsd_cpudata(grains))
- grains['osrelease'] = grains['kernelrelease'].split('-')[0]
- if grains['kernel'] == 'NetBSD':
+ grains["osrelease"] = grains["kernelrelease"].split("-")[0]
+ if grains["kernel"] == "NetBSD":
grains.update(_netbsd_gpu_data())
- if not grains['os']:
- grains['os'] = 'Unknown {0}'.format(grains['kernel'])
- grains['os_family'] = 'Unknown'
+ if not grains["os"]:
+ grains["os"] = "Unknown {0}".format(grains["kernel"])
+ grains["os_family"] = "Unknown"
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
- grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
- grains['os'])
+ grains["os_family"] = _OS_FAMILY_MAP.get(grains["os"], grains["os"])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
- if grains.get('os_family') == 'Debian':
- osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
- elif grains.get('os_family') in ['RedHat', 'Suse']:
+ if grains.get("os_family") == "Debian":
+ osarch = __salt__["cmd.run"]("dpkg --print-architecture").strip()
+ elif grains.get("os_family") in ["RedHat", "Suse"]:
osarch = salt.utils.pkg.rpm.get_osarch()
- elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
+ elif grains.get("os_family") in ("NILinuxRT", "Poky"):
archinfo = {}
- for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
- if line.startswith('arch'):
+ for line in __salt__["cmd.run"]("opkg print-architecture").splitlines():
+ if line.startswith("arch"):
_, arch, priority = line.split()
archinfo[arch.strip()] = int(priority.strip())
# Return osarch in priority order (higher to lower)
osarch = sorted(archinfo, key=archinfo.get, reverse=True)
else:
- osarch = grains['cpuarch']
- grains['osarch'] = osarch
+ osarch = grains["cpuarch"]
+ grains["osarch"] = osarch
grains.update(_memdata(grains))
@@ -2110,68 +2245,78 @@ def os_data():
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
- if grains.get('osrelease', ''):
- osrelease_info = grains['osrelease'].split('.')
+ if grains.get("osrelease", ""):
+ osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
- grains['osrelease_info'] = tuple(osrelease_info)
+ grains["osrelease_info"] = tuple(osrelease_info)
try:
- grains['osmajorrelease'] = int(grains['osrelease_info'][0])
+ grains["osmajorrelease"] = int(grains["osrelease_info"][0])
except (IndexError, TypeError, ValueError):
log.debug(
- 'Unable to derive osmajorrelease from osrelease_info \'%s\'. '
- 'The osmajorrelease grain will not be set.',
- grains['osrelease_info']
+ "Unable to derive osmajorrelease from osrelease_info '%s'. "
+ "The osmajorrelease grain will not be set.",
+ grains["osrelease_info"],
)
- os_name = grains['os' if grains.get('os') in (
- 'Debian', 'FreeBSD', 'OpenBSD', 'NetBSD', 'Mac', 'Raspbian') else 'osfullname']
- grains['osfinger'] = '{0}-{1}'.format(
- os_name, grains['osrelease'] if os_name in ('Ubuntu',) else grains['osrelease_info'][0])
+ os_name = grains[
+ "os"
+ if grains.get("os")
+ in ("Debian", "FreeBSD", "OpenBSD", "NetBSD", "Mac", "Raspbian")
+ else "osfullname"
+ ]
+ grains["osfinger"] = "{0}-{1}".format(
+ os_name,
+ grains["osrelease"]
+ if os_name in ("Ubuntu",)
+ else grains["osrelease_info"][0],
+ )
return grains
def locale_info():
- '''
+ """
Provides
defaultlanguage
defaultencoding
- '''
+ """
grains = {}
- grains['locale_info'] = {}
+ grains["locale_info"] = {}
if salt.utils.platform.is_proxy():
return grains
try:
(
- grains['locale_info']['defaultlanguage'],
- grains['locale_info']['defaultencoding']
+ grains["locale_info"]["defaultlanguage"],
+ grains["locale_info"]["defaultencoding"],
) = locale.getdefaultlocale()
except Exception: # pylint: disable=broad-except
        # locale.getdefaultlocale() can raise ValueError! Catch anything else
        # it might do, per #2205
- grains['locale_info']['defaultlanguage'] = 'unknown'
- grains['locale_info']['defaultencoding'] = 'unknown'
- grains['locale_info']['detectedencoding'] = __salt_system_encoding__
+ grains["locale_info"]["defaultlanguage"] = "unknown"
+ grains["locale_info"]["defaultencoding"] = "unknown"
+ grains["locale_info"]["detectedencoding"] = __salt_system_encoding__
- grains['locale_info']['timezone'] = 'unknown'
+ grains["locale_info"]["timezone"] = "unknown"
if _DATEUTIL_TZ:
try:
- grains['locale_info']['timezone'] = datetime.datetime.now(dateutil.tz.tzlocal()).tzname()
+ grains["locale_info"]["timezone"] = datetime.datetime.now(
+ dateutil.tz.tzlocal()
+ ).tzname()
except UnicodeDecodeError:
            # Because the 'tzname' method is not part of salt, the decoding error can't be fixed here.
            # The error is in datetime in the python2 lib
if salt.utils.platform.is_windows():
- grains['locale_info']['timezone'] = time.tzname[0].decode('mbcs')
+ grains["locale_info"]["timezone"] = time.tzname[0].decode("mbcs")
return grains
def hostname():
- '''
+ """
Return fqdn, hostname, domainname
.. note::
@@ -2179,7 +2324,7 @@ def hostname():
instead of the Windows domain to which the host is joined. It may also
be empty if not a part of any domain. Refer to the ``windowsdomain``
grain instead
- '''
+ """
# This is going to need some work
# Provides:
# fqdn
@@ -2192,7 +2337,7 @@ def hostname():
if salt.utils.platform.is_proxy():
return grains
- grains['localhost'] = socket.gethostname()
+ grains["localhost"] = socket.gethostname()
if __FQDN__ is None:
__FQDN__ = salt.utils.network.get_fqhostname()
@@ -2201,46 +2346,50 @@ def hostname():
# In this case we punt and log a message at error level, but force the
# hostname and domain to be localhost.localdomain
# Otherwise we would stacktrace below
- if __FQDN__ is None: # still!
- log.error('Having trouble getting a hostname. Does this machine have its hostname and domain set properly?')
- __FQDN__ = 'localhost.localdomain'
+ if __FQDN__ is None: # still!
+ log.error(
+ "Having trouble getting a hostname. Does this machine have its hostname and domain set properly?"
+ )
+ __FQDN__ = "localhost.localdomain"
- grains['fqdn'] = __FQDN__
- (grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
+ grains["fqdn"] = __FQDN__
+ (grains["host"], grains["domain"]) = grains["fqdn"].partition(".")[::2]
return grains
def append_domain():
- '''
+ """
Return append_domain if set
- '''
+ """
grain = {}
if salt.utils.platform.is_proxy():
return grain
- if 'append_domain' in __opts__:
- grain['append_domain'] = __opts__['append_domain']
+ if "append_domain" in __opts__:
+ grain["append_domain"] = __opts__["append_domain"]
return grain
def fqdns():
- '''
+ """
Return all known FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them (excluding 'lo' interface).
- '''
+ """
# Provides:
# fqdns
grains = {}
fqdns = set()
- addresses = salt.utils.network.ip_addrs(include_loopback=False,
- interface_data=_INTERFACES)
- addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False,
- interface_data=_INTERFACES))
- err_message = 'An exception occurred resolving address \'%s\': %s'
+ addresses = salt.utils.network.ip_addrs(
+ include_loopback=False, interface_data=_INTERFACES
+ )
+ addresses.extend(
+ salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_INTERFACES)
+ )
+ err_message = "An exception occurred resolving address '%s': %s"
for ip in addresses:
try:
fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0]))
@@ -2253,25 +2402,25 @@ def fqdns():
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
- grains['fqdns'] = sorted(list(fqdns))
+ grains["fqdns"] = sorted(list(fqdns))
return grains
def ip_fqdn():
- '''
+ """
Return ip address and FQDN grains
- '''
+ """
if salt.utils.platform.is_proxy():
return {}
ret = {}
- ret['ipv4'] = salt.utils.network.ip_addrs(include_loopback=True)
- ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)
+ ret["ipv4"] = salt.utils.network.ip_addrs(include_loopback=True)
+ ret["ipv6"] = salt.utils.network.ip_addrs6(include_loopback=True)
- _fqdn = hostname()['fqdn']
- for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
- key = 'fqdn_ip' + ipv_num
- if not ret['ipv' + ipv_num]:
+ _fqdn = hostname()["fqdn"]
+ for socket_type, ipv_num in ((socket.AF_INET, "4"), (socket.AF_INET6, "6")):
+ key = "fqdn_ip" + ipv_num
+ if not ret["ipv" + ipv_num]:
ret[key] = []
else:
try:
@@ -2280,12 +2429,15 @@ def ip_fqdn():
ret[key] = list(set(item[4][0] for item in info))
except socket.error:
timediff = datetime.datetime.utcnow() - start_time
- if timediff.seconds > 5 and __opts__['__role'] == 'master':
+ if timediff.seconds > 5 and __opts__["__role"] == "master":
log.warning(
'Unable to find IPv%s record for "%s" causing a %s '
- 'second timeout when rendering grains. Set the dns or '
- '/etc/hosts for IPv%s to clear this.',
- ipv_num, _fqdn, timediff, ipv_num
+ "second timeout when rendering grains. Set the dns or "
+ "/etc/hosts for IPv%s to clear this.",
+ ipv_num,
+ _fqdn,
+ timediff,
+ ipv_num,
)
ret[key] = []
@@ -2293,10 +2445,10 @@ def ip_fqdn():
def ip_interfaces():
- '''
+ """
Provide a dict of the connected interfaces and their ip addresses
The addresses will be passed as a list for each interface
- '''
+ """
# Provides:
# ip_interfaces
@@ -2307,24 +2459,24 @@ def ip_interfaces():
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
- for inet in ifaces[face].get('inet', []):
- if 'address' in inet:
- iface_ips.append(inet['address'])
- for inet in ifaces[face].get('inet6', []):
- if 'address' in inet:
- iface_ips.append(inet['address'])
- for secondary in ifaces[face].get('secondary', []):
- if 'address' in secondary:
- iface_ips.append(secondary['address'])
+ for inet in ifaces[face].get("inet", []):
+ if "address" in inet:
+ iface_ips.append(inet["address"])
+ for inet in ifaces[face].get("inet6", []):
+ if "address" in inet:
+ iface_ips.append(inet["address"])
+ for secondary in ifaces[face].get("secondary", []):
+ if "address" in secondary:
+ iface_ips.append(secondary["address"])
ret[face] = iface_ips
- return {'ip_interfaces': ret}
+ return {"ip_interfaces": ret}
def ip4_interfaces():
- '''
+ """
Provide a dict of the connected interfaces and their ip4 addresses
The addresses will be passed as a list for each interface
- '''
+ """
# Provides:
# ip_interfaces
@@ -2335,21 +2487,21 @@ def ip4_interfaces():
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
- for inet in ifaces[face].get('inet', []):
- if 'address' in inet:
- iface_ips.append(inet['address'])
- for secondary in ifaces[face].get('secondary', []):
- if 'address' in secondary:
- iface_ips.append(secondary['address'])
+ for inet in ifaces[face].get("inet", []):
+ if "address" in inet:
+ iface_ips.append(inet["address"])
+ for secondary in ifaces[face].get("secondary", []):
+ if "address" in secondary:
+ iface_ips.append(secondary["address"])
ret[face] = iface_ips
- return {'ip4_interfaces': ret}
+ return {"ip4_interfaces": ret}
def ip6_interfaces():
- '''
+ """
Provide a dict of the connected interfaces and their ip6 addresses
The addresses will be passed as a list for each interface
- '''
+ """
# Provides:
# ip_interfaces
@@ -2360,159 +2512,166 @@ def ip6_interfaces():
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
- for inet in ifaces[face].get('inet6', []):
- if 'address' in inet:
- iface_ips.append(inet['address'])
- for secondary in ifaces[face].get('secondary', []):
- if 'address' in secondary:
- iface_ips.append(secondary['address'])
+ for inet in ifaces[face].get("inet6", []):
+ if "address" in inet:
+ iface_ips.append(inet["address"])
+ for secondary in ifaces[face].get("secondary", []):
+ if "address" in secondary:
+ iface_ips.append(secondary["address"])
ret[face] = iface_ips
- return {'ip6_interfaces': ret}
+ return {"ip6_interfaces": ret}
def hwaddr_interfaces():
- '''
+ """
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
- '''
+ """
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
- if 'hwaddr' in ifaces[face]:
- ret[face] = ifaces[face]['hwaddr']
- return {'hwaddr_interfaces': ret}
+ if "hwaddr" in ifaces[face]:
+ ret[face] = ifaces[face]["hwaddr"]
+ return {"hwaddr_interfaces": ret}
def dns():
- '''
+ """
Parse the resolver configuration file
.. versionadded:: 2016.3.0
- '''
+ """
# Provides:
# dns
- if salt.utils.platform.is_windows() or 'proxyminion' in __opts__:
+ if salt.utils.platform.is_windows() or "proxyminion" in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
- for key in ('nameservers', 'ip4_nameservers', 'ip6_nameservers',
- 'sortlist'):
+ for key in ("nameservers", "ip4_nameservers", "ip6_nameservers", "sortlist"):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
- return {'dns': resolv} if resolv else {}
+ return {"dns": resolv} if resolv else {}
def get_machine_id():
- '''
+ """
Provide the machine-id for machine/virtualization combination
- '''
+ """
# Provides:
# machine-id
- if platform.system() == 'AIX':
+ if platform.system() == "AIX":
return _aix_get_machine_id()
- locations = ['/etc/machine-id', '/var/lib/dbus/machine-id']
+ locations = ["/etc/machine-id", "/var/lib/dbus/machine-id"]
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.files.fopen(existing_locations[0]) as machineid:
- return {'machine_id': machineid.read().strip()}
+ return {"machine_id": machineid.read().strip()}
def cwd():
- '''
+ """
Current working directory
- '''
- return {'cwd': os.getcwd()}
+ """
+ return {"cwd": os.getcwd()}
def path():
- '''
+ """
Return the path
- '''
+ """
# Provides:
# path
- return {'path': os.environ.get('PATH', '').strip()}
+ # systempath
+ _path = salt.utils.stringutils.to_unicode(os.environ.get("PATH", "").strip())
+ return {
+ "path": _path,
+ "systempath": _path.split(os.path.pathsep),
+ }
def pythonversion():
- '''
+ """
Return the Python version
- '''
+ """
# Provides:
# pythonversion
- return {'pythonversion': list(sys.version_info)}
+ return {"pythonversion": list(sys.version_info)}
def pythonpath():
- '''
+ """
Return the Python path
- '''
+ """
# Provides:
# pythonpath
- return {'pythonpath': sys.path}
+ return {"pythonpath": sys.path}
def pythonexecutable():
- '''
+ """
Return the python executable in use
- '''
+ """
# Provides:
# pythonexecutable
- return {'pythonexecutable': sys.executable}
+ return {"pythonexecutable": sys.executable}
def saltpath():
- '''
+ """
Return the path of the salt module
- '''
+ """
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
- return {'saltpath': os.path.dirname(salt_path)}
+ return {"saltpath": os.path.dirname(salt_path)}
def saltversion():
- '''
+ """
Return the version of salt
- '''
+ """
# Provides:
# saltversion
from salt.version import __version__
- return {'saltversion': __version__}
+
+ return {"saltversion": __version__}
def zmqversion():
- '''
+ """
Return the zeromq version
- '''
+ """
# Provides:
# zmqversion
try:
import zmq
- return {'zmqversion': zmq.zmq_version()} # pylint: disable=no-member
+
+ return {"zmqversion": zmq.zmq_version()} # pylint: disable=no-member
except ImportError:
return {}
def saltversioninfo():
- '''
+ """
Return the version_info of salt
.. versionadded:: 0.17.0
- '''
+ """
# Provides:
# saltversioninfo
from salt.version import __version_info__
- return {'saltversioninfo': list(__version_info__)}
+
+ return {"saltversioninfo": list(__version_info__)}
def _hw_data(osdata):
- '''
+ """
Get system specific hardware data from dmidecode
Provides
@@ -2524,232 +2683,244 @@ def _hw_data(osdata):
uuid
.. versionadded:: 0.9.5
- '''
+ """
if salt.utils.platform.is_proxy():
return {}
grains = {}
- if osdata['kernel'] == 'Linux' and os.path.exists('/sys/class/dmi/id'):
+ if osdata["kernel"] == "Linux" and os.path.exists("/sys/class/dmi/id"):
        # On many Linux distributions, basic firmware information is available via sysfs;
        # this requires CONFIG_DMIID to be enabled in the Linux kernel configuration.
sysfs_firmware_info = {
- 'biosversion': 'bios_version',
- 'productname': 'product_name',
- 'manufacturer': 'sys_vendor',
- 'biosreleasedate': 'bios_date',
- 'uuid': 'product_uuid',
- 'serialnumber': 'product_serial'
+ "biosversion": "bios_version",
+ "productname": "product_name",
+ "manufacturer": "sys_vendor",
+ "biosreleasedate": "bios_date",
+ "uuid": "product_uuid",
+ "serialnumber": "product_serial",
}
for key, fw_file in sysfs_firmware_info.items():
- contents_file = os.path.join('/sys/class/dmi/id', fw_file)
+ contents_file = os.path.join("/sys/class/dmi/id", fw_file)
if os.path.exists(contents_file):
try:
- with salt.utils.files.fopen(contents_file, 'r') as ifile:
- grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
- if key == 'uuid':
- grains['uuid'] = grains['uuid'].lower()
+ with salt.utils.files.fopen(contents_file, "r") as ifile:
+ grains[key] = salt.utils.stringutils.to_unicode(
+ ifile.read().strip(), errors="replace"
+ )
+ if key == "uuid":
+ grains["uuid"] = grains["uuid"].lower()
except (IOError, OSError) as err:
                    # PermissionError is new to Python 3, but corresponds to the EACCES and
# EPERM error numbers. Use those instead here for PY2 compatibility.
if err.errno == EACCES or err.errno == EPERM:
# Skip the grain if non-root user has no access to the file.
pass
- elif salt.utils.path.which_bin(['dmidecode', 'smbios']) is not None and not (
- salt.utils.platform.is_smartos() or
- ( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
- osdata['kernel'] == 'SunOS' and
- osdata['cpuarch'].startswith('sparc')
- )):
+ elif salt.utils.path.which_bin(["dmidecode", "smbios"]) is not None and not (
+ salt.utils.platform.is_smartos()
+ or ( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
+ osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc")
+ )
+ ):
        # On SmartOS (possibly SunOS also), smbios only works in the global zone.
        # smbios is also not compatible with Linux's smbios (smbios -s = print summarized).
grains = {
- 'biosversion': __salt__['smbios.get']('bios-version'),
- 'productname': __salt__['smbios.get']('system-product-name'),
- 'manufacturer': __salt__['smbios.get']('system-manufacturer'),
- 'biosreleasedate': __salt__['smbios.get']('bios-release-date'),
- 'uuid': __salt__['smbios.get']('system-uuid')
+ "biosversion": __salt__["smbios.get"]("bios-version"),
+ "productname": __salt__["smbios.get"]("system-product-name"),
+ "manufacturer": __salt__["smbios.get"]("system-manufacturer"),
+ "biosreleasedate": __salt__["smbios.get"]("bios-release-date"),
+ "uuid": __salt__["smbios.get"]("system-uuid"),
}
grains = dict([(key, val) for key, val in grains.items() if val is not None])
- uuid = __salt__['smbios.get']('system-uuid')
+ uuid = __salt__["smbios.get"]("system-uuid")
if uuid is not None:
- grains['uuid'] = uuid.lower()
- for serial in ('system-serial-number', 'chassis-serial-number', 'baseboard-serial-number'):
- serial = __salt__['smbios.get'](serial)
+ grains["uuid"] = uuid.lower()
+ for serial in (
+ "system-serial-number",
+ "chassis-serial-number",
+ "baseboard-serial-number",
+ ):
+ serial = __salt__["smbios.get"](serial)
if serial is not None:
- grains['serialnumber'] = serial
+ grains["serialnumber"] = serial
break
- elif salt.utils.path.which_bin(['fw_printenv']) is not None:
+ elif salt.utils.path.which_bin(["fw_printenv"]) is not None:
        # ARM Linux devices expose U-Boot environment variables via fw_printenv
hwdata = {
- 'manufacturer': 'manufacturer',
- 'serialnumber': 'serial#',
- 'productname': 'DeviceDesc',
+ "manufacturer": "manufacturer",
+ "serialnumber": "serial#",
+ "productname": "DeviceDesc",
}
for grain_name, cmd_key in six.iteritems(hwdata):
- result = __salt__['cmd.run_all']('fw_printenv {0}'.format(cmd_key))
- if result['retcode'] == 0:
- uboot_keyval = result['stdout'].split('=')
+ result = __salt__["cmd.run_all"]("fw_printenv {0}".format(cmd_key))
+ if result["retcode"] == 0:
+ uboot_keyval = result["stdout"].split("=")
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
- elif osdata['kernel'] == 'FreeBSD':
+ elif osdata["kernel"] == "FreeBSD":
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
- kenv = salt.utils.path.which('kenv')
+ kenv = salt.utils.path.which("kenv")
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
- 'biosversion': 'smbios.bios.version',
- 'manufacturer': 'smbios.system.maker',
- 'serialnumber': 'smbios.system.serial',
- 'productname': 'smbios.system.product',
- 'biosreleasedate': 'smbios.bios.reldate',
- 'uuid': 'smbios.system.uuid',
+ "biosversion": "smbios.bios.version",
+ "manufacturer": "smbios.system.maker",
+ "serialnumber": "smbios.system.serial",
+ "productname": "smbios.system.product",
+ "biosreleasedate": "smbios.bios.reldate",
+ "uuid": "smbios.system.uuid",
}
for key, val in six.iteritems(fbsd_hwdata):
- value = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
+ value = __salt__["cmd.run"]("{0} {1}".format(kenv, val))
grains[key] = _clean_value(key, value)
- elif osdata['kernel'] == 'OpenBSD':
- sysctl = salt.utils.path.which('sysctl')
- hwdata = {'biosversion': 'hw.version',
- 'manufacturer': 'hw.vendor',
- 'productname': 'hw.product',
- 'serialnumber': 'hw.serialno',
- 'uuid': 'hw.uuid'}
+ elif osdata["kernel"] == "OpenBSD":
+ sysctl = salt.utils.path.which("sysctl")
+ hwdata = {
+ "biosversion": "hw.version",
+ "manufacturer": "hw.vendor",
+ "productname": "hw.product",
+ "serialnumber": "hw.serialno",
+ "uuid": "hw.uuid",
+ }
for key, oid in six.iteritems(hwdata):
- value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
- if not value.endswith(' value is not available'):
+ value = __salt__["cmd.run"]("{0} -n {1}".format(sysctl, oid))
+ if not value.endswith(" value is not available"):
grains[key] = _clean_value(key, value)
- elif osdata['kernel'] == 'NetBSD':
- sysctl = salt.utils.path.which('sysctl')
+ elif osdata["kernel"] == "NetBSD":
+ sysctl = salt.utils.path.which("sysctl")
nbsd_hwdata = {
- 'biosversion': 'machdep.dmi.board-version',
- 'manufacturer': 'machdep.dmi.system-vendor',
- 'serialnumber': 'machdep.dmi.system-serial',
- 'productname': 'machdep.dmi.system-product',
- 'biosreleasedate': 'machdep.dmi.bios-date',
- 'uuid': 'machdep.dmi.system-uuid',
+ "biosversion": "machdep.dmi.board-version",
+ "manufacturer": "machdep.dmi.system-vendor",
+ "serialnumber": "machdep.dmi.system-serial",
+ "productname": "machdep.dmi.system-product",
+ "biosreleasedate": "machdep.dmi.bios-date",
+ "uuid": "machdep.dmi.system-uuid",
}
for key, oid in six.iteritems(nbsd_hwdata):
- result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
- if result['retcode'] == 0:
- grains[key] = _clean_value(key, result['stdout'])
- elif osdata['kernel'] == 'Darwin':
- grains['manufacturer'] = 'Apple Inc.'
- sysctl = salt.utils.path.which('sysctl')
- hwdata = {'productname': 'hw.model'}
+ result = __salt__["cmd.run_all"]("{0} -n {1}".format(sysctl, oid))
+ if result["retcode"] == 0:
+ grains[key] = _clean_value(key, result["stdout"])
+ elif osdata["kernel"] == "Darwin":
+ grains["manufacturer"] = "Apple Inc."
+ sysctl = salt.utils.path.which("sysctl")
+ hwdata = {"productname": "hw.model"}
for key, oid in hwdata.items():
- value = __salt__['cmd.run']('{0} -b {1}'.format(sysctl, oid))
- if not value.endswith(' is invalid'):
+ value = __salt__["cmd.run"]("{0} -b {1}".format(sysctl, oid))
+ if not value.endswith(" is invalid"):
grains[key] = _clean_value(key, value)
- elif osdata['kernel'] == 'SunOS' and osdata['cpuarch'].startswith('sparc'):
+ elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):
# Depending on the hardware model, commands can report different bits
# of information. With that said, consolidate the output from various
# commands and attempt various lookups.
data = ""
- for (cmd, args) in (('/usr/sbin/prtdiag', '-v'), ('/usr/sbin/prtconf', '-vp'), ('/usr/sbin/virtinfo', '-a')):
+ for (cmd, args) in (
+ ("/usr/sbin/prtdiag", "-v"),
+ ("/usr/sbin/prtconf", "-vp"),
+ ("/usr/sbin/virtinfo", "-a"),
+ ):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable
- data += __salt__['cmd.run']('{0} {1}'.format(cmd, args))
- data += '\n'
+ data += __salt__["cmd.run"]("{0} {1}".format(cmd, args))
+ data += "\n"
sn_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*Chassis\s+Serial\s+Number\n-+\n(\S+)', # prtdiag
- r'(?im)^\s*chassis-sn:\s*(\S+)', # prtconf
- r'(?im)^\s*Chassis\s+Serial#:\s*(\S+)', # virtinfo
+ re.compile(r)
+ for r in [
+ r"(?im)^\s*Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
+ r"(?im)^\s*chassis-sn:\s*(\S+)", # prtconf
+ r"(?im)^\s*Chassis\s+Serial#:\s*(\S+)", # virtinfo
]
]
obp_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)', # prtdiag
- r'(?im)^\s*version:\s*\'OBP\s+(\S+)\s+(\S+)', # prtconf
+ re.compile(r)
+ for r in [
+ r"(?im)^\s*System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
+ r"(?im)^\s*version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)', # prtdiag
- ]
+ re.compile(r)
+ for r in [r"(?im)^\s*Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*Domain\s+UUID:\s*(\S+)', # virtinfo
- ]
+ re.compile(r) for r in [r"(?im)^\s*Domain\s+UUID:\s*(\S+)"] # virtinfo
]
manufacture_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)', # prtdiag
- ]
+ re.compile(r)
+ for r in [r"(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)"] # prtdiag
]
product_regexes = [
- re.compile(r) for r in [
- r'(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)', # prtdiag
- r'(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)', # prtconf
- r'(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)', # prtconf
+ re.compile(r)
+ for r in [
+ r"(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)", # prtdiag
+ r"(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)", # prtconf
+ r"(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)", # prtconf
]
]
sn_regexes = [
- re.compile(r) for r in [
- r'(?im)Chassis\s+Serial\s+Number\n-+\n(\S+)', # prtdiag
- r'(?i)Chassis\s+Serial#:\s*(\S+)', # virtinfo
- r'(?i)chassis-sn:\s*(\S+)', # prtconf
+ re.compile(r)
+ for r in [
+ r"(?im)Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
+ r"(?i)Chassis\s+Serial#:\s*(\S+)", # virtinfo
+ r"(?i)chassis-sn:\s*(\S+)", # prtconf
]
]
obp_regexes = [
- re.compile(r) for r in [
- r'(?im)System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)', # prtdiag
- r'(?im)version:\s*\'OBP\s+(\S+)\s+(\S+)', # prtconf
+ re.compile(r)
+ for r in [
+ r"(?im)System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
+ r"(?im)version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
- re.compile(r) for r in [
- r'(?i)Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)', # prtdiag
- ]
+ re.compile(r)
+ for r in [r"(?i)Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
- re.compile(r) for r in [
- r'(?i)Domain\s+UUID:\s+(\S+)', # virtinfo
- ]
+ re.compile(r) for r in [r"(?i)Domain\s+UUID:\s+(\S+)"] # virtinfo
]
for regex in sn_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains['serialnumber'] = res.group(1).strip().replace("'", "")
+ grains["serialnumber"] = res.group(1).strip().replace("'", "")
break
for regex in obp_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- obp_rev, obp_date = res.groups()[0:2] # Limit the number in case we found the data in multiple places
- grains['biosversion'] = obp_rev.strip().replace("'", "")
- grains['biosreleasedate'] = obp_date.strip().replace("'", "")
+ obp_rev, obp_date = res.groups()[
+ 0:2
+ ] # Limit the number in case we found the data in multiple places
+ grains["biosversion"] = obp_rev.strip().replace("'", "")
+ grains["biosreleasedate"] = obp_date.strip().replace("'", "")
for regex in fw_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
fw_rev, fw_date = res.groups()[0:2]
- grains['systemfirmware'] = fw_rev.strip().replace("'", "")
- grains['systemfirmwaredate'] = fw_date.strip().replace("'", "")
+ grains["systemfirmware"] = fw_rev.strip().replace("'", "")
+ grains["systemfirmwaredate"] = fw_date.strip().replace("'", "")
break
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains['uuid'] = res.group(1).strip().replace("'", "")
+ grains["uuid"] = res.group(1).strip().replace("'", "")
break
for regex in manufacture_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains['manufacture'] = res.group(1).strip().replace("'", "")
+ grains["manufacture"] = res.group(1).strip().replace("'", "")
break
for regex in product_regexes:
@@ -2757,80 +2928,85 @@ def _hw_data(osdata):
if res and len(res.groups()) >= 1:
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
- grains['product'] = t_productname
- grains['productname'] = t_productname
+ grains["product"] = t_productname
+ grains["productname"] = t_productname
break
- elif osdata['kernel'] == 'AIX':
- cmd = salt.utils.path.which('prtconf')
+ elif osdata["kernel"] == "AIX":
+ cmd = salt.utils.path.which("prtconf")
if cmd:
- data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
- for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
- ('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
+ data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
+ for dest, regstring in (
+ ("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
+ ("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),
+ ):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains[dest] = res.group(1).strip().replace("'", '')
+ grains[dest] = res.group(1).strip().replace("'", "")
- product_regexes = [re.compile(r'(?im)^\s*System\s+Model:\s+(\S+)')]
+ product_regexes = [re.compile(r"(?im)^\s*System\s+Model:\s+(\S+)")]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
- grains['manufacturer'], grains['productname'] = res.group(1).strip().replace("'", "").split(",")
+ grains["manufacturer"], grains["productname"] = (
+ res.group(1).strip().replace("'", "").split(",")
+ )
break
else:
- log.error('The \'prtconf\' binary was not found in $PATH.')
+ log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def get_server_id():
- '''
+ """
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID
like this.
- '''
+ """
# Provides:
# server_id
if salt.utils.platform.is_proxy():
return {}
- id_ = __opts__.get('id', '')
+ id_ = __opts__.get("id", "")
id_hash = None
py_ver = sys.version_info[:2]
if py_ver >= (3, 3):
# Python 3.3 enabled hash randomization, so we need to shell out to get
# a reliable hash.
- id_hash = __salt__['cmd.run'](
- [sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
- env={'PYTHONHASHSEED': '0'}
+ id_hash = __salt__["cmd.run"](
+ [sys.executable, "-c", 'print(hash("{0}"))'.format(id_)],
+ env={"PYTHONHASHSEED": "0"},
)
try:
id_hash = int(id_hash)
except (TypeError, ValueError):
log.debug(
- 'Failed to hash the ID to get the server_id grain. Result of '
- 'hash command: %s', id_hash
+ "Failed to hash the ID to get the server_id grain. Result of "
+ "hash command: %s",
+ id_hash,
)
id_hash = None
if id_hash is None:
# Python < 3.3 or error encountered above
id_hash = hash(id_)
- return {'server_id': abs(id_hash % (2 ** 31))}
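+    # Keep the result positive and within 31 bits, e.g. for use as a MySQL server-id.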
+ return {"server_id": abs(id_hash % (2 ** 31))}
def get_master():
- '''
+ """
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
- '''
+ """
# Provides:
# master
- return {'master': __opts__.get('master', '')}
+ return {"master": __opts__.get("master", "")}
def default_gateway():
- '''
+ """
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
@@ -2847,28 +3023,28 @@ def default_gateway():
ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
- '''
+ """
grains = {}
- ip_bin = salt.utils.path.which('ip')
+ ip_bin = salt.utils.path.which("ip")
if not ip_bin:
return {}
- grains['ip_gw'] = False
- grains['ip4_gw'] = False
- grains['ip6_gw'] = False
- for ip_version in ('4', '6'):
+ grains["ip_gw"] = False
+ grains["ip4_gw"] = False
+ grains["ip6_gw"] = False
+ for ip_version in ("4", "6"):
try:
- out = __salt__['cmd.run']([ip_bin, '-' + ip_version, 'route', 'show'])
+ out = __salt__["cmd.run"]([ip_bin, "-" + ip_version, "route", "show"])
for line in out.splitlines():
- if line.startswith('default'):
- grains['ip_gw'] = True
- grains['ip{0}_gw'.format(ip_version)] = True
+ if line.startswith("default"):
+ grains["ip_gw"] = True
+ grains["ip{0}_gw".format(ip_version)] = True
try:
via, gw_ip = line.split()[1:3]
except ValueError:
pass
else:
- if via == 'via':
- grains['ip{0}_gw'.format(ip_version)] = gw_ip
+ if via == "via":
+ grains["ip{0}_gw".format(ip_version)] = gw_ip
break
except Exception: # pylint: disable=broad-except
continue
diff --git a/salt/grains/disks.py b/salt/grains/disks.py
index 38fb7755d85..aa72262348b 100644
--- a/salt/grains/disks.py
+++ b/salt/grains/disks.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
Detect disks
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -9,27 +9,27 @@ import glob
import logging
import re
+# Solve the Chicken and egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmdmod
+
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
-# Solve the Chicken and egg problem where grains need to run before any
-# of the modules are loaded and are generally available for any usage.
-import salt.modules.cmdmod
-
__salt__ = {
- 'cmd.run': salt.modules.cmdmod._run_quiet,
- 'cmd.run_all': salt.modules.cmdmod._run_all_quiet
+ "cmd.run": salt.modules.cmdmod._run_quiet,
+ "cmd.run_all": salt.modules.cmdmod._run_all_quiet,
}
log = logging.getLogger(__name__)
def disks():
- '''
+ """
Return list of disk devices
- '''
+ """
if salt.utils.platform.is_freebsd():
return _freebsd_geom()
elif salt.utils.platform.is_linux():
@@ -37,45 +37,45 @@ def disks():
elif salt.utils.platform.is_windows():
return _windows_disks()
else:
- log.trace('Disk grain does not support OS')
+ log.trace("Disk grain does not support OS")
class _geomconsts(object):
- GEOMNAME = 'Geom name'
- MEDIASIZE = 'Mediasize'
- SECTORSIZE = 'Sectorsize'
- STRIPESIZE = 'Stripesize'
- STRIPEOFFSET = 'Stripeoffset'
- DESCR = 'descr' # model
- LUNID = 'lunid'
- LUNNAME = 'lunname'
- IDENT = 'ident' # serial
- ROTATIONRATE = 'rotationrate' # RPM or 0 for non-rotating
+ GEOMNAME = "Geom name"
+ MEDIASIZE = "Mediasize"
+ SECTORSIZE = "Sectorsize"
+ STRIPESIZE = "Stripesize"
+ STRIPEOFFSET = "Stripeoffset"
+ DESCR = "descr" # model
+ LUNID = "lunid"
+ LUNNAME = "lunname"
+ IDENT = "ident" # serial
+ ROTATIONRATE = "rotationrate" # RPM or 0 for non-rotating
# Preserve the API where possible with Salt < 2016.3
_aliases = {
- DESCR: 'device_model',
- IDENT: 'serial_number',
- ROTATIONRATE: 'media_RPM',
- LUNID: 'WWN',
+ DESCR: "device_model",
+ IDENT: "serial_number",
+ ROTATIONRATE: "media_RPM",
+ LUNID: "WWN",
}
_datatypes = {
- MEDIASIZE: ('re_int', r'(\d+)'),
- SECTORSIZE: 'try_int',
- STRIPESIZE: 'try_int',
- STRIPEOFFSET: 'try_int',
- ROTATIONRATE: 'try_int',
+ MEDIASIZE: ("re_int", r"(\d+)"),
+ SECTORSIZE: "try_int",
+ STRIPESIZE: "try_int",
+ STRIPEOFFSET: "try_int",
+ ROTATIONRATE: "try_int",
}
def _datavalue(datatype, data):
- if datatype == 'try_int':
+ if datatype == "try_int":
try:
return int(data)
except ValueError:
return None
- elif datatype is tuple and datatype[0] == 're_int':
+    elif isinstance(datatype, tuple) and datatype[0] == "re_int":
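+        # datatype is a ("re_int", pattern) pair; extract the integer with the regex.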
search = re.search(datatype[1], data)
if search:
try:
@@ -87,37 +87,39 @@ def _datavalue(datatype, data):
return data
-_geom_attribs = [_geomconsts.__dict__[key] for key in
- _geomconsts.__dict__ if not key.startswith('_')]
+_geom_attribs = [
+ _geomconsts.__dict__[key] for key in _geomconsts.__dict__ if not key.startswith("_")
+]
def _freebsd_geom():
- geom = salt.utils.path.which('geom')
- ret = {'disks': {}, 'SSDs': []}
+ geom = salt.utils.path.which("geom")
+ ret = {"disks": {}, "SSDs": []}
- devices = __salt__['cmd.run']('{0} disk list'.format(geom))
- devices = devices.split('\n\n')
+ devices = __salt__["cmd.run"]("{0} disk list".format(geom))
+ devices = devices.split("\n\n")
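+    # "geom disk list" separates each device's stanza with a blank line.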
def parse_geom_attribs(device):
tmp = {}
- for line in device.split('\n'):
+ for line in device.split("\n"):
for attrib in _geom_attribs:
- search = re.search(r'{0}:\s(.*)'.format(attrib), line)
+ search = re.search(r"{0}:\s(.*)".format(attrib), line)
if search:
- value = _datavalue(_geomconsts._datatypes.get(attrib),
- search.group(1))
+ value = _datavalue(
+ _geomconsts._datatypes.get(attrib), search.group(1)
+ )
tmp[attrib] = value
if attrib in _geomconsts._aliases:
tmp[_geomconsts._aliases[attrib]] = value
name = tmp.pop(_geomconsts.GEOMNAME)
- if name.startswith('cd'):
+ if name.startswith("cd"):
return
- ret['disks'][name] = tmp
+ ret["disks"][name] = tmp
if tmp.get(_geomconsts.ROTATIONRATE) == 0:
- log.trace('Device %s reports itself as an SSD', device)
- ret['SSDs'].append(name)
+ log.trace("Device %s reports itself as an SSD", device)
+ ret["SSDs"].append(name)
for device in devices:
parse_geom_attribs(device)
@@ -126,26 +128,27 @@ def _freebsd_geom():
def _linux_disks():
- '''
+ """
Return list of disk devices and work out if they are SSD or HDD.
- '''
- ret = {'disks': [], 'SSDs': []}
+ """
+ ret = {"disks": [], "SSDs": []}
- for entry in glob.glob('/sys/block/*/queue/rotational'):
+ for entry in glob.glob("/sys/block/*/queue/rotational"):
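+        # The sysfs "rotational" flag reads "1" for spinning disks and "0" for SSDs.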
try:
with salt.utils.files.fopen(entry) as entry_fp:
- device = entry.split('/')[3]
+ device = entry.split("/")[3]
flag = entry_fp.read(1)
- if flag == '0':
- ret['SSDs'].append(device)
- log.trace('Device %s reports itself as an SSD', device)
- elif flag == '1':
- ret['disks'].append(device)
- log.trace('Device %s reports itself as an HDD', device)
+ if flag == "0":
+ ret["SSDs"].append(device)
+ log.trace("Device %s reports itself as an SSD", device)
+ elif flag == "1":
+ ret["disks"].append(device)
+ log.trace("Device %s reports itself as an HDD", device)
else:
log.trace(
- 'Unable to identify device %s as an SSD or HDD. It does '
- 'not report 0 or 1', device
+ "Unable to identify device %s as an SSD or HDD. It does "
+ "not report 0 or 1",
+ device,
)
except IOError:
pass
@@ -153,39 +156,41 @@ def _linux_disks():
def _windows_disks():
- wmic = salt.utils.path.which('wmic')
+ wmic = salt.utils.path.which("wmic")
- namespace = r'\\root\microsoft\windows\storage'
- path = 'MSFT_PhysicalDisk'
- get = 'DeviceID,MediaType'
+ namespace = r"\\root\microsoft\windows\storage"
+ path = "MSFT_PhysicalDisk"
+ get = "DeviceID,MediaType"
- ret = {'disks': [], 'SSDs': []}
+ ret = {"disks": [], "SSDs": []}
- cmdret = __salt__['cmd.run_all'](
- '{0} /namespace:{1} path {2} get {3} /format:table'.format(
- wmic, namespace, path, get))
+ cmdret = __salt__["cmd.run_all"](
+ "{0} /namespace:{1} path {2} get {3} /format:table".format(
+ wmic, namespace, path, get
+ )
+ )
- if cmdret['retcode'] != 0:
- log.trace('Disk grain does not support this version of Windows')
+ if cmdret["retcode"] != 0:
+ log.trace("Disk grain does not support this version of Windows")
else:
- for line in cmdret['stdout'].splitlines():
+ for line in cmdret["stdout"].splitlines():
info = line.split()
if len(info) != 2 or not info[0].isdigit() or not info[1].isdigit():
continue
- device = r'\\.\PhysicalDrive{0}'.format(info[0])
+ device = r"\\.\PhysicalDrive{0}".format(info[0])
mediatype = info[1]
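+            # MSFT_PhysicalDisk MediaType codes: 3 = HDD, 4 = SSD, 5 = SCM.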
- if mediatype == '3':
- log.trace('Device %s reports itself as an HDD', device)
- ret['disks'].append(device)
- elif mediatype == '4':
- log.trace('Device %s reports itself as an SSD', device)
- ret['SSDs'].append(device)
- ret['disks'].append(device)
- elif mediatype == '5':
- log.trace('Device %s reports itself as an SCM', device)
- ret['disks'].append(device)
+ if mediatype == "3":
+ log.trace("Device %s reports itself as an HDD", device)
+ ret["disks"].append(device)
+ elif mediatype == "4":
+ log.trace("Device %s reports itself as an SSD", device)
+ ret["SSDs"].append(device)
+ ret["disks"].append(device)
+ elif mediatype == "5":
+ log.trace("Device %s reports itself as an SCM", device)
+ ret["disks"].append(device)
else:
- log.trace('Device %s reports itself as Unspecified', device)
- ret['disks'].append(device)
+ log.trace("Device %s reports itself as Unspecified", device)
+ ret["disks"].append(device)
return ret
diff --git a/salt/grains/esxi.py b/salt/grains/esxi.py
index 95805622039..7291fd2d62b 100644
--- a/salt/grains/esxi.py
+++ b/salt/grains/esxi.py
@@ -1,22 +1,24 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains for ESXi hosts.
.. versionadded:: 2015.8.4
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.modules.vsphere
+import salt.utils.platform
+
# Import Salt Libs
from salt.exceptions import SaltSystemExit
-import salt.utils.platform
-import salt.modules.vsphere
-__proxyenabled__ = ['esxi']
-__virtualname__ = 'esxi'
+__proxyenabled__ = ["esxi"]
+__virtualname__ = "esxi"
log = logging.getLogger(__file__)
@@ -26,7 +28,7 @@ GRAINS_CACHE = {}
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'esxi':
+ if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "esxi":
return __virtualname__
except KeyError:
pass
@@ -39,7 +41,7 @@ def esxi():
def kernel():
- return {'kernel': 'proxy'}
+ return {"kernel": "proxy"}
def os():
@@ -47,29 +49,29 @@ def os():
GRAINS_CACHE.update(_grains())
try:
- return {'os': GRAINS_CACHE.get('fullName')}
+ return {"os": GRAINS_CACHE.get("fullName")}
except AttributeError:
- return {'os': 'Unknown'}
+ return {"os": "Unknown"}
def os_family():
- return {'os_family': 'proxy'}
+ return {"os_family": "proxy"}
def _find_credentials(host):
- '''
+ """
Cycle through all the possible credentials and return the first one that
works.
- '''
- user_names = [__pillar__['proxy'].get('username', 'root')]
- passwords = __pillar__['proxy']['passwords']
+ """
+ user_names = [__pillar__["proxy"].get("username", "root")]
+ passwords = __pillar__["proxy"]["passwords"]
for user in user_names:
for password in passwords:
try:
# Try to authenticate with the given user/password combination
- ret = salt.modules.vsphere.system_info(host=host,
- username=user,
- password=password)
+ ret = salt.modules.vsphere.system_info(
+ host=host, username=user, password=password
+ )
except SaltSystemExit:
# If we can't authenticate, continue on to try the next password.
continue
@@ -77,24 +79,28 @@ def _find_credentials(host):
if ret:
return user, password
# We've reached the end of the list without successfully authenticating.
- raise SaltSystemExit('Cannot complete login due to an incorrect user name or password.')
+ raise SaltSystemExit(
+ "Cannot complete login due to an incorrect user name or password."
+ )
def _grains():
- '''
+ """
Get the grains from the proxied device.
- '''
+ """
try:
- host = __pillar__['proxy']['host']
+ host = __pillar__["proxy"]["host"]
if host:
username, password = _find_credentials(host)
- protocol = __pillar__['proxy'].get('protocol')
- port = __pillar__['proxy'].get('port')
- ret = salt.modules.vsphere.system_info(host=host,
- username=username,
- password=password,
- protocol=protocol,
- port=port)
+ protocol = __pillar__["proxy"].get("protocol")
+ port = __pillar__["proxy"].get("port")
+ ret = salt.modules.vsphere.system_info(
+ host=host,
+ username=username,
+ password=password,
+ protocol=protocol,
+ port=port,
+ )
GRAINS_CACHE.update(ret)
except KeyError:
pass
diff --git a/salt/grains/extra.py b/salt/grains/extra.py
index 9ce644b7664..2fdbe6526ac 100644
--- a/salt/grains/extra.py
+++ b/salt/grains/extra.py
@@ -2,73 +2,64 @@
from __future__ import absolute_import, print_function, unicode_literals
-# Import python libs
-import os
-
# Import third party libs
import logging
+# Import python libs
+import os
+
# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.yaml
-__proxyenabled__ = ['*']
+__proxyenabled__ = ["*"]
log = logging.getLogger(__name__)
def shell():
- '''
+ """
Return the default shell to use on this system
- '''
+ """
# Provides:
# shell
if salt.utils.platform.is_windows():
- env_var = 'COMSPEC'
- default = r'C:\Windows\system32\cmd.exe'
+ env_var = "COMSPEC"
+ default = r"C:\Windows\system32\cmd.exe"
else:
- env_var = 'SHELL'
- default = '/bin/sh'
+ env_var = "SHELL"
+ default = "/bin/sh"
- return {'shell': os.environ.get(env_var, default)}
+ return {"shell": os.environ.get(env_var, default)}
def config():
- '''
+ """
Return the grains set in the grains file
- '''
- if 'conf_file' not in __opts__:
+ """
+ if "conf_file" not in __opts__:
return {}
- if os.path.isdir(__opts__['conf_file']):
+ if os.path.isdir(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
- __opts__['conf_file'],
- 'proxy.d',
- __opts__['id'],
- 'grains'
- )
+ __opts__["conf_file"], "proxy.d", __opts__["id"], "grains"
+ )
else:
- gfn = os.path.join(
- __opts__['conf_file'],
- 'grains'
- )
+ gfn = os.path.join(__opts__["conf_file"], "grains")
else:
if salt.utils.platform.is_proxy():
gfn = os.path.join(
- os.path.dirname(__opts__['conf_file']),
- 'proxy.d',
- __opts__['id'],
- 'grains'
- )
+ os.path.dirname(__opts__["conf_file"]),
+ "proxy.d",
+ __opts__["id"],
+ "grains",
+ )
else:
- gfn = os.path.join(
- os.path.dirname(__opts__['conf_file']),
- 'grains'
- )
+ gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
if os.path.isfile(gfn):
- log.debug('Loading static grains from %s', gfn)
- with salt.utils.files.fopen(gfn, 'rb') as fp_:
+ log.debug("Loading static grains from %s", gfn)
+ with salt.utils.files.fopen(gfn, "rb") as fp_:
try:
return salt.utils.data.decode(salt.utils.yaml.safe_load(fp_))
except Exception: # pylint: disable=broad-except
diff --git a/salt/grains/fibre_channel.py b/salt/grains/fibre_channel.py
index 5396bbde7a9..7c2c7490797 100644
--- a/salt/grains/fibre_channel.py
+++ b/salt/grains/fibre_channel.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains for Fibre Channel WWN's. On Windows this runs a PowerShell command that
queries WMI to get the Fibre Channel WWN's available.
@@ -10,7 +10,7 @@ To enable these grains set ``fibre_channel_grains: True``.
.. code-block:: yaml
fibre_channel_grains: True
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
@@ -19,29 +19,29 @@ import logging
# Import Salt libs
import salt.modules.cmdmod
-import salt.utils.platform
import salt.utils.files
+import salt.utils.platform
-__virtualname__ = 'fibre_channel'
+__virtualname__ = "fibre_channel"
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
- if __opts__.get('fibre_channel_grains', False) is False:
+ if __opts__.get("fibre_channel_grains", False) is False:
return False
else:
return __virtualname__
def _linux_wwns():
- '''
+ """
Return Fibre Channel port WWNs from a Linux host.
- '''
+ """
ret = []
- for fc_file in glob.glob('/sys/class/fc_host/*/port_name'):
- with salt.utils.files.fopen(fc_file, 'r') as _wwn:
+ for fc_file in glob.glob("/sys/class/fc_host/*/port_name"):
+ with salt.utils.files.fopen(fc_file, "r") as _wwn:
content = _wwn.read()
for line in content.splitlines():
ret.append(line.rstrip()[2:])
@@ -49,14 +49,16 @@ def _linux_wwns():
def _windows_wwns():
- '''
+ """
Return Fibre Channel port WWNs from a Windows host.
- '''
- ps_cmd = r'Get-WmiObject -ErrorAction Stop ' \
- r'-class MSFC_FibrePortHBAAttributes ' \
- r'-namespace "root\WMI" | ' \
- r'Select -Expandproperty Attributes | ' \
- r'%{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
+ """
+ ps_cmd = (
+ r"Get-WmiObject -ErrorAction Stop "
+ r"-class MSFC_FibrePortHBAAttributes "
+ r'-namespace "root\WMI" | '
+ r"Select -Expandproperty Attributes | "
+ r'%{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
+ )
ret = []
cmd_ret = salt.modules.cmdmod.powershell(ps_cmd)
for line in cmd_ret:
@@ -65,12 +67,12 @@ def _windows_wwns():
def fibre_channel_wwns():
- '''
+ """
    Return list of Fibre Channel HBA WWNs
- '''
- grains = {'fc_wwn': False}
+ """
+ grains = {"fc_wwn": False}
if salt.utils.platform.is_linux():
- grains['fc_wwn'] = _linux_wwns()
+ grains["fc_wwn"] = _linux_wwns()
elif salt.utils.platform.is_windows():
- grains['fc_wwn'] = _windows_wwns()
+ grains["fc_wwn"] = _windows_wwns()
return grains
diff --git a/salt/grains/fx2.py b/salt/grains/fx2.py
index 9bb8522200b..eaf8d24e1dc 100644
--- a/salt/grains/fx2.py
+++ b/salt/grains/fx2.py
@@ -1,21 +1,23 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains for Dell FX2 chassis.
The challenge is that most of Salt isn't bootstrapped yet,
so we need to repeat a bunch of things that would normally happen
in proxy/fx2.py--just enough to get data from the chassis to include
in grains.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-import salt.proxy.fx2
+
import salt.modules.cmdmod
import salt.modules.dracr
+import salt.proxy.fx2
import salt.utils.platform
-__proxyenabled__ = ['fx2']
+__proxyenabled__ = ["fx2"]
-__virtualname__ = 'fx2'
+__virtualname__ = "fx2"
logger = logging.getLogger(__file__)
@@ -24,61 +26,74 @@ GRAINS_CACHE = {}
def __virtual__():
- if salt.utils.platform.is_proxy() and 'proxy' in __opts__ and __opts__['proxy'].get('proxytype') == 'fx2':
+ if (
+ salt.utils.platform.is_proxy()
+ and "proxy" in __opts__
+ and __opts__["proxy"].get("proxytype") == "fx2"
+ ):
return __virtualname__
return False
def _find_credentials():
- '''
+ """
Cycle through all the possible credentials and return the first one that
works
- '''
+ """
usernames = []
- usernames.append(__pillar__['proxy'].get('admin_username', 'root'))
- if 'fallback_admin_username' in __pillar__.get('proxy'):
- usernames.append(__pillar__['proxy'].get('fallback_admin_username'))
+ usernames.append(__pillar__["proxy"].get("admin_username", "root"))
+ if "fallback_admin_username" in __pillar__.get("proxy"):
+ usernames.append(__pillar__["proxy"].get("fallback_admin_username"))
for user in usernames:
- for pwd in __pillar__['proxy']['passwords']:
+ for pwd in __pillar__["proxy"]["passwords"]:
r = salt.modules.dracr.get_chassis_name(
- host=__pillar__['proxy']['host'],
+ host=__pillar__["proxy"]["host"],
admin_username=user,
- admin_password=pwd)
+ admin_password=pwd,
+ )
# Retcode will be present if the chassis_name call failed
try:
- if r.get('retcode', None) is None:
- __opts__['proxy']['admin_username'] = user
- __opts__['proxy']['admin_password'] = pwd
+ if r.get("retcode", None) is None:
+ __opts__["proxy"]["admin_username"] = user
+ __opts__["proxy"]["admin_password"] = pwd
return (user, pwd)
except AttributeError:
# Then the above was a string, and we can return the username
# and password
- __opts__['proxy']['admin_username'] = user
- __opts__['proxy']['admin_password'] = pwd
+ __opts__["proxy"]["admin_username"] = user
+ __opts__["proxy"]["admin_password"] = pwd
return (user, pwd)
- logger.debug('grains fx2.find_credentials found no valid credentials, using Dell default')
- return ('root', 'calvin')
+ logger.debug(
+ "grains fx2.find_credentials found no valid credentials, using Dell default"
+ )
+ return ("root", "calvin")
def _grains():
- '''
+ """
Get the grains from the proxied device
- '''
+ """
(username, password) = _find_credentials()
- r = salt.modules.dracr.system_info(host=__pillar__['proxy']['host'],
- admin_username=username,
- admin_password=password)
+ r = salt.modules.dracr.system_info(
+ host=__pillar__["proxy"]["host"],
+ admin_username=username,
+ admin_password=password,
+ )
- if r.get('retcode', 0) == 0:
+ if r.get("retcode", 0) == 0:
GRAINS_CACHE = r
else:
GRAINS_CACHE = {}
- GRAINS_CACHE.update(salt.modules.dracr.inventory(host=__pillar__['proxy']['host'],
- admin_username=username,
- admin_password=password))
+ GRAINS_CACHE.update(
+ salt.modules.dracr.inventory(
+ host=__pillar__["proxy"]["host"],
+ admin_username=username,
+ admin_password=password,
+ )
+ )
return GRAINS_CACHE
@@ -88,7 +103,7 @@ def fx2():
def kernel():
- return {'kernel': 'proxy'}
+ return {"kernel": "proxy"}
def location():
@@ -96,14 +111,16 @@ def location():
GRAINS_CACHE.update(_grains())
try:
- return {'location': GRAINS_CACHE.get('Chassis Information').get('Chassis Location')}
+ return {
+ "location": GRAINS_CACHE.get("Chassis Information").get("Chassis Location")
+ }
except AttributeError:
- return {'location': 'Unknown'}
+ return {"location": "Unknown"}
def os_family():
- return {'os_family': 'proxy'}
+ return {"os_family": "proxy"}
def os_data():
- return {'os_data': 'Unknown'}
+ return {"os_data": "Unknown"}
diff --git a/salt/grains/iscsi.py b/salt/grains/iscsi.py
index 56b6601439a..af62bb94859 100644
--- a/salt/grains/iscsi.py
+++ b/salt/grains/iscsi.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains for iSCSI Qualified Names (IQN).
.. versionadded:: 2018.3.0
@@ -9,7 +9,7 @@ To enable these grains set `iscsi_grains: True`.
.. code-block:: yaml
iscsi_grains: True
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
@@ -22,47 +22,47 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
-__virtualname__ = 'iscsi'
+__virtualname__ = "iscsi"
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
- if __opts__.get('iscsi_grains', False) is False:
+ if __opts__.get("iscsi_grains", False) is False:
return False
else:
return __virtualname__
def iscsi_iqn():
- '''
+ """
Return iSCSI IQN
- '''
+ """
grains = {}
- grains['iscsi_iqn'] = False
+ grains["iscsi_iqn"] = False
if salt.utils.platform.is_linux():
- grains['iscsi_iqn'] = _linux_iqn()
+ grains["iscsi_iqn"] = _linux_iqn()
elif salt.utils.platform.is_windows():
- grains['iscsi_iqn'] = _windows_iqn()
+ grains["iscsi_iqn"] = _windows_iqn()
elif salt.utils.platform.is_aix():
- grains['iscsi_iqn'] = _aix_iqn()
+ grains["iscsi_iqn"] = _aix_iqn()
return grains
def _linux_iqn():
- '''
+ """
Return iSCSI IQN from a Linux host.
- '''
+ """
ret = []
- initiator = '/etc/iscsi/initiatorname.iscsi'
+ initiator = "/etc/iscsi/initiatorname.iscsi"
try:
- with salt.utils.files.fopen(initiator, 'r') as _iscsi:
+ with salt.utils.files.fopen(initiator, "r") as _iscsi:
for line in _iscsi:
line = line.strip()
- if line.startswith('InitiatorName='):
- ret.append(line.split('=', 1)[1])
+ if line.startswith("InitiatorName="):
+ ret.append(line.split("=", 1)[1])
except IOError as ex:
if ex.errno != errno.ENOENT:
log.debug("Error while accessing '%s': %s", initiator, ex)
@@ -71,12 +71,12 @@ def _linux_iqn():
def _aix_iqn():
- '''
+ """
Return iSCSI IQN from an AIX host.
- '''
+ """
ret = []
- aix_cmd = 'lsattr -E -l iscsi0 | grep initiator_name'
+ aix_cmd = "lsattr -E -l iscsi0 | grep initiator_name"
aix_ret = salt.modules.cmdmod.run(aix_cmd)
if aix_ret[0].isalpha():
@@ -88,26 +88,27 @@ def _aix_iqn():
def _windows_iqn():
- '''
+ """
Return iSCSI IQN from a Windows host.
- '''
+ """
ret = []
- wmic = salt.utils.path.which('wmic')
+ wmic = salt.utils.path.which("wmic")
if not wmic:
return ret
- namespace = r'\\root\WMI'
- path = 'MSiSCSIInitiator_MethodClass'
- get = 'iSCSINodeName'
+ namespace = r"\\root\WMI"
+ path = "MSiSCSIInitiator_MethodClass"
+ get = "iSCSINodeName"
cmd_ret = salt.modules.cmdmod.run_all(
- '{0} /namespace:{1} path {2} get {3} /format:table'
- ''.format(wmic, namespace, path, get))
+ "{0} /namespace:{1} path {2} get {3} /format:table"
+ "".format(wmic, namespace, path, get)
+ )
- for line in cmd_ret['stdout'].splitlines():
- if line.startswith('iqn.'):
+ for line in cmd_ret["stdout"].splitlines():
+ if line.startswith("iqn."):
line = line.rstrip()
ret.append(line.rstrip())
diff --git a/salt/grains/junos.py b/salt/grains/junos.py
index 5da49f9a795..d2d2b5377b2 100644
--- a/salt/grains/junos.py
+++ b/salt/grains/junos.py
@@ -1,55 +1,56 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains for junos.
NOTE: this is a little complicated--junos can only be accessed
via salt-proxy-minion. Thus, some grains make sense to get
from the minion (PYTHONPATH), but others don't (ip_interfaces)
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import Salt libs
from salt.ext import six
-__proxyenabled__ = ['junos']
-__virtualname__ = 'junos'
+__proxyenabled__ = ["junos"]
+__virtualname__ = "junos"
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
- if 'proxy' not in __opts__:
+ if "proxy" not in __opts__:
return False
else:
return __virtualname__
def _remove_complex_types(dictionary):
- '''
+ """
    The facts returned can include some complex types that
    are not serializable by msgpack. Kill those.
- '''
+ """
for k, v in six.iteritems(dictionary):
if isinstance(v, dict):
dictionary[k] = _remove_complex_types(v)
- elif hasattr(v, 'to_eng_string'):
+ elif hasattr(v, "to_eng_string"):
dictionary[k] = v.to_eng_string()
return dictionary
def defaults():
- return {'os': 'proxy', 'kernel': 'unknown', 'osrelease': 'proxy'}
+ return {"os": "proxy", "kernel": "unknown", "osrelease": "proxy"}
def facts(proxy=None):
- if proxy is None or proxy['junos.initialized']() is False:
+ if proxy is None or proxy["junos.initialized"]() is False:
return {}
- return {'junos_facts': proxy['junos.get_serialized_facts']()}
+ return {"junos_facts": proxy["junos.get_serialized_facts"]()}
def os_family():
- return {'os_family': 'junos'}
+ return {"os_family": "junos"}
diff --git a/salt/grains/marathon.py b/salt/grains/marathon.py
index fd9c73c3245..0afcf781212 100644
--- a/salt/grains/marathon.py
+++ b/salt/grains/marathon.py
@@ -1,49 +1,53 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate marathon proxy minion grains.
.. versionadded:: 2015.8.2
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.http
import salt.utils.platform
-__proxyenabled__ = ['marathon']
-__virtualname__ = 'marathon'
+
+__proxyenabled__ = ["marathon"]
+__virtualname__ = "marathon"
def __virtual__():
- if salt.utils.platform.is_proxy() and 'proxy' in __opts__ and __opts__['proxy'].get('proxytype') == 'marathon':
+ if (
+ salt.utils.platform.is_proxy()
+ and "proxy" in __opts__
+ and __opts__["proxy"].get("proxytype") == "marathon"
+ ):
return __virtualname__
return False
def kernel():
- return {'kernel': 'marathon'}
+ return {"kernel": "marathon"}
def os():
- return {'os': 'marathon'}
+ return {"os": "marathon"}
def os_family():
- return {'os_family': 'marathon'}
+ return {"os_family": "marathon"}
def os_data():
- return {'os_data': 'marathon'}
+ return {"os_data": "marathon"}
def marathon():
response = salt.utils.http.query(
- "{0}/v2/info".format(__opts__['proxy'].get(
- 'base_url',
- "http://locahost:8080",
- )),
- decode_type='json',
+ "{0}/v2/info".format(
+ __opts__["proxy"].get("base_url", "http://locahost:8080",)
+ ),
+ decode_type="json",
decode=True,
)
- if not response or 'dict' not in response:
- return {'marathon': None}
- return {'marathon': response['dict']}
+ if not response or "dict" not in response:
+ return {"marathon": None}
+ return {"marathon": response["dict"]}
diff --git a/salt/grains/mdadm.py b/salt/grains/mdadm.py
index 182aedc9273..f5e0fc704a5 100644
--- a/salt/grains/mdadm.py
+++ b/salt/grains/mdadm.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
Detect MDADM RAIDs
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -14,25 +14,25 @@ log = logging.getLogger(__name__)
def mdadm():
- '''
+ """
Return list of mdadm devices
- '''
+ """
devices = set()
try:
- with salt.utils.files.fopen('/proc/mdstat', 'r') as mdstat:
+ with salt.utils.files.fopen("/proc/mdstat", "r") as mdstat:
for line in mdstat:
line = salt.utils.stringutils.to_unicode(line)
- if line.startswith('Personalities : '):
+ if line.startswith("Personalities : "):
continue
- if line.startswith('unused devices:'):
+ if line.startswith("unused devices:"):
continue
- if ' : ' in line:
- devices.add(line.split(' : ')[0])
+ if " : " in line:
+ devices.add(line.split(" : ")[0])
except IOError:
return {}
devices = sorted(devices)
if devices:
- log.trace('mdadm devices detected: %s', ', '.join(devices))
+ log.trace("mdadm devices detected: %s", ", ".join(devices))
- return {'mdadm': devices}
+ return {"mdadm": devices}
diff --git a/salt/grains/mdata.py b/salt/grains/mdata.py
index 37a56072ce9..1deda096c24 100644
--- a/salt/grains/mdata.py
+++ b/salt/grains/mdata.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
SmartOS Metadata grain provider
:maintainer: Jorge Schrauwen
@@ -9,12 +9,17 @@ SmartOS Metadata grain provider
.. versionadded:: nitrogen
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
# Import python libs
import os
-import logging
+
+# Solve the Chicken and egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmdmod
# Import salt libs
import salt.utils.dictupdate
@@ -22,120 +27,129 @@ import salt.utils.json
import salt.utils.path
import salt.utils.platform
-# Solve the Chicken and egg problem where grains need to run before any
-# of the modules are loaded and are generally available for any usage.
-import salt.modules.cmdmod
-
-__virtualname__ = 'mdata'
+__virtualname__ = "mdata"
__salt__ = {
- 'cmd.run': salt.modules.cmdmod.run,
+ "cmd.run": salt.modules.cmdmod.run,
}
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Figure out if we need to be loaded
- '''
+ """
## collect mdata grains in a SmartOS zone
if salt.utils.platform.is_smartos_zone():
return __virtualname__
## collect mdata grains in a LX zone
- if salt.utils.platform.is_linux() and 'BrandZ virtual linux' in os.uname():
+ if salt.utils.platform.is_linux() and "BrandZ virtual linux" in os.uname():
return __virtualname__
return False
def _user_mdata(mdata_list=None, mdata_get=None):
- '''
+ """
User Metadata
- '''
+ """
grains = {}
if not mdata_list:
- mdata_list = salt.utils.path.which('mdata-list')
+ mdata_list = salt.utils.path.which("mdata-list")
if not mdata_get:
- mdata_get = salt.utils.path.which('mdata-get')
+ mdata_get = salt.utils.path.which("mdata-get")
if not mdata_list or not mdata_get:
return grains
- for mdata_grain in __salt__['cmd.run'](mdata_list, ignore_retcode=True).splitlines():
- mdata_value = __salt__['cmd.run']('{0} {1}'.format(mdata_get, mdata_grain), ignore_retcode=True)
+ for mdata_grain in __salt__["cmd.run"](
+ mdata_list, ignore_retcode=True
+ ).splitlines():
+ mdata_value = __salt__["cmd.run"](
+ "{0} {1}".format(mdata_get, mdata_grain), ignore_retcode=True
+ )
- if not mdata_grain.startswith('sdc:'):
- if 'mdata' not in grains:
- grains['mdata'] = {}
+ if not mdata_grain.startswith("sdc:"):
+ if "mdata" not in grains:
+ grains["mdata"] = {}
- log.debug('found mdata entry %s with value %s', mdata_grain, mdata_value)
- mdata_grain = mdata_grain.replace('-', '_')
- mdata_grain = mdata_grain.replace(':', '_')
- grains['mdata'][mdata_grain] = mdata_value
+ log.debug("found mdata entry %s with value %s", mdata_grain, mdata_value)
+ mdata_grain = mdata_grain.replace("-", "_")
+ mdata_grain = mdata_grain.replace(":", "_")
+ grains["mdata"][mdata_grain] = mdata_value
return grains
def _sdc_mdata(mdata_list=None, mdata_get=None):
- '''
+ """
    SDC Metadata specified by their specs
https://eng.joyent.com/mdata/datadict.html
- '''
+ """
grains = {}
sdc_text_keys = [
- 'uuid',
- 'server_uuid',
- 'datacenter_name',
- 'hostname',
- 'dns_domain',
+ "uuid",
+ "server_uuid",
+ "datacenter_name",
+ "hostname",
+ "dns_domain",
]
sdc_json_keys = [
- 'resolvers',
- 'nics',
- 'routes',
+ "resolvers",
+ "nics",
+ "routes",
]
if not mdata_list:
- mdata_list = salt.utils.path.which('mdata-list')
+ mdata_list = salt.utils.path.which("mdata-list")
if not mdata_get:
- mdata_get = salt.utils.path.which('mdata-get')
+ mdata_get = salt.utils.path.which("mdata-get")
if not mdata_list or not mdata_get:
return grains
- for mdata_grain in sdc_text_keys+sdc_json_keys:
- mdata_value = __salt__['cmd.run']('{0} sdc:{1}'.format(mdata_get, mdata_grain), ignore_retcode=True)
+ for mdata_grain in sdc_text_keys + sdc_json_keys:
+ mdata_value = __salt__["cmd.run"](
+ "{0} sdc:{1}".format(mdata_get, mdata_grain), ignore_retcode=True
+ )
- if not mdata_value.startswith('No metadata for '):
- if 'mdata' not in grains:
- grains['mdata'] = {}
- if 'sdc' not in grains['mdata']:
- grains['mdata']['sdc'] = {}
+ if not mdata_value.startswith("No metadata for "):
+ if "mdata" not in grains:
+ grains["mdata"] = {}
+ if "sdc" not in grains["mdata"]:
+ grains["mdata"]["sdc"] = {}
- log.debug('found mdata entry sdc:%s with value %s', mdata_grain, mdata_value)
- mdata_grain = mdata_grain.replace('-', '_')
- mdata_grain = mdata_grain.replace(':', '_')
+ log.debug(
+ "found mdata entry sdc:%s with value %s", mdata_grain, mdata_value
+ )
+ mdata_grain = mdata_grain.replace("-", "_")
+ mdata_grain = mdata_grain.replace(":", "_")
if mdata_grain in sdc_json_keys:
- grains['mdata']['sdc'][mdata_grain] = salt.utils.json.loads(mdata_value)
+ grains["mdata"]["sdc"][mdata_grain] = salt.utils.json.loads(mdata_value)
else:
- grains['mdata']['sdc'][mdata_grain] = mdata_value
+ grains["mdata"]["sdc"][mdata_grain] = mdata_value
return grains
def mdata():
- '''
+ """
Provide grains from the SmartOS metadata
- '''
+ """
grains = {}
- mdata_list = salt.utils.path.which('mdata-list')
- mdata_get = salt.utils.path.which('mdata-get')
+ mdata_list = salt.utils.path.which("mdata-list")
+ mdata_get = salt.utils.path.which("mdata-get")
- grains = salt.utils.dictupdate.update(grains, _user_mdata(mdata_list, mdata_get), merge_lists=True)
- grains = salt.utils.dictupdate.update(grains, _sdc_mdata(mdata_list, mdata_get), merge_lists=True)
+ grains = salt.utils.dictupdate.update(
+ grains, _user_mdata(mdata_list, mdata_get), merge_lists=True
+ )
+ grains = salt.utils.dictupdate.update(
+ grains, _sdc_mdata(mdata_list, mdata_get), merge_lists=True
+ )
return grains
+
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
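
The two collectors above share one pattern: `mdata-list` enumerates keys, `mdata-get` fetches each value, and `-`/`:` are rewritten to `_` so the result is a legal grain name. A sketch using `subprocess` in place of `__salt__['cmd.run']`, meaningful only where the SmartOS mdata tools exist:

.. code-block:: python

    # Sketch of the user-metadata collection in _user_mdata() above.
    import subprocess

    def run(*cmd):
        return subprocess.run(cmd, capture_output=True, text=True).stdout

    grains = {}
    for key in run("mdata-list").splitlines():
        if key.startswith("sdc:"):
            continue  # sdc:* keys are collected by _sdc_mdata() instead
        value = run("mdata-get", key).strip()
        grain = key.replace("-", "_").replace(":", "_")
        grains.setdefault("mdata", {})[grain] = value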
diff --git a/salt/grains/metadata.py b/salt/grains/metadata.py
index 11512b47805..676153ff32e 100644
--- a/salt/grains/metadata.py
+++ b/salt/grains/metadata.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains from cloud metadata servers at 169.254.169.254
.. versionadded:: 2017.7.0
@@ -13,7 +13,7 @@ metadata server set `metadata_server_grains: True`.
metadata_server_grains: True
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -27,56 +27,60 @@ import salt.utils.http as http
import salt.utils.json
import salt.utils.stringutils
-
# metadata server information
-IP = '169.254.169.254'
-HOST = 'http://{0}/'.format(IP)
+IP = "169.254.169.254"
+HOST = "http://{0}/".format(IP)
def __virtual__():
- if __opts__.get('metadata_server_grains', False) is False:
+ if __opts__.get("metadata_server_grains", False) is False:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.settimeout(.1)
+ sock.settimeout(0.1)
result = sock.connect_ex((IP, 80))
if result != 0:
return False
- if http.query(os.path.join(HOST, 'latest/'), status=True).get('status') != 200:
+ if http.query(os.path.join(HOST, "latest/"), status=True).get("status") != 200:
return False
return True
def _search(prefix="latest/"):
- '''
+ """
Recursively look up all grains in the metadata server
- '''
+ """
ret = {}
linedata = http.query(os.path.join(HOST, prefix), headers=True)
- if 'body' not in linedata:
+ if "body" not in linedata:
return ret
- body = salt.utils.stringutils.to_unicode(linedata['body'])
- if linedata['headers'].get('Content-Type', 'text/plain') == 'application/octet-stream':
+ body = salt.utils.stringutils.to_unicode(linedata["body"])
+ if (
+ linedata["headers"].get("Content-Type", "text/plain")
+ == "application/octet-stream"
+ ):
return body
- for line in body.split('\n'):
- if line.endswith('/'):
+ for line in body.split("\n"):
+ if line.endswith("/"):
ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
- elif prefix == 'latest/':
+ elif prefix == "latest/":
# (gtmanfred) The first level should have a forward slash since
# they have stuff underneath. This will not be doubled up though,
# because lines ending with a slash are checked first.
- ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
- elif line.endswith(('dynamic', 'meta-data')):
+ ret[line] = _search(prefix=os.path.join(prefix, line + "/"))
+ elif line.endswith(("dynamic", "meta-data")):
ret[line] = _search(prefix=os.path.join(prefix, line))
- elif '=' in line:
- key, value = line.split('=')
+ elif "=" in line:
+ key, value = line.split("=")
ret[value] = _search(prefix=os.path.join(prefix, key))
else:
- retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
+ retdata = http.query(os.path.join(HOST, prefix, line)).get("body", None)
# (gtmanfred) This try except block is slightly faster than
# checking if the string starts with a curly brace
if isinstance(retdata, six.binary_type):
try:
- ret[line] = salt.utils.json.loads(salt.utils.stringutils.to_unicode(retdata))
+ ret[line] = salt.utils.json.loads(
+ salt.utils.stringutils.to_unicode(retdata)
+ )
except ValueError:
ret[line] = salt.utils.stringutils.to_unicode(retdata)
else:
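
The recursion in `_search()` is easier to see against a toy tree: entries ending in "/" recurse, leaf entries resolve to values. A self-contained sketch with a hypothetical in-memory tree standing in for `http://169.254.169.254/`:

.. code-block:: python

    # Toy version of the recursive metadata crawl in _search() above.
    TREE = {
        "latest/": "meta-data/",
        "latest/meta-data/": "instance-id\nplacement/",
        "latest/meta-data/instance-id": "i-0123456789abcdef0",
        "latest/meta-data/placement/": "availability-zone",
        "latest/meta-data/placement/availability-zone": "us-east-2a",
    }

    def search(prefix="latest/"):
        ret = {}
        for line in TREE[prefix].split("\n"):
            if line.endswith("/"):
                ret[line[:-1]] = search(prefix + line)
            elif prefix + line + "/" in TREE:
                ret[line] = search(prefix + line + "/")
            else:
                ret[line] = TREE[prefix + line]
        return ret

    print(search())
    # {'meta-data': {'instance-id': 'i-0123456789abcdef0',
    #                'placement': {'availability-zone': 'us-east-2a'}}}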
diff --git a/salt/grains/minion_process.py b/salt/grains/minion_process.py
index b1120de617f..0dc325422cc 100644
--- a/salt/grains/minion_process.py
+++ b/salt/grains/minion_process.py
@@ -1,67 +1,69 @@
# -*- coding: utf-8 -*-
-'''
+"""
Set grains describing the minion process.
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
+
import os
+import salt.utils.platform
+
# Import salt libs
import salt.utils.user
-import salt.utils.platform
def _uid():
- '''
+ """
Grain for the minion User ID
- '''
+ """
return salt.utils.user.get_uid()
def _username():
- '''
+ """
Grain for the minion username
- '''
+ """
return salt.utils.user.get_user()
def _gid():
- '''
+ """
Grain for the minion Group ID
- '''
+ """
return salt.utils.user.get_gid()
def _groupname():
- '''
+ """
Grain for the minion groupname
- '''
+ """
try:
- return salt.utils.user.get_default_group(_username()) or ''
+ return salt.utils.user.get_default_group(_username()) or ""
except KeyError:
- return ''
+ return ""
def _pid():
- '''
+ """
Return the current process pid
- '''
+ """
return os.getpid()
def grains():
- '''
+ """
Return the grains dictionary
- '''
+ """
ret = {
- 'username': _username(),
- 'groupname': _groupname(),
- 'pid': _pid(),
+ "username": _username(),
+ "groupname": _groupname(),
+ "pid": _pid(),
}
if not salt.utils.platform.is_windows():
- ret['gid'] = _gid()
- ret['uid'] = _uid()
+ ret["gid"] = _gid()
+ ret["uid"] = _uid()
return ret
diff --git a/salt/grains/napalm.py b/salt/grains/napalm.py
index d61ad9a38e0..45987c17766 100644
--- a/salt/grains/napalm.py
+++ b/salt/grains/napalm.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
NAPALM Grains
=============
@@ -14,23 +14,25 @@ Dependencies
- :mod:`NAPALM proxy module `
.. versionadded:: 2016.11.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-log = logging.getLogger(__name__)
# Salt lib
import salt.utils.dns
import salt.utils.napalm
+log = logging.getLogger(__name__)
+
+
# ----------------------------------------------------------------------------------------------------------------------
# grains properties
# ----------------------------------------------------------------------------------------------------------------------
-__virtualname__ = 'napalm'
-__proxyenabled__ = ['napalm']
+__virtualname__ = "napalm"
+__proxyenabled__ = ["napalm"]
# ----------------------------------------------------------------------------------------------------------------------
# global variables
@@ -40,8 +42,8 @@ GRAINS_CACHE = {}
DEVICE_CACHE = {}
_FORBIDDEN_OPT_ARGS = [
- 'secret', # used by IOS to enter in enable mode
- 'enable_password' # used by EOS
+ "secret", # used by IOS to enter in enable mode
+ "enable_password", # used by EOS
]
# ----------------------------------------------------------------------------------------------------------------------
@@ -50,45 +52,42 @@ _FORBIDDEN_OPT_ARGS = [
def __virtual__():
- '''
+ """
NAPALM library must be installed for this module to work and run in a (proxy) minion.
- '''
+ """
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
+
# ----------------------------------------------------------------------------------------------------------------------
# helpers
# ----------------------------------------------------------------------------------------------------------------------
def _retrieve_grains_cache(proxy=None):
- '''
+ """
Retrieves the grains from the network device if not cached already.
- '''
+ """
global GRAINS_CACHE
if not GRAINS_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
- GRAINS_CACHE = proxy['napalm.get_grains']()
+ GRAINS_CACHE = proxy["napalm.get_grains"]()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
- GRAINS_CACHE = salt.utils.napalm.call(
- DEVICE_CACHE,
- 'get_facts',
- **{}
- )
+ GRAINS_CACHE = salt.utils.napalm.call(DEVICE_CACHE, "get_facts", **{})
return GRAINS_CACHE
def _retrieve_device_cache(proxy=None):
- '''
+ """
Loads the network device details if not cached already.
- '''
+ """
global DEVICE_CACHE
if not DEVICE_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
- if 'napalm.get_device' in proxy:
- DEVICE_CACHE = proxy['napalm.get_device']()
+ if "napalm.get_device" in proxy:
+ DEVICE_CACHE = proxy["napalm.get_device"]()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
@@ -96,28 +95,29 @@ def _retrieve_device_cache(proxy=None):
def _get_grain(name, proxy=None):
- '''
+ """
Retrieves the grain value from the cached dictionary.
- '''
+ """
grains = _retrieve_grains_cache(proxy=proxy)
- if grains.get('result', False) and grains.get('out', {}):
- return grains.get('out').get(name)
+ if grains.get("result", False) and grains.get("out", {}):
+ return grains.get("out").get(name)
def _get_device_grain(name, proxy=None):
- '''
+ """
Retrieves device-specific grains.
- '''
+ """
device = _retrieve_device_cache(proxy=proxy)
return device.get(name.upper())
+
# ----------------------------------------------------------------------------------------------------------------------
# actual grains
# ----------------------------------------------------------------------------------------------------------------------
def getos(proxy=None):
- '''
+ """
Returns the Operating System name running on the network device.
Example: junos, iosxr, eos, ios etc.
@@ -127,12 +127,12 @@ def getos(proxy=None):
.. code-block:: bash
salt -G 'os:junos' test.ping
- '''
- return {'os': _get_device_grain('driver_name', proxy=proxy)}
+ """
+ return {"os": _get_device_grain("driver_name", proxy=proxy)}
def version(proxy=None):
- '''
+ """
Returns the OS version.
Example: 13.3R6.5, 6.0.2 etc.
@@ -155,12 +155,12 @@ def version(proxy=None):
MX480
edge01.muc01:
MX240
- '''
- return {'version': _get_grain('os_version', proxy=proxy)}
+ """
+ return {"version": _get_grain("os_version", proxy=proxy)}
def model(proxy=None):
- '''
+ """
Returns the network device chassis model.
Example: MX480, ASR-9904-AC etc.
@@ -170,12 +170,12 @@ def model(proxy=None):
.. code-block:: bash
salt -G 'model:MX480' net.traceroute 8.8.8.8
- '''
- return {'model': _get_grain('model', proxy=proxy)}
+ """
+ return {"model": _get_grain("model", proxy=proxy)}
def serial(proxy=None):
- '''
+ """
Returns the chassis serial number.
Example: FOX1234W00F
@@ -198,12 +198,12 @@ def serial(proxy=None):
FOXW00F003
edge01.mrs01:
FOXW00F004
- '''
- return {'serial': _get_grain('serial_number', proxy=proxy)}
+ """
+ return {"serial": _get_grain("serial_number", proxy=proxy)}
def vendor(proxy=None):
- '''
+ """
Returns the network device vendor.
Example: juniper, cisco, arista etc.
@@ -213,12 +213,12 @@ def vendor(proxy=None):
.. code-block:: bash
salt -G 'vendor:cisco' net.cli "shut"
- '''
- return {'vendor': _get_grain('vendor', proxy=proxy)}
+ """
+ return {"vendor": _get_grain("vendor", proxy=proxy)}
def uptime(proxy=None):
- '''
+ """
Returns the uptime in seconds.
CLI Example - select all devices started/restarted within the last hour:
@@ -226,12 +226,12 @@ def uptime(proxy=None):
.. code-block:: bash
salt -G 'uptime<3600' test.ping
- '''
- return {'uptime': _get_grain('uptime', proxy=proxy)}
+ """
+ return {"uptime": _get_grain("uptime", proxy=proxy)}
def interfaces(proxy=None):
- '''
+ """
Returns the complete interfaces list of the network device.
Example: ['lc-0/0/0', 'pfe-0/0/0', 'xe-1/3/0', 'lo0', 'irb', 'demux0', 'fxp0']
@@ -258,12 +258,12 @@ def interfaces(proxy=None):
True
edge01.kix01:
True
- '''
- return {'interfaces': _get_grain('interface_list', proxy=proxy)}
+ """
+ return {"interfaces": _get_grain("interface_list", proxy=proxy)}
def username(proxy=None):
- '''
+ """
Return the username.
.. versionadded:: 2017.7.0
@@ -282,15 +282,15 @@ def username(proxy=None):
True
device2:
True
- '''
+ """
if proxy and salt.utils.napalm.is_proxy(__opts__):
# only if proxy will override the username
# otherwise will use the default Salt grains
- return {'username': _get_device_grain('username', proxy=proxy)}
+ return {"username": _get_device_grain("username", proxy=proxy)}
def hostname(proxy=None):
- '''
+ """
Return the hostname as configured on the network device.
CLI Example:
@@ -309,12 +309,12 @@ def hostname(proxy=None):
edge01.bjm01
device3:
edge01.flw01
- '''
- return {'hostname': _get_grain('hostname', proxy=proxy)}
+ """
+ return {"hostname": _get_grain("hostname", proxy=proxy)}
def host(proxy=None):
- '''
+ """
This grain is set by the NAPALM grain module
only when running in a proxy minion.
When Salt is installed directly on the network device,
@@ -349,15 +349,15 @@ def host(proxy=None):
ip-172-31-11-193.us-east-2.compute.internal
device3:
ip-172-31-2-181.us-east-2.compute.internal
- '''
+ """
if proxy and salt.utils.napalm.is_proxy(__opts__):
# this grain is set only when running in a proxy minion
# otherwise will use the default Salt grains
- return {'host': _get_device_grain('hostname', proxy=proxy)}
+ return {"host": _get_device_grain("hostname", proxy=proxy)}
def host_dns(proxy=None):
- '''
+ """
Return the DNS information of the host.
This grain is a dictionary having two keys:
@@ -401,29 +401,24 @@ def host_dns(proxy=None):
- 172.31.8.167
AAAA:
- fd0f:9fd6:5fab::1
- '''
- if not __opts__.get('napalm_host_dns_grain', False):
+ """
+ if not __opts__.get("napalm_host_dns_grain", False):
return
device_host = host(proxy=proxy)
if device_host:
- device_host_value = device_host['host']
- host_dns_ret = {
- 'host_dns': {
- 'A': [],
- 'AAAA': []
- }
- }
- dns_a = salt.utils.dns.lookup(device_host_value, 'A')
+ device_host_value = device_host["host"]
+ host_dns_ret = {"host_dns": {"A": [], "AAAA": []}}
+ dns_a = salt.utils.dns.lookup(device_host_value, "A")
if dns_a:
- host_dns_ret['host_dns']['A'] = dns_a
- dns_aaaa = salt.utils.dns.lookup(device_host_value, 'AAAA')
+ host_dns_ret["host_dns"]["A"] = dns_a
+ dns_aaaa = salt.utils.dns.lookup(device_host_value, "AAAA")
if dns_aaaa:
- host_dns_ret['host_dns']['AAAA'] = dns_aaaa
+ host_dns_ret["host_dns"]["AAAA"] = dns_aaaa
return host_dns_ret
def optional_args(proxy=None):
- '''
+ """
Return the connection optional args.
.. note::
@@ -446,9 +441,9 @@ def optional_args(proxy=None):
True
device2:
True
- '''
- opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
+ """
+ opt_args = _get_device_grain("optional_args", proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
- return {'optional_args': opt_args}
+ return {"optional_args": opt_args}
diff --git a/salt/grains/nvme.py b/salt/grains/nvme.py
index 697c8562cd6..27e86f7320d 100644
--- a/salt/grains/nvme.py
+++ b/salt/grains/nvme.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains for NVMe Qualified Names (NQN).
.. versionadded:: 3000
@@ -9,7 +9,7 @@ To enable these grains set `nvme_grains: True`.
.. code-block:: yaml
nvme_grains: True
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
@@ -21,41 +21,41 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
-__virtualname__ = 'nvme'
+__virtualname__ = "nvme"
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
- if __opts__.get('nvme_grains', False) is False:
+ if __opts__.get("nvme_grains", False) is False:
return False
return __virtualname__
def nvme_nqn():
- '''
+ """
Return NVMe NQN
- '''
+ """
grains = {}
- grains['nvme_nqn'] = False
+ grains["nvme_nqn"] = False
if salt.utils.platform.is_linux():
- grains['nvme_nqn'] = _linux_nqn()
+ grains["nvme_nqn"] = _linux_nqn()
return grains
def _linux_nqn():
- '''
+ """
Return NVMe NQN from a Linux host.
- '''
+ """
ret = []
- initiator = '/etc/nvme/hostnqn'
+ initiator = "/etc/nvme/hostnqn"
try:
- with salt.utils.files.fopen(initiator, 'r') as _nvme:
+ with salt.utils.files.fopen(initiator, "r") as _nvme:
for line in _nvme:
line = line.strip()
- if line.startswith('nqn.'):
+ if line.startswith("nqn."):
ret.append(line)
except IOError as ex:
if ex.errno != errno.ENOENT:
diff --git a/salt/grains/nxos.py b/salt/grains/nxos.py
index c009f09861b..c02f8abdd13 100644
--- a/salt/grains/nxos.py
+++ b/salt/grains/nxos.py
@@ -1,29 +1,31 @@
# -*- coding: utf-8 -*-
-'''
+"""
Grains for Cisco NX OS Switches Proxy minions
.. versionadded:: 2016.11.0
For documentation on setting up the nxos proxy minion look in the documentation
for :mod:`salt.proxy.nxos`.
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
-# Import Salt Libs
-import salt.utils.platform
+import logging
+
import salt.modules.nxos
-import logging
+# Import Salt Libs
+import salt.utils.platform
+
log = logging.getLogger(__name__)
-__proxyenabled__ = ['nxos']
-__virtualname__ = 'nxos'
+__proxyenabled__ = ["nxos"]
+__virtualname__ = "nxos"
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'nxos':
+ if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "nxos":
return __virtualname__
except KeyError:
pass
@@ -34,6 +36,6 @@ def __virtual__():
def proxy_functions(proxy=None):
if proxy is None:
return {}
- if proxy['nxos.initialized']() is False:
+ if proxy["nxos.initialized"]() is False:
return {}
- return {'nxos': proxy['nxos.grains']()}
+ return {"nxos": proxy["nxos.grains"]()}
diff --git a/salt/grains/opts.py b/salt/grains/opts.py
index b2ffe689b04..78f6c394a2c 100644
--- a/salt/grains/opts.py
+++ b/salt/grains/opts.py
@@ -1,16 +1,17 @@
# -*- coding: utf-8 -*-
-'''
+"""
Simple grain to merge the opts into the grains directly if the grain_opts
configuration value is set
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
def opts():
- '''
+ """
Return the minion configuration settings
- '''
- if __opts__.get('grain_opts', False) or \
- (isinstance(__pillar__, dict) and __pillar__.get('grain_opts', False)):
+ """
+ if __opts__.get("grain_opts", False) or (
+ isinstance(__pillar__, dict) and __pillar__.get("grain_opts", False)
+ ):
return __opts__
return {}
diff --git a/salt/grains/panos.py b/salt/grains/panos.py
index 0c9f7aada9c..aead7896426 100644
--- a/salt/grains/panos.py
+++ b/salt/grains/panos.py
@@ -1,28 +1,30 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains for panos hosts.
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.proxy.panos
+
# Import Salt Libs
import salt.utils.platform
-import salt.proxy.panos
-__proxyenabled__ = ['panos']
-__virtualname__ = 'panos'
+__proxyenabled__ = ["panos"]
+__virtualname__ = "panos"
log = logging.getLogger(__file__)
-GRAINS_CACHE = {'os_family': 'panos'}
+GRAINS_CACHE = {"os_family": "panos"}
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'panos':
+ if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "panos":
return __virtualname__
except KeyError:
pass
@@ -33,6 +35,6 @@ def __virtual__():
def panos(proxy=None):
if not proxy:
return {}
- if proxy['panos.initialized']() is False:
+ if proxy["panos.initialized"]() is False:
return {}
- return {'panos': proxy['panos.grains']()}
+ return {"panos": proxy["panos.grains"]()}
diff --git a/salt/grains/philips_hue.py b/salt/grains/philips_hue.py
index 9ca6b1e2d3d..c52cedf4783 100644
--- a/salt/grains/philips_hue.py
+++ b/salt/grains/philips_hue.py
@@ -14,39 +14,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
+"""
Static grains for the Philips HUE lamps
.. versionadded:: 2015.8.3
-'''
+"""
-__proxyenabled__ = ['philips_hue']
+__proxyenabled__ = ["philips_hue"]
-__virtualname__ = 'hue'
+__virtualname__ = "hue"
def __virtual__():
- if 'proxy' not in __opts__:
+ if "proxy" not in __opts__:
return False
else:
return __virtualname__
def kernel():
- return {'kernel': 'RTOS'}
+ return {"kernel": "RTOS"}
def os():
- return {'os': 'FreeRTOS'}
+ return {"os": "FreeRTOS"}
def os_family():
- return {'os_family': 'RTOS'}
+ return {"os_family": "RTOS"}
def vendor():
- return {'vendor': 'Philips'}
+ return {"vendor": "Philips"}
def product():
- return {'product': 'HUE'}
+ return {"product": "HUE"}
diff --git a/salt/grains/rest_sample.py b/salt/grains/rest_sample.py
index 4ff9e3a124a..02ee95b2102 100644
--- a/salt/grains/rest_sample.py
+++ b/salt/grains/rest_sample.py
@@ -1,18 +1,22 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import salt.utils.platform
-__proxyenabled__ = ['rest_sample']
+__proxyenabled__ = ["rest_sample"]
-__virtualname__ = 'rest_sample'
+__virtualname__ = "rest_sample"
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
+ if (
+ salt.utils.platform.is_proxy()
+ and __opts__["proxy"]["proxytype"] == "rest_sample"
+ ):
return __virtualname__
except KeyError:
pass
@@ -21,31 +25,31 @@ def __virtual__():
def kernel():
- return {'kernel': 'proxy'}
+ return {"kernel": "proxy"}
def proxy_functions(proxy):
- '''
+ """
The loader will execute functions with one argument and pass
a reference to the proxymodules LazyLoader object. However,
    grains sometimes get called before the LazyLoader object is set up,
    so `proxy` might be None.
- '''
+ """
if proxy:
- return {'proxy_functions': proxy['rest_sample.fns']()}
+ return {"proxy_functions": proxy["rest_sample.fns"]()}
def os():
- return {'os': 'RestExampleOS'}
+ return {"os": "RestExampleOS"}
def location():
- return {'location': 'In this darn virtual machine. Let me out!'}
+ return {"location": "In this darn virtual machine. Let me out!"}
def os_family():
- return {'os_family': 'proxy'}
+ return {"os_family": "proxy"}
def os_data():
- return {'os_data': 'funkyHttp release 1.0.a.4.g'}
+ return {"os_data": "funkyHttp release 1.0.a.4.g"}
diff --git a/salt/grains/smartos.py b/salt/grains/smartos.py
index 6a92265c40b..8681ed9f38d 100644
--- a/salt/grains/smartos.py
+++ b/salt/grains/smartos.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
SmartOS grain provider
:maintainer: Jorge Schrauwen
@@ -9,13 +9,18 @@ SmartOS grain provider
.. versionadded:: nitrogen
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
# Import python libs
import os
import re
-import logging
+
+# Solve the Chicken and egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmdmod
# Import salt libs
import salt.utils.dictupdate
@@ -25,31 +30,27 @@ import salt.utils.platform
import salt.utils.stringutils
from salt.ext.six.moves import zip
-# Solve the Chicken and egg problem where grains need to run before any
-# of the modules are loaded and are generally available for any usage.
-import salt.modules.cmdmod
-
-__virtualname__ = 'smartos'
+__virtualname__ = "smartos"
__salt__ = {
- 'cmd.run': salt.modules.cmdmod.run,
+ "cmd.run": salt.modules.cmdmod.run,
}
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load when we are on SmartOS
- '''
+ """
if salt.utils.platform.is_smartos():
return __virtualname__
return False
def _smartos_computenode_data():
- '''
+ """
Return useful information from a SmartOS compute node
- '''
+ """
# Provides:
# vms_total
# vms_running
@@ -63,137 +64,149 @@ def _smartos_computenode_data():
# collect vm data
vms = {}
- for vm in __salt__['cmd.run']('vmadm list -p -o uuid,alias,state,type').split("\n"):
- vm = dict(list(zip(['uuid', 'alias', 'state', 'type'], vm.split(':'))))
- vms[vm['uuid']] = vm
- del vms[vm['uuid']]['uuid']
+ for vm in __salt__["cmd.run"]("vmadm list -p -o uuid,alias,state,type").split("\n"):
+ vm = dict(list(zip(["uuid", "alias", "state", "type"], vm.split(":"))))
+ vms[vm["uuid"]] = vm
+ del vms[vm["uuid"]]["uuid"]
# set vm grains
- grains['computenode_vms_total'] = len(vms)
- grains['computenode_vms_running'] = 0
- grains['computenode_vms_stopped'] = 0
- grains['computenode_vms_type'] = {'KVM': 0, 'LX': 0, 'OS': 0}
+ grains["computenode_vms_total"] = len(vms)
+ grains["computenode_vms_running"] = 0
+ grains["computenode_vms_stopped"] = 0
+ grains["computenode_vms_type"] = {"KVM": 0, "LX": 0, "OS": 0}
for vm in vms:
- if vms[vm]['state'].lower() == 'running':
- grains['computenode_vms_running'] += 1
- elif vms[vm]['state'].lower() == 'stopped':
- grains['computenode_vms_stopped'] += 1
+ if vms[vm]["state"].lower() == "running":
+ grains["computenode_vms_running"] += 1
+ elif vms[vm]["state"].lower() == "stopped":
+ grains["computenode_vms_stopped"] += 1
- if vms[vm]['type'] not in grains['computenode_vms_type']:
+ if vms[vm]["type"] not in grains["computenode_vms_type"]:
# NOTE: be prepared for when bhyve gets its own type
- grains['computenode_vms_type'][vms[vm]['type']] = 0
- grains['computenode_vms_type'][vms[vm]['type']] += 1
+ grains["computenode_vms_type"][vms[vm]["type"]] = 0
+ grains["computenode_vms_type"][vms[vm]["type"]] += 1
# sysinfo derived grains
- sysinfo = salt.utils.json.loads(__salt__['cmd.run']('sysinfo'))
- grains['computenode_sdc_version'] = sysinfo['SDC Version']
- grains['computenode_vm_capable'] = sysinfo['VM Capable']
- if sysinfo['VM Capable']:
- grains['computenode_vm_hw_virt'] = sysinfo['CPU Virtualization']
+ sysinfo = salt.utils.json.loads(__salt__["cmd.run"]("sysinfo"))
+ grains["computenode_sdc_version"] = sysinfo["SDC Version"]
+ grains["computenode_vm_capable"] = sysinfo["VM Capable"]
+ if sysinfo["VM Capable"]:
+ grains["computenode_vm_hw_virt"] = sysinfo["CPU Virtualization"]
# sysinfo derived smbios grains
- grains['manufacturer'] = sysinfo['Manufacturer']
- grains['productname'] = sysinfo['Product']
- grains['uuid'] = sysinfo['UUID']
+ grains["manufacturer"] = sysinfo["Manufacturer"]
+ grains["productname"] = sysinfo["Product"]
+ grains["uuid"] = sysinfo["UUID"]
return grains
def _smartos_zone_data():
- '''
+ """
Return useful information from a SmartOS zone
- '''
+ """
# Provides:
# zoneid
# zonename
# imageversion
grains = {
- 'zoneid': __salt__['cmd.run']('zoneadm list -p | awk -F: \'{ print $1 }\'', python_shell=True),
- 'zonename': __salt__['cmd.run']('zonename'),
- 'imageversion': 'Unknown',
+ "zoneid": __salt__["cmd.run"](
+ "zoneadm list -p | awk -F: '{ print $1 }'", python_shell=True
+ ),
+ "zonename": __salt__["cmd.run"]("zonename"),
+ "imageversion": "Unknown",
}
- imageversion = re.compile('Image:\\s(.+)')
- if os.path.isfile('/etc/product'):
- with salt.utils.files.fopen('/etc/product', 'r') as fp_:
+ imageversion = re.compile("Image:\\s(.+)")
+ if os.path.isfile("/etc/product"):
+ with salt.utils.files.fopen("/etc/product", "r") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
match = imageversion.match(line)
if match:
- grains['imageversion'] = match.group(1)
+ grains["imageversion"] = match.group(1)
return grains
def _smartos_zone_pkgsrc_data():
- '''
+ """
SmartOS zone pkgsrc information
- '''
+ """
# Provides:
# pkgsrcversion
# pkgsrcpath
grains = {
- 'pkgsrcversion': 'Unknown',
- 'pkgsrcpath': 'Unknown',
+ "pkgsrcversion": "Unknown",
+ "pkgsrcpath": "Unknown",
}
- pkgsrcversion = re.compile('^release:\\s(.+)')
- if os.path.isfile('/etc/pkgsrc_version'):
- with salt.utils.files.fopen('/etc/pkgsrc_version', 'r') as fp_:
+ pkgsrcversion = re.compile("^release:\\s(.+)")
+ if os.path.isfile("/etc/pkgsrc_version"):
+ with salt.utils.files.fopen("/etc/pkgsrc_version", "r") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
match = pkgsrcversion.match(line)
if match:
- grains['pkgsrcversion'] = match.group(1)
+ grains["pkgsrcversion"] = match.group(1)
- pkgsrcpath = re.compile('PKG_PATH=(.+)')
- if os.path.isfile('/opt/local/etc/pkg_install.conf'):
- with salt.utils.files.fopen('/opt/local/etc/pkg_install.conf', 'r') as fp_:
+ pkgsrcpath = re.compile("PKG_PATH=(.+)")
+ if os.path.isfile("/opt/local/etc/pkg_install.conf"):
+ with salt.utils.files.fopen("/opt/local/etc/pkg_install.conf", "r") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
match = pkgsrcpath.match(line)
if match:
- grains['pkgsrcpath'] = match.group(1)
+ grains["pkgsrcpath"] = match.group(1)
return grains
def _smartos_zone_pkgin_data():
- '''
+ """
SmartOS zone pkgsrc information
- '''
+ """
# Provides:
# pkgin_repositories
grains = {
- 'pkgin_repositories': [],
+ "pkgin_repositories": [],
}
- pkginrepo = re.compile('^(?:https|http|ftp|file)://.*$')
- if os.path.isfile('/opt/local/etc/pkgin/repositories.conf'):
- with salt.utils.files.fopen('/opt/local/etc/pkgin/repositories.conf', 'r') as fp_:
+ pkginrepo = re.compile("^(?:https|http|ftp|file)://.*$")
+ if os.path.isfile("/opt/local/etc/pkgin/repositories.conf"):
+ with salt.utils.files.fopen(
+ "/opt/local/etc/pkgin/repositories.conf", "r"
+ ) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if pkginrepo.match(line):
- grains['pkgin_repositories'].append(line)
+ grains["pkgin_repositories"].append(line)
return grains
def smartos():
- '''
+ """
Provide grains for SmartOS
- '''
+ """
grains = {}
if salt.utils.platform.is_smartos_zone():
- grains = salt.utils.dictupdate.update(grains, _smartos_zone_data(), merge_lists=True)
- grains = salt.utils.dictupdate.update(grains, _smartos_zone_pkgsrc_data(), merge_lists=True)
- grains = salt.utils.dictupdate.update(grains, _smartos_zone_pkgin_data(), merge_lists=True)
+ grains = salt.utils.dictupdate.update(
+ grains, _smartos_zone_data(), merge_lists=True
+ )
+ grains = salt.utils.dictupdate.update(
+ grains, _smartos_zone_pkgsrc_data(), merge_lists=True
+ )
+ grains = salt.utils.dictupdate.update(
+ grains, _smartos_zone_pkgin_data(), merge_lists=True
+ )
elif salt.utils.platform.is_smartos_globalzone():
- grains = salt.utils.dictupdate.update(grains, _smartos_computenode_data(), merge_lists=True)
+ grains = salt.utils.dictupdate.update(
+ grains, _smartos_computenode_data(), merge_lists=True
+ )
return grains
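
The compute-node collector above leans on the colon-separated output of `vmadm list -p -o uuid,alias,state,type`, zipping field names onto each row. A standalone sketch fed a hypothetical two-VM sample:

.. code-block:: python

    # Parse made-up `vmadm list -p -o uuid,alias,state,type` output the
    # way _smartos_computenode_data() above does.
    sample = (
        "0f2b1e1e-0000-0000-0000-000000000001:web01:running:OS\n"
        "0f2b1e1e-0000-0000-0000-000000000002:db01:stopped:KVM"
    )

    vms = {}
    for row in sample.split("\n"):
        vm = dict(zip(["uuid", "alias", "state", "type"], row.split(":")))
        vms[vm.pop("uuid")] = vm

    running = sum(1 for vm in vms.values() if vm["state"] == "running")
    print(len(vms), running)  # 2 1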
diff --git a/salt/grains/ssh_sample.py b/salt/grains/ssh_sample.py
index e8c04081e92..1985a14cb61 100644
--- a/salt/grains/ssh_sample.py
+++ b/salt/grains/ssh_sample.py
@@ -1,18 +1,22 @@
# -*- coding: utf-8 -*-
-'''
+"""
Generate baseline proxy minion grains
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import salt.utils.platform
-__proxyenabled__ = ['ssh_sample']
+__proxyenabled__ = ["ssh_sample"]
-__virtualname__ = 'ssh_sample'
+__virtualname__ = "ssh_sample"
def __virtual__():
try:
- if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'ssh_sample':
+ if (
+ salt.utils.platform.is_proxy()
+ and __opts__["proxy"]["proxytype"] == "ssh_sample"
+ ):
return __virtualname__
except KeyError:
pass
@@ -21,22 +25,22 @@ def __virtual__():
def kernel():
- return {'kernel': 'proxy'}
+ return {"kernel": "proxy"}
def proxy_functions(proxy):
- '''
+ """
The loader will execute functions with one argument and pass
a reference to the proxymodules LazyLoader object. However,
    grains sometimes get called before the LazyLoader object is set up,
    so `proxy` might be None.
- '''
- return {'proxy_functions': proxy['ssh_sample.fns']()}
+ """
+ return {"proxy_functions": proxy["ssh_sample.fns"]()}
def location():
- return {'location': 'At the other end of an SSH Tunnel!!'}
+ return {"location": "At the other end of an SSH Tunnel!!"}
def os_data():
- return {'os_data': 'DumbShell Endpoint release 4.09.g'}
+ return {"os_data": "DumbShell Endpoint release 4.09.g"}
diff --git a/salt/grains/zfs.py b/salt/grains/zfs.py
index fec70dfe8d0..d351611eb2a 100644
--- a/salt/grains/zfs.py
+++ b/salt/grains/zfs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
ZFS grain provider
:maintainer: Jorge Schrauwen
@@ -9,80 +9,81 @@ ZFS grain provider
.. versionadded:: 2018.3.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
+# Solve the Chicken and egg problem where grains need to run before any
+# of the modules are loaded and are generally available for any usage.
+import salt.modules.cmdmod
+
# Import salt libs
import salt.utils.dictupdate
import salt.utils.path
import salt.utils.platform
-
-# Solve the Chicken and egg problem where grains need to run before any
-# of the modules are loaded and are generally available for any usage.
-import salt.modules.cmdmod
import salt.utils.zfs
-__virtualname__ = 'zfs'
+__virtualname__ = "zfs"
__salt__ = {
- 'cmd.run': salt.modules.cmdmod.run,
+ "cmd.run": salt.modules.cmdmod.run,
}
__utils__ = {
- 'zfs.is_supported': salt.utils.zfs.is_supported,
- 'zfs.has_feature_flags': salt.utils.zfs.has_feature_flags,
- 'zfs.zpool_command': salt.utils.zfs.zpool_command,
- 'zfs.to_size': salt.utils.zfs.to_size,
+ "zfs.is_supported": salt.utils.zfs.is_supported,
+ "zfs.has_feature_flags": salt.utils.zfs.has_feature_flags,
+ "zfs.zpool_command": salt.utils.zfs.zpool_command,
+ "zfs.to_size": salt.utils.zfs.to_size,
}
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Load zfs grains
- '''
+ """
# NOTE: we always load this grain so we can properly export
# at least the zfs_support grain
# except for Windows... don't try to load this on Windows (#51703)
if salt.utils.platform.is_windows():
- return False, 'ZFS: Not available on Windows'
+ return False, "ZFS: Not available on Windows"
return __virtualname__
def _zfs_pool_data():
- '''
+ """
Provide grains about zpools
- '''
+ """
grains = {}
# collect zpool data
- zpool_list_cmd = __utils__['zfs.zpool_command'](
- 'list',
- flags=['-H'],
- opts={'-o': 'name,size'},
+ zpool_list_cmd = __utils__["zfs.zpool_command"](
+ "list", flags=["-H"], opts={"-o": "name,size"},
)
- for zpool in __salt__['cmd.run'](zpool_list_cmd, ignore_retcode=True).splitlines():
- if 'zpool' not in grains:
- grains['zpool'] = {}
+ for zpool in __salt__["cmd.run"](zpool_list_cmd, ignore_retcode=True).splitlines():
+ if "zpool" not in grains:
+ grains["zpool"] = {}
zpool = zpool.split()
- grains['zpool'][zpool[0]] = __utils__['zfs.to_size'](zpool[1], False)
+ grains["zpool"][zpool[0]] = __utils__["zfs.to_size"](zpool[1], False)
# return grain data
return grains
def zfs():
- '''
+ """
Provide grains for zfs/zpool
- '''
+ """
grains = {}
- grains['zfs_support'] = __utils__['zfs.is_supported']()
- grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']()
- if grains['zfs_support']:
- grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True)
+ grains["zfs_support"] = __utils__["zfs.is_supported"]()
+ grains["zfs_feature_flags"] = __utils__["zfs.has_feature_flags"]()
+ if grains["zfs_support"]:
+ grains = salt.utils.dictupdate.update(
+ grains, _zfs_pool_data(), merge_lists=True
+ )
return grains
+
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
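
For reference, `_zfs_pool_data()` above reduces to parsing the two-column output of `zpool list -H -o name,size`; the real code additionally normalizes sizes through `__utils__['zfs.to_size']`. A sketch on hypothetical output:

.. code-block:: python

    # Parse made-up `zpool list -H -o name,size` output into the zpool
    # grain shape used above (sizes left as raw strings here).
    sample = "rpool\t39.8G\ntank\t10.9T"

    zpool = {}
    for line in sample.splitlines():
        name, size = line.split()
        zpool[name] = size

    print({"zpool": zpool})  # {'zpool': {'rpool': '39.8G', 'tank': '10.9T'}}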
diff --git a/salt/key.py b/salt/key.py
index 6cc4c33a755..6d6cf5ded4a 100644
--- a/salt/key.py
+++ b/salt/key.py
@@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
-'''
+"""
The Salt Key backend API and interface used by the CLI. The Key class can be
used to manage salt keys directly without interfacing with the CLI.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
-import shutil
+
import fnmatch
import logging
+import os
+import shutil
# Import salt libs
import salt.cache
@@ -33,6 +34,7 @@ import salt.utils.user
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import input, zip_longest
+
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
@@ -43,100 +45,108 @@ def get_key(opts):
class KeyCLI(object):
- '''
+ """
Manage key CLI operations
- '''
- CLI_KEY_MAP = {'list': 'list_status',
- 'delete': 'delete_key',
- 'gen_signature': 'gen_keys_signature',
- 'print': 'key_str',
- }
+ """
+
+ CLI_KEY_MAP = {
+ "list": "list_status",
+ "delete": "delete_key",
+ "gen_signature": "gen_keys_signature",
+ "print": "key_str",
+ }
def __init__(self, opts):
self.opts = opts
self.client = salt.wheel.WheelClient(opts)
self.key = Key
# instantiate the key object for masterless mode
- if not opts.get('eauth'):
+ if not opts.get("eauth"):
self.key = self.key(opts)
self.auth = None
def _update_opts(self):
# get the key command
- for cmd in ('gen_keys',
- 'gen_signature',
- 'list',
- 'list_all',
- 'print',
- 'print_all',
- 'accept',
- 'accept_all',
- 'reject',
- 'reject_all',
- 'delete',
- 'delete_all',
- 'finger',
- 'finger_all',
- 'list_all'): # last is default
+ for cmd in (
+ "gen_keys",
+ "gen_signature",
+ "list",
+ "list_all",
+ "print",
+ "print_all",
+ "accept",
+ "accept_all",
+ "reject",
+ "reject_all",
+ "delete",
+ "delete_all",
+ "finger",
+ "finger_all",
+ "list_all",
+ ): # last is default
if self.opts[cmd]:
break
# set match if needed
- if not cmd.startswith('gen_'):
- if cmd == 'list_all':
- self.opts['match'] = 'all'
- elif cmd.endswith('_all'):
- self.opts['match'] = '*'
+ if not cmd.startswith("gen_"):
+ if cmd == "list_all":
+ self.opts["match"] = "all"
+ elif cmd.endswith("_all"):
+ self.opts["match"] = "*"
else:
- self.opts['match'] = self.opts[cmd]
- if cmd.startswith('accept'):
- self.opts['include_rejected'] = self.opts['include_all'] or self.opts['include_rejected']
- self.opts['include_accepted'] = False
- elif cmd.startswith('reject'):
- self.opts['include_accepted'] = self.opts['include_all'] or self.opts['include_accepted']
- self.opts['include_rejected'] = False
- elif cmd == 'gen_keys':
- self.opts['keydir'] = self.opts['gen_keys_dir']
- self.opts['keyname'] = self.opts['gen_keys']
+ self.opts["match"] = self.opts[cmd]
+ if cmd.startswith("accept"):
+ self.opts["include_rejected"] = (
+ self.opts["include_all"] or self.opts["include_rejected"]
+ )
+ self.opts["include_accepted"] = False
+ elif cmd.startswith("reject"):
+ self.opts["include_accepted"] = (
+ self.opts["include_all"] or self.opts["include_accepted"]
+ )
+ self.opts["include_rejected"] = False
+ elif cmd == "gen_keys":
+ self.opts["keydir"] = self.opts["gen_keys_dir"]
+ self.opts["keyname"] = self.opts["gen_keys"]
# match is set to opts, now we can forget about *_all commands
- self.opts['fun'] = cmd.replace('_all', '')
+ self.opts["fun"] = cmd.replace("_all", "")
def _init_auth(self):
if self.auth:
return
low = {}
- skip_perm_errors = self.opts['eauth'] != ''
+ skip_perm_errors = self.opts["eauth"] != ""
- if self.opts['eauth']:
- if 'token' in self.opts:
+ if self.opts["eauth"]:
+ if "token" in self.opts:
try:
- with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_:
- low['key'] = \
- salt.utils.stringutils.to_unicode(fp_.readline())
+ with salt.utils.files.fopen(
+ os.path.join(self.opts["cachedir"], ".root_key"), "r"
+ ) as fp_:
+ low["key"] = salt.utils.stringutils.to_unicode(fp_.readline())
except IOError:
- low['token'] = self.opts['token']
+ low["token"] = self.opts["token"]
#
# If using eauth and a token hasn't already been loaded into
# low, prompt the user to enter auth credentials
- if 'token' not in low and 'key' not in low and self.opts['eauth']:
+ if "token" not in low and "key" not in low and self.opts["eauth"]:
# This is expensive. Don't do it unless we need to.
resolver = salt.auth.Resolver(self.opts)
- res = resolver.cli(self.opts['eauth'])
- if self.opts['mktoken'] and res:
- tok = resolver.token_cli(
- self.opts['eauth'],
- res
- )
+ res = resolver.cli(self.opts["eauth"])
+ if self.opts["mktoken"] and res:
+ tok = resolver.token_cli(self.opts["eauth"], res)
if tok:
- low['token'] = tok.get('token', '')
+ low["token"] = tok.get("token", "")
if not res:
- log.error('Authentication failed')
+ log.error("Authentication failed")
return {}
low.update(res)
- low['eauth'] = self.opts['eauth']
+ low["eauth"] = self.opts["eauth"]
else:
- low['user'] = salt.utils.user.get_specific_user()
- low['key'] = salt.utils.master.get_master_key(low['user'], self.opts, skip_perm_errors)
+ low["user"] = salt.utils.user.get_specific_user()
+ low["key"] = salt.utils.master.get_master_key(
+ low["user"], self.opts, skip_perm_errors
+ )
self.auth = low
@@ -147,8 +157,9 @@ class KeyCLI(object):
if argspec.args:
# Iterate in reverse order to ensure we get the correct default
# value for the positional argument.
- for arg, default in zip_longest(reversed(argspec.args),
- reversed(argspec.defaults or ())):
+ for arg, default in zip_longest(
+ reversed(argspec.args), reversed(argspec.defaults or ())
+ ):
args.append(self.opts.get(arg, default))
# Reverse the args so that they are in the correct order
args = args[::-1]
@@ -156,31 +167,32 @@ class KeyCLI(object):
if argspec.keywords is None:
kwargs = {}
else:
- args, kwargs = salt.minion.load_args_and_kwargs(
- fun,
- args)
+ args, kwargs = salt.minion.load_args_and_kwargs(fun, args)
return args, kwargs
def _run_cmd(self, cmd, args=None):
- if not self.opts.get('eauth'):
+ if not self.opts.get("eauth"):
cmd = self.CLI_KEY_MAP.get(cmd, cmd)
fun = getattr(self.key, cmd)
args, kwargs = self._get_args_kwargs(fun, args)
ret = fun(*args, **kwargs)
- if (isinstance(ret, dict) and 'local' in ret and
- cmd not in ('finger', 'finger_all')):
- ret.pop('local', None)
+ if (
+ isinstance(ret, dict)
+ and "local" in ret
+ and cmd not in ("finger", "finger_all")
+ ):
+ ret.pop("local", None)
return ret
- fstr = 'key.{0}'.format(cmd)
+ fstr = "key.{0}".format(cmd)
fun = self.client.functions[fstr]
args, kwargs = self._get_args_kwargs(fun, args)
low = {
- 'fun': fstr,
- 'arg': args,
- 'kwarg': kwargs,
- }
+ "fun": fstr,
+ "arg": args,
+ "kwarg": kwargs,
+ }
self._init_auth()
low.update(self.auth)
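
One behavior worth calling out in `_run_cmd()` above: in masterless mode (no `eauth`), CLI verbs are translated through `CLI_KEY_MAP` and dispatched directly onto the `Key` object; with `eauth` set, the call is routed through the wheel client instead. A minimal sketch of the mapping step:

.. code-block:: python

    # Resolve a CLI verb to a Key method name, as _run_cmd() above does
    # before calling getattr() on the Key instance.
    CLI_KEY_MAP = {
        "list": "list_status",
        "delete": "delete_key",
        "gen_signature": "gen_keys_signature",
        "print": "key_str",
    }

    def resolve(cmd):
        # Verbs without an entry ('accept', 'reject', ...) keep their name.
        return CLI_KEY_MAP.get(cmd, cmd)

    print(resolve("list"), resolve("accept"))  # list_status accept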
@@ -188,292 +200,305 @@ class KeyCLI(object):
# Execute the key request!
ret = self.client.cmd_sync(low)
- ret = ret['data']['return']
- if (isinstance(ret, dict) and 'local' in ret and
- cmd not in ('finger', 'finger_all')):
- ret.pop('local', None)
+ ret = ret["data"]["return"]
+ if (
+ isinstance(ret, dict)
+ and "local" in ret
+ and cmd not in ("finger", "finger_all")
+ ):
+ ret.pop("local", None)
return ret
def _filter_ret(self, cmd, ret):
- if cmd.startswith('delete'):
+ if cmd.startswith("delete"):
return ret
keys = {}
if self.key.PEND in ret:
keys[self.key.PEND] = ret[self.key.PEND]
- if self.opts['include_accepted'] and bool(ret.get(self.key.ACC)):
+ if self.opts["include_accepted"] and bool(ret.get(self.key.ACC)):
keys[self.key.ACC] = ret[self.key.ACC]
- if self.opts['include_rejected'] and bool(ret.get(self.key.REJ)):
+ if self.opts["include_rejected"] and bool(ret.get(self.key.REJ)):
keys[self.key.REJ] = ret[self.key.REJ]
- if self.opts['include_denied'] and bool(ret.get(self.key.DEN)):
+ if self.opts["include_denied"] and bool(ret.get(self.key.DEN)):
keys[self.key.DEN] = ret[self.key.DEN]
return keys
def _print_no_match(self, cmd, match):
- statuses = ['unaccepted']
- if self.opts['include_accepted']:
- statuses.append('accepted')
- if self.opts['include_rejected']:
- statuses.append('rejected')
- if self.opts['include_denied']:
- statuses.append('denied')
+ statuses = ["unaccepted"]
+ if self.opts["include_accepted"]:
+ statuses.append("accepted")
+ if self.opts["include_rejected"]:
+ statuses.append("rejected")
+ if self.opts["include_denied"]:
+ statuses.append("denied")
if len(statuses) == 1:
stat_str = statuses[0]
else:
- stat_str = '{0} or {1}'.format(', '.join(statuses[:-1]), statuses[-1])
- msg = 'The key glob \'{0}\' does not match any {1} keys.'.format(match, stat_str)
+ stat_str = "{0} or {1}".format(", ".join(statuses[:-1]), statuses[-1])
+ msg = "The key glob '{0}' does not match any {1} keys.".format(match, stat_str)
print(msg)
def run(self):
- '''
+ """
Run the logic for saltkey
- '''
+ """
self._update_opts()
- cmd = self.opts['fun']
+ cmd = self.opts["fun"]
veri = None
ret = None
try:
- if cmd in ('accept', 'reject', 'delete'):
- ret = self._run_cmd('name_match')
+ if cmd in ("accept", "reject", "delete"):
+ ret = self._run_cmd("name_match")
if not isinstance(ret, dict):
- salt.output.display_output(ret, 'key', opts=self.opts)
+ salt.output.display_output(ret, "key", opts=self.opts)
return ret
ret = self._filter_ret(cmd, ret)
if not ret:
- self._print_no_match(cmd, self.opts['match'])
+ self._print_no_match(cmd, self.opts["match"])
return
- print('The following keys are going to be {0}ed:'.format(cmd.rstrip('e')))
- salt.output.display_output(ret, 'key', opts=self.opts)
+ print(
+ "The following keys are going to be {0}ed:".format(cmd.rstrip("e"))
+ )
+ salt.output.display_output(ret, "key", opts=self.opts)
- if not self.opts.get('yes', False):
+ if not self.opts.get("yes", False):
try:
- if cmd.startswith('delete'):
- veri = input('Proceed? [N/y] ')
+ if cmd.startswith("delete"):
+ veri = input("Proceed? [N/y] ")
if not veri:
- veri = 'n'
+ veri = "n"
else:
- veri = input('Proceed? [n/Y] ')
+ veri = input("Proceed? [n/Y] ")
if not veri:
- veri = 'y'
+ veri = "y"
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
# accept/reject/delete the same keys we're printed to the user
- self.opts['match_dict'] = ret
- self.opts.pop('match', None)
+ self.opts["match_dict"] = ret
+ self.opts.pop("match", None)
list_ret = ret
- if veri is None or veri.lower().startswith('y'):
+ if veri is None or veri.lower().startswith("y"):
ret = self._run_cmd(cmd)
- if cmd in ('accept', 'reject', 'delete'):
- if cmd == 'delete':
+ if cmd in ("accept", "reject", "delete"):
+ if cmd == "delete":
ret = list_ret
for minions in ret.values():
for minion in minions:
- print('Key for minion {0} {1}ed.'.format(minion,
- cmd.rstrip('e')))
+ print(
+ "Key for minion {0} {1}ed.".format(
+ minion, cmd.rstrip("e")
+ )
+ )
elif isinstance(ret, dict):
- salt.output.display_output(ret, 'key', opts=self.opts)
+ salt.output.display_output(ret, "key", opts=self.opts)
else:
- salt.output.display_output({'return': ret}, 'key', opts=self.opts)
+ salt.output.display_output({"return": ret}, "key", opts=self.opts)
except salt.exceptions.SaltException as exc:
- ret = '{0}'.format(exc)
- if not self.opts.get('quiet', False):
- salt.output.display_output(ret, 'nested', self.opts)
+ ret = "{0}".format(exc)
+ if not self.opts.get("quiet", False):
+ salt.output.display_output(ret, "nested", self.opts)
return ret
class Key(object):
- '''
+ """
The object that encapsulates saltkey actions
- '''
- ACC = 'minions'
- PEND = 'minions_pre'
- REJ = 'minions_rejected'
- DEN = 'minions_denied'
+ """
+
+ ACC = "minions"
+ PEND = "minions_pre"
+ REJ = "minions_rejected"
+ DEN = "minions_denied"
def __init__(self, opts, io_loop=None):
self.opts = opts
- kind = self.opts.get('__role', '') # application kind
+ kind = self.opts.get("__role", "") # application kind
if kind not in salt.utils.kinds.APPL_KINDS:
emsg = "Invalid application kind = '{0}'.".format(kind)
log.error(emsg)
raise ValueError(emsg)
self.event = salt.utils.event.get_event(
- kind,
- opts['sock_dir'],
- opts['transport'],
- opts=opts,
- listen=False,
- io_loop=io_loop
- )
+ kind,
+ opts["sock_dir"],
+ opts["transport"],
+ opts=opts,
+ listen=False,
+ io_loop=io_loop,
+ )
- self.passphrase = salt.utils.sdb.sdb_get(self.opts.get('signing_key_pass'), self.opts)
+ self.passphrase = salt.utils.sdb.sdb_get(
+ self.opts.get("signing_key_pass"), self.opts
+ )
def _check_minions_directories(self):
- '''
+ """
Return the minion keys directory paths
- '''
- minions_accepted = os.path.join(self.opts['pki_dir'], self.ACC)
- minions_pre = os.path.join(self.opts['pki_dir'], self.PEND)
- minions_rejected = os.path.join(self.opts['pki_dir'],
- self.REJ)
+ """
+ minions_accepted = os.path.join(self.opts["pki_dir"], self.ACC)
+ minions_pre = os.path.join(self.opts["pki_dir"], self.PEND)
+ minions_rejected = os.path.join(self.opts["pki_dir"], self.REJ)
- minions_denied = os.path.join(self.opts['pki_dir'],
- self.DEN)
+ minions_denied = os.path.join(self.opts["pki_dir"], self.DEN)
return minions_accepted, minions_pre, minions_rejected, minions_denied
- def _get_key_attrs(self, keydir, keyname,
- keysize, user):
+ def _get_key_attrs(self, keydir, keyname, keysize, user):
if not keydir:
- if 'gen_keys_dir' in self.opts:
- keydir = self.opts['gen_keys_dir']
+ if "gen_keys_dir" in self.opts:
+ keydir = self.opts["gen_keys_dir"]
else:
- keydir = self.opts['pki_dir']
+ keydir = self.opts["pki_dir"]
if not keyname:
- if 'gen_keys' in self.opts:
- keyname = self.opts['gen_keys']
+ if "gen_keys" in self.opts:
+ keyname = self.opts["gen_keys"]
else:
- keyname = 'minion'
+ keyname = "minion"
if not keysize:
- keysize = self.opts['keysize']
+ keysize = self.opts["keysize"]
return keydir, keyname, keysize, user
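# A resolution sketch (illustrative values, not part of this changeset): with
# opts = {"gen_keys_dir": "/tmp/pki", "gen_keys": "web1", "keysize": 2048},
# _get_key_attrs(None, None, None, None) resolves to ("/tmp/pki", "web1",
# 2048, None); without those opts it falls back to opts["pki_dir"], "minion",
# and opts["keysize"].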
def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None):
- '''
+ """
Generate minion RSA public keypair
- '''
- keydir, keyname, keysize, user = self._get_key_attrs(keydir, keyname,
- keysize, user)
+ """
+ keydir, keyname, keysize, user = self._get_key_attrs(
+ keydir, keyname, keysize, user
+ )
salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase)
- return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + '.pub'))
+ return salt.utils.crypt.pem_finger(os.path.join(keydir, keyname + ".pub"))
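# Minimal usage sketch (assumes an existing Key instance `keymgr`; the paths
# and names here are illustrative only):
#
#     finger = keymgr.gen_keys(keydir="/tmp/pki", keyname="web1", keysize=2048)
#     # writes /tmp/pki/web1.pem and /tmp/pki/web1.pub and returns the
#     # fingerprint of the new public key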
def gen_signature(self, privkey, pubkey, sig_path):
- '''
+ """
Generate master public-key-signature
- '''
- return salt.crypt.gen_signature(privkey,
- pubkey,
- sig_path,
- self.passphrase)
+ """
+ return salt.crypt.gen_signature(privkey, pubkey, sig_path, self.passphrase)
- def gen_keys_signature(self, priv, pub, signature_path, auto_create=False, keysize=None):
- '''
+ def gen_keys_signature(
+ self, priv, pub, signature_path, auto_create=False, keysize=None
+ ):
+ """
Generate master public-key-signature
- '''
+ """
# check given pub-key
if pub:
if not os.path.isfile(pub):
- return 'Public-key {0} does not exist'.format(pub)
+ return "Public-key {0} does not exist".format(pub)
# default to master.pub
else:
- mpub = self.opts['pki_dir'] + '/' + 'master.pub'
+ mpub = self.opts["pki_dir"] + "/" + "master.pub"
if os.path.isfile(mpub):
pub = mpub
# check given priv-key
if priv:
if not os.path.isfile(priv):
- return 'Private-key {0} does not exist'.format(priv)
+ return "Private-key {0} does not exist".format(priv)
# default to master_sign.pem
else:
- mpriv = self.opts['pki_dir'] + '/' + 'master_sign.pem'
+ mpriv = self.opts["pki_dir"] + "/" + "master_sign.pem"
if os.path.isfile(mpriv):
priv = mpriv
if not priv:
if auto_create:
log.debug(
- 'Generating new signing key-pair .%s.* in %s',
- self.opts['master_sign_key_name'], self.opts['pki_dir']
+ "Generating new signing key-pair .%s.* in %s",
+ self.opts["master_sign_key_name"],
+ self.opts["pki_dir"],
+ )
+ salt.crypt.gen_keys(
+ self.opts["pki_dir"],
+ self.opts["master_sign_key_name"],
+ keysize or self.opts["keysize"],
+ self.opts.get("user"),
+ self.passphrase,
)
- salt.crypt.gen_keys(self.opts['pki_dir'],
- self.opts['master_sign_key_name'],
- keysize or self.opts['keysize'],
- self.opts.get('user'),
- self.passphrase)
- priv = self.opts['pki_dir'] + '/' + self.opts['master_sign_key_name'] + '.pem'
+ priv = (
+ self.opts["pki_dir"]
+ + "/"
+ + self.opts["master_sign_key_name"]
+ + ".pem"
+ )
else:
- return 'No usable private-key found'
+ return "No usable private-key found"
if not pub:
- return 'No usable public-key found'
+ return "No usable public-key found"
- log.debug('Using public-key %s', pub)
- log.debug('Using private-key %s', priv)
+ log.debug("Using public-key %s", pub)
+ log.debug("Using private-key %s", priv)
if signature_path:
if not os.path.isdir(signature_path):
- log.debug('target directory %s does not exist', signature_path)
+ log.debug("target directory %s does not exist", signature_path)
else:
- signature_path = self.opts['pki_dir']
+ signature_path = self.opts["pki_dir"]
- sign_path = signature_path + '/' + self.opts['master_pubkey_signature']
+ sign_path = signature_path + "/" + self.opts["master_pubkey_signature"]
skey = get_key(self.opts)
return skey.gen_signature(priv, pub, sign_path)
def check_minion_cache(self, preserve_minions=None):
- '''
+ """
Check the minion cache to make sure that old minion data is cleared
Optionally, pass in a list of minions which should have their caches
preserved. To preserve all caches, set __opts__['preserve_minion_cache']
- '''
+ """
if preserve_minions is None:
preserve_minions = []
keys = self.list_keys()
minions = []
for key, val in six.iteritems(keys):
minions.extend(val)
- if not self.opts.get('preserve_minion_cache', False):
- m_cache = os.path.join(self.opts['cachedir'], self.ACC)
+ if not self.opts.get("preserve_minion_cache", False):
+ m_cache = os.path.join(self.opts["cachedir"], self.ACC)
if os.path.isdir(m_cache):
for minion in os.listdir(m_cache):
if minion not in minions and minion not in preserve_minions:
try:
shutil.rmtree(os.path.join(m_cache, minion))
except (OSError, IOError) as ex:
- log.warning('Key: Delete cache for %s got OSError/IOError: %s \n',
- minion,
- ex)
+ log.warning(
+ "Key: Delete cache for %s got OSError/IOError: %s \n",
+ minion,
+ ex,
+ )
continue
cache = salt.cache.factory(self.opts)
clist = cache.list(self.ACC)
if clist:
for minion in clist:
if minion not in minions and minion not in preserve_minions:
- cache.flush('{0}/{1}'.format(self.ACC, minion))
+ cache.flush("{0}/{1}".format(self.ACC, minion))
def check_master(self):
- '''
+ """
Log if the master is not running
:rtype: bool
:return: Whether or not the master is running
- '''
- if not os.path.exists(
- os.path.join(
- self.opts['sock_dir'],
- 'publish_pull.ipc'
- )
- ):
+ """
+ if not os.path.exists(os.path.join(self.opts["sock_dir"], "publish_pull.ipc")):
return False
return True
def name_match(self, match, full=False):
- '''
+ """
Accept a glob to match the name of a key and return the key's location
- '''
+ """
if full:
matches = self.all_keys()
else:
matches = self.list_keys()
ret = {}
- if ',' in match and isinstance(match, six.string_types):
- match = match.split(',')
+ if "," in match and isinstance(match, six.string_types):
+ match = match.split(",")
for status, keys in six.iteritems(matches):
for key in salt.utils.data.sorted_ignorecase(keys):
if isinstance(match, list):
@@ -490,10 +515,10 @@ class Key(object):
return ret
def dict_match(self, match_dict):
- '''
+ """
Accept a dictionary of keys and return the current state of the
specified keys
- '''
+ """
ret = {}
cur_keys = self.list_keys()
for status, keys in six.iteritems(match_dict):
@@ -504,21 +529,21 @@ class Key(object):
return ret
def local_keys(self):
- '''
+ """
Return a dict of local keys
- '''
- ret = {'local': []}
- for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(self.opts['pki_dir'])):
- if fn_.endswith('.pub') or fn_.endswith('.pem'):
- path = os.path.join(self.opts['pki_dir'], fn_)
+ """
+ ret = {"local": []}
+ for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(self.opts["pki_dir"])):
+ if fn_.endswith(".pub") or fn_.endswith(".pem"):
+ path = os.path.join(self.opts["pki_dir"], fn_)
if os.path.isfile(path):
- ret['local'].append(fn_)
+ ret["local"].append(fn_)
return ret
def list_keys(self):
- '''
+ """
Return a dict of managed keys and what their statuses are
- '''
+ """
key_dirs = self._check_minions_directories()
ret = {}
@@ -529,7 +554,7 @@ class Key(object):
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
- if not fn_.startswith('.'):
+ if not fn_.startswith("."):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(
salt.utils.stringutils.to_unicode(fn_)
@@ -540,80 +565,80 @@ class Key(object):
return ret
def all_keys(self):
- '''
+ """
Merge managed keys with local keys
- '''
+ """
keys = self.list_keys()
keys.update(self.local_keys())
return keys
def list_status(self, match):
- '''
+ """
Return a dict of managed keys under a named status
- '''
+ """
acc, pre, rej, den = self._check_minions_directories()
ret = {}
- if match.startswith('acc'):
+ if match.startswith("acc"):
ret[os.path.basename(acc)] = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(acc)):
- if not fn_.startswith('.'):
+ if not fn_.startswith("."):
if os.path.isfile(os.path.join(acc, fn_)):
ret[os.path.basename(acc)].append(fn_)
- elif match.startswith('pre') or match.startswith('un'):
+ elif match.startswith("pre") or match.startswith("un"):
ret[os.path.basename(pre)] = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(pre)):
- if not fn_.startswith('.'):
+ if not fn_.startswith("."):
if os.path.isfile(os.path.join(pre, fn_)):
ret[os.path.basename(pre)].append(fn_)
- elif match.startswith('rej'):
+ elif match.startswith("rej"):
ret[os.path.basename(rej)] = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(rej)):
- if not fn_.startswith('.'):
+ if not fn_.startswith("."):
if os.path.isfile(os.path.join(rej, fn_)):
ret[os.path.basename(rej)].append(fn_)
- elif match.startswith('den') and den is not None:
+ elif match.startswith("den") and den is not None:
ret[os.path.basename(den)] = []
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(den)):
- if not fn_.startswith('.'):
+ if not fn_.startswith("."):
if os.path.isfile(os.path.join(den, fn_)):
ret[os.path.basename(den)].append(fn_)
- elif match.startswith('all'):
+ elif match.startswith("all"):
return self.all_keys()
return ret
def key_str(self, match):
- '''
+ """
Return the specified public key or keys based on a glob
- '''
+ """
ret = {}
for status, keys in six.iteritems(self.name_match(match)):
ret[status] = {}
for key in salt.utils.data.sorted_ignorecase(keys):
- path = os.path.join(self.opts['pki_dir'], status, key)
- with salt.utils.files.fopen(path, 'r') as fp_:
- ret[status][key] = \
- salt.utils.stringutils.to_unicode(fp_.read())
+ path = os.path.join(self.opts["pki_dir"], status, key)
+ with salt.utils.files.fopen(path, "r") as fp_:
+ ret[status][key] = salt.utils.stringutils.to_unicode(fp_.read())
return ret
def key_str_all(self):
- '''
+ """
Return all managed key strings
- '''
+ """
ret = {}
for status, keys in six.iteritems(self.list_keys()):
ret[status] = {}
for key in salt.utils.data.sorted_ignorecase(keys):
- path = os.path.join(self.opts['pki_dir'], status, key)
- with salt.utils.files.fopen(path, 'r') as fp_:
- ret[status][key] = \
- salt.utils.stringutils.to_unicode(fp_.read())
+ path = os.path.join(self.opts["pki_dir"], status, key)
+ with salt.utils.files.fopen(path, "r") as fp_:
+ ret[status][key] = salt.utils.stringutils.to_unicode(fp_.read())
return ret
- def accept(self, match=None, match_dict=None, include_rejected=False, include_denied=False):
- '''
+ def accept(
+ self, match=None, match_dict=None, include_rejected=False, include_denied=False
+ ):
+ """
Accept public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
- '''
+ """
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
@@ -629,64 +654,41 @@ class Key(object):
for key in matches.get(keydir, []):
try:
shutil.move(
- os.path.join(
- self.opts['pki_dir'],
- keydir,
- key),
- os.path.join(
- self.opts['pki_dir'],
- self.ACC,
- key)
- )
- eload = {'result': True,
- 'act': 'accept',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.path.join(self.opts["pki_dir"], keydir, key),
+ os.path.join(self.opts["pki_dir"], self.ACC, key),
+ )
+ eload = {"result": True, "act": "accept", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (IOError, OSError):
pass
- return (
- self.name_match(match) if match is not None
- else self.dict_match(matches)
- )
+ return self.name_match(match) if match is not None else self.dict_match(matches)
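# Usage sketch (assumes a master config at the default path; the variable
# names are illustrative, not taken from this changeset):
#
#     import salt.config
#     import salt.key
#
#     opts = salt.config.master_config("/etc/salt/master")
#     keymgr = salt.key.Key(opts)
#     keymgr.accept(match="web*")                          # glob of key names
#     keymgr.accept(match_dict={"minions_pre": ["web1"]})  # pre-gathered dict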
def accept_all(self):
- '''
+ """
Accept all keys in pre
- '''
+ """
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
- os.path.join(
- self.opts['pki_dir'],
- self.PEND,
- key),
- os.path.join(
- self.opts['pki_dir'],
- self.ACC,
- key)
- )
- eload = {'result': True,
- 'act': 'accept',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.path.join(self.opts["pki_dir"], self.PEND, key),
+ os.path.join(self.opts["pki_dir"], self.ACC, key),
+ )
+ eload = {"result": True, "act": "accept", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (IOError, OSError):
pass
return self.list_keys()
- def delete_key(self,
- match=None,
- match_dict=None,
- preserve_minions=None,
- revoke_auth=False):
- '''
+ def delete_key(
+ self, match=None, match_dict=None, preserve_minions=None, revoke_auth=False
+ ):
+ """
Delete public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
To preserve the master caches of matched minions, set preserve_minions.
- '''
+ """
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
@@ -697,81 +699,75 @@ class Key(object):
for key in keys:
try:
if revoke_auth:
- if self.opts.get('rotate_aes_key') is False:
- print('Immediate auth revocation specified but AES key rotation not allowed. '
- 'Minion will not be disconnected until the master AES key is rotated.')
+ if self.opts.get("rotate_aes_key") is False:
+ print(
+ "Immediate auth revocation specified but AES key rotation not allowed. "
+ "Minion will not be disconnected until the master AES key is rotated."
+ )
else:
try:
client = salt.client.get_local_client(mopts=self.opts)
- client.cmd_async(key, 'saltutil.revoke_auth')
+ client.cmd_async(key, "saltutil.revoke_auth")
except salt.exceptions.SaltClientError:
- print('Cannot contact Salt master. '
- 'Connection for {0} will remain up until '
- 'master AES key is rotated or auth is revoked '
- 'with \'saltutil.revoke_auth\'.'.format(key))
- os.remove(os.path.join(self.opts['pki_dir'], status, key))
- eload = {'result': True,
- 'act': 'delete',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ print(
+ "Cannot contact Salt master. "
+ "Connection for {0} will remain up until "
+ "master AES key is rotated or auth is revoked "
+ "with 'saltutil.revoke_auth'.".format(key)
+ )
+ os.remove(os.path.join(self.opts["pki_dir"], status, key))
+ eload = {"result": True, "act": "delete", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (OSError, IOError):
pass
- if self.opts.get('preserve_minions') is True:
- self.check_minion_cache(preserve_minions=matches.get('minions', []))
+ if self.opts.get("preserve_minions") is True:
+ self.check_minion_cache(preserve_minions=matches.get("minions", []))
else:
self.check_minion_cache()
- if self.opts.get('rotate_aes_key'):
- salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
- return (
- self.name_match(match) if match is not None
- else self.dict_match(matches)
- )
+ if self.opts.get("rotate_aes_key"):
+ salt.crypt.dropfile(self.opts["cachedir"], self.opts["user"])
+ return self.name_match(match) if match is not None else self.dict_match(matches)
def delete_den(self):
- '''
+ """
Delete all denied keys
- '''
+ """
for status, keys in six.iteritems(self.list_keys()):
if status != self.DEN:
continue
for key in keys:
try:
- os.remove(os.path.join(self.opts['pki_dir'], status, key))
- eload = {'result': True,
- 'act': 'delete',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.remove(os.path.join(self.opts["pki_dir"], status, key))
+ eload = {"result": True, "act": "delete", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (OSError, IOError):
pass
self.check_minion_cache()
return self.list_keys()
def delete_all(self):
- '''
+ """
Delete all keys
- '''
+ """
for status, keys in six.iteritems(self.list_keys()):
for key in keys:
try:
- os.remove(os.path.join(self.opts['pki_dir'], status, key))
- eload = {'result': True,
- 'act': 'delete',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.remove(os.path.join(self.opts["pki_dir"], status, key))
+ eload = {"result": True, "act": "delete", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (OSError, IOError):
pass
self.check_minion_cache()
- if self.opts.get('rotate_aes_key'):
- salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
+ if self.opts.get("rotate_aes_key"):
+ salt.crypt.dropfile(self.opts["cachedir"], self.opts["user"])
return self.list_keys()
- def reject(self, match=None, match_dict=None, include_accepted=False, include_denied=False):
- '''
+ def reject(
+ self, match=None, match_dict=None, include_accepted=False, include_denied=False
+ ):
+ """
Reject public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
- '''
+ """
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
@@ -787,92 +783,71 @@ class Key(object):
for key in matches.get(keydir, []):
try:
shutil.move(
- os.path.join(
- self.opts['pki_dir'],
- keydir,
- key),
- os.path.join(
- self.opts['pki_dir'],
- self.REJ,
- key)
- )
- eload = {'result': True,
- 'act': 'reject',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.path.join(self.opts["pki_dir"], keydir, key),
+ os.path.join(self.opts["pki_dir"], self.REJ, key),
+ )
+ eload = {"result": True, "act": "reject", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (IOError, OSError):
pass
self.check_minion_cache()
- if self.opts.get('rotate_aes_key'):
- salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
- return (
- self.name_match(match) if match is not None
- else self.dict_match(matches)
- )
+ if self.opts.get("rotate_aes_key"):
+ salt.crypt.dropfile(self.opts["cachedir"], self.opts["user"])
+ return self.name_match(match) if match is not None else self.dict_match(matches)
def reject_all(self):
- '''
+ """
Reject all keys in pre
- '''
+ """
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
- os.path.join(
- self.opts['pki_dir'],
- self.PEND,
- key),
- os.path.join(
- self.opts['pki_dir'],
- self.REJ,
- key)
- )
- eload = {'result': True,
- 'act': 'reject',
- 'id': key}
- self.event.fire_event(eload,
- salt.utils.event.tagify(prefix='key'))
+ os.path.join(self.opts["pki_dir"], self.PEND, key),
+ os.path.join(self.opts["pki_dir"], self.REJ, key),
+ )
+ eload = {"result": True, "act": "reject", "id": key}
+ self.event.fire_event(eload, salt.utils.event.tagify(prefix="key"))
except (IOError, OSError):
pass
self.check_minion_cache()
- if self.opts.get('rotate_aes_key'):
- salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
+ if self.opts.get("rotate_aes_key"):
+ salt.crypt.dropfile(self.opts["cachedir"], self.opts["user"])
return self.list_keys()
def finger(self, match, hash_type=None):
- '''
+ """
Return the fingerprint for a specified key
- '''
+ """
if hash_type is None:
- hash_type = __opts__['hash_type']
+ hash_type = __opts__["hash_type"]
matches = self.name_match(match, True)
ret = {}
for status, keys in six.iteritems(matches):
ret[status] = {}
for key in keys:
- if status == 'local':
- path = os.path.join(self.opts['pki_dir'], key)
+ if status == "local":
+ path = os.path.join(self.opts["pki_dir"], key)
else:
- path = os.path.join(self.opts['pki_dir'], status, key)
+ path = os.path.join(self.opts["pki_dir"], status, key)
ret[status][key] = salt.utils.crypt.pem_finger(path, sum_type=hash_type)
return ret
def finger_all(self, hash_type=None):
- '''
+ """
Return fingerprints for all keys
- '''
+ """
if hash_type is None:
- hash_type = __opts__['hash_type']
+ hash_type = __opts__["hash_type"]
ret = {}
for status, keys in six.iteritems(self.all_keys()):
ret[status] = {}
for key in keys:
- if status == 'local':
- path = os.path.join(self.opts['pki_dir'], key)
+ if status == "local":
+ path = os.path.join(self.opts["pki_dir"], key)
else:
- path = os.path.join(self.opts['pki_dir'], status, key)
+ path = os.path.join(self.opts["pki_dir"], status, key)
ret[status][key] = salt.utils.crypt.pem_finger(path, sum_type=hash_type)
return ret
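# Fingerprint sketch (illustrative, with a Key instance `keymgr`):
#
#     keymgr.finger("web*")                  # digest from __opts__["hash_type"]
#     keymgr.finger_all(hash_type="sha256")  # explicit digest type
#
# Both return a {status: {key_name: fingerprint}} mapping.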
diff --git a/salt/loader.py b/salt/loader.py
index 428fb338c96..939edca45dd 100644
--- a/salt/loader.py
+++ b/salt/loader.py
@@ -1,21 +1,22 @@
# -*- coding: utf-8 -*-
-'''
+"""
The Salt loader is the core of Salt's plugin system: it scans
directories for Python-loadable code and organizes that code into the
plugin interfaces used by Salt.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+import functools
+import inspect
+import logging
import os
import re
import sys
-import time
-import logging
-import inspect
import tempfile
-import functools
import threading
+import time
import traceback
import types
from zipimport import zipimporter
@@ -33,31 +34,37 @@ import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
-import salt.utils.versions
import salt.utils.stringutils
+import salt.utils.versions
from salt.exceptions import LoaderError
-from salt.template import check_render_pipe_str
-from salt.utils.decorators import Depends
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import reload_module
+from salt.template import check_render_pipe_str
+from salt.utils.decorators import Depends
if sys.version_info[:2] >= (3, 5):
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
+
USE_IMPORTLIB = True
else:
import imp
+
USE_IMPORTLIB = False
try:
from collections.abc import MutableMapping
except ImportError:
+ # pylint: disable=no-name-in-module
from collections import MutableMapping
+ # pylint: enable=no-name-in-module
+
try:
import pkg_resources
+
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
@@ -65,7 +72,7 @@ except ImportError:
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
-LOADED_BASE_NAME = 'salt.loaded'
+LOADED_BASE_NAME = "salt.loaded"
if USE_IMPORTLIB:
# pylint: disable=no-member
@@ -75,22 +82,23 @@ if USE_IMPORTLIB:
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
- SUFFIXES.append((suffix, 'rb', MODULE_KIND_EXTENSION))
+ SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
- SUFFIXES.append((suffix, 'rb', MODULE_KIND_SOURCE))
+ SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
- SUFFIXES.append((suffix, 'rb', MODULE_KIND_COMPILED))
+ SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
- MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader
+ MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
# pylint: enable=no-member
else:
SUFFIXES = imp.get_suffixes()
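# For orientation (a sketch, not part of this diff): each SUFFIXES entry is a
# (suffix, mode, kind) triple, e.g. (".py", "rb", MODULE_KIND_SOURCE) on
# Python 3, mirroring the triple shape imp.get_suffixes() returns on Python 2.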
-PY3_PRE_EXT = \
- re.compile(r'\.cpython-{0}{1}(\.opt-[1-9])?'.format(*sys.version_info[:2]))
+PY3_PRE_EXT = re.compile(
+ r"\.cpython-{0}{1}(\.opt-[1-9])?".format(*sys.version_info[:2])
+)
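# Behavioral sketch (assumes CPython 3.6, so the pattern is
# r"\.cpython-36(\.opt-[1-9])?"): PY3_PRE_EXT strips the interpreter tag from
# cached bytecode names, e.g.
#
#     PY3_PRE_EXT.sub("", "grains.cpython-36.opt-1.pyc")  # -> "grains.pyc"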
# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`
# which simplifies code readability, it adds some unsupported functions into
@@ -98,9 +106,9 @@ PY3_PRE_EXT = \
# We list unsupported functions here. These will be removed from the loaded modules.
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
- 'parallels.avail_sizes',
- 'parallels.avail_locations',
- 'proxmox.avail_sizes',
+ "parallels.avail_sizes",
+ "parallels.avail_locations",
+ "proxmox.avail_sizes",
)
# Will be set to pyximport module at runtime if cython is enabled in config.
@@ -108,25 +116,19 @@ pyximport = None
def static_loader(
- opts,
- ext_type,
- tag,
- pack=None,
- int_type=None,
- ext_dirs=True,
- ext_type_dirs=None,
- base_path=None,
- filter_name=None,
- ):
+ opts,
+ ext_type,
+ tag,
+ pack=None,
+ int_type=None,
+ ext_dirs=True,
+ ext_type_dirs=None,
+ base_path=None,
+ filter_name=None,
+):
funcs = LazyLoader(
_module_dirs(
- opts,
- ext_type,
- tag,
- int_type,
- ext_dirs,
- ext_type_dirs,
- base_path,
+ opts, ext_type, tag, int_type, ext_dirs, ext_type_dirs, base_path,
),
opts,
tag=tag,
@@ -142,57 +144,65 @@ def static_loader(
def _format_entrypoint_target(ep):
- '''
+ """
Makes a string describing the target of an EntryPoint object.
Based strongly on EntryPoint.__str__().
- '''
+ """
s = ep.module_name
if ep.attrs:
- s += ':' + '.'.join(ep.attrs)
+ s += ":" + ".".join(ep.attrs)
return s
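# Example sketch (hypothetical entry point): one declared as
# "saltext = my_pkg.loader:module_dirs" is formatted here as
# "my_pkg.loader:module_dirs" (module_name plus dot-joined attrs).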
def _module_dirs(
- opts,
- ext_type,
- tag=None,
- int_type=None,
- ext_dirs=True,
- ext_type_dirs=None,
- base_path=None,
- ):
+ opts,
+ ext_type,
+ tag=None,
+ int_type=None,
+ ext_dirs=True,
+ ext_type_dirs=None,
+ base_path=None,
+):
if tag is None:
tag = ext_type
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
- ext_types = os.path.join(opts['extension_modules'], ext_type)
+ ext_types = os.path.join(opts["extension_modules"], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
- ext_type_dirs = '{0}_dirs'.format(tag)
+ ext_type_dirs = "{0}_dirs".format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
- for entry_point in pkg_resources.iter_entry_points('salt.loader', ext_type_dirs):
+ for entry_point in pkg_resources.iter_entry_points(
+ "salt.loader", ext_type_dirs
+ ):
try:
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
except Exception as exc: # pylint: disable=broad-except
- log.error("Error getting module directories from %s: %s", _format_entrypoint_target(entry_point), exc)
- log.debug("Full backtrace for module directories error", exc_info=True)
+ log.error(
+ "Error getting module directories from %s: %s",
+ _format_entrypoint_target(entry_point),
+ exc,
+ )
+ log.debug(
+ "Full backtrace for module directories error", exc_info=True
+ )
cli_module_dirs = []
# The dirs can be any module dir, or an in-tree _{ext_type} dir
- for _dir in opts.get('module_dirs', []):
+ for _dir in opts.get("module_dirs", []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
- maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type))
+ maybe_dir = os.path.join(_dir, "_{0}".format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
@@ -200,16 +210,17 @@ def _module_dirs(
def minion_mods(
- opts,
- context=None,
- utils=None,
- whitelist=None,
- initial_load=False,
- loaded_base_name=None,
- notify=False,
- static_modules=None,
- proxy=None):
- '''
+ opts,
+ context=None,
+ utils=None,
+ whitelist=None,
+ initial_load=False,
+ loaded_base_name=None,
+ notify=False,
+ static_modules=None,
+ proxy=None,
+):
+ """
Load execution modules
Returns a dictionary of execution modules appropriate for the current
@@ -242,26 +253,26 @@ def minion_mods(
__utils__ = salt.loader.utils(__opts__)
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)
__salt__['test.ping']()
- '''
+ """
# TODO Publish documentation for module whitelisting
if not whitelist:
- whitelist = opts.get('whitelist_modules', None)
+ whitelist = opts.get("whitelist_modules", None)
ret = LazyLoader(
- _module_dirs(opts, 'modules', 'module'),
+ _module_dirs(opts, "modules", "module"),
opts,
- tag='module',
- pack={'__context__': context, '__utils__': utils, '__proxy__': proxy},
+ tag="module",
+ pack={"__context__": context, "__utils__": utils, "__proxy__": proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
)
- ret.pack['__salt__'] = ret
+ ret.pack["__salt__"] = ret
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
- providers = opts.get('providers', False)
+ providers = opts.get("providers", False)
if providers and isinstance(providers, dict):
for mod in providers:
# sometimes providers opts is not to diverge modules but
@@ -273,18 +284,18 @@ def minion_mods(
else:
if funcs:
for func in funcs:
- f_key = '{0}{1}'.format(mod, func[func.rindex('.'):])
+ f_key = "{0}{1}".format(mod, func[func.rindex(".") :])
ret[f_key] = funcs[func]
if notify:
- with salt.utils.event.get_event('minion', opts=opts, listen=False) as evt:
- evt.fire_event({'complete': True}, tag='/salt/minion/minion_mod_complete')
+ with salt.utils.event.get_event("minion", opts=opts, listen=False) as evt:
+ evt.fire_event({"complete": True}, tag="/salt/minion/minion_mod_complete")
return ret
-def raw_mod(opts, name, functions, mod='modules'):
- '''
+def raw_mod(opts, name, functions, mod="modules"):
+ """
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
@@ -295,13 +306,13 @@ def raw_mod(opts, name, functions, mod='modules'):
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
- '''
+ """
loader = LazyLoader(
- _module_dirs(opts, mod, 'module'),
+ _module_dirs(opts, mod, "module"),
opts,
- tag='rawmodule',
+ tag="rawmodule",
virtual_enable=False,
- pack={'__salt__': functions},
+ pack={"__salt__": functions},
)
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
@@ -312,233 +323,209 @@ def raw_mod(opts, name, functions, mod='modules'):
def metaproxy(opts):
- '''
+ """
Return functions used in the meta proxy
- '''
+ """
- return LazyLoader(
- _module_dirs(opts, 'metaproxy'),
- opts,
- tag='metaproxy'
- )
+ return LazyLoader(_module_dirs(opts, "metaproxy"), opts, tag="metaproxy")
def matchers(opts):
- '''
+ """
Return the matcher services plugins
- '''
- return LazyLoader(
- _module_dirs(opts, 'matchers'),
- opts,
- tag='matchers'
- )
+ """
+ return LazyLoader(_module_dirs(opts, "matchers"), opts, tag="matchers")
def engines(opts, functions, runners, utils, proxy=None):
- '''
+ """
Return the master services plugins
- '''
- pack = {'__salt__': functions,
- '__runners__': runners,
- '__proxy__': proxy,
- '__utils__': utils}
- return LazyLoader(
- _module_dirs(opts, 'engines'),
- opts,
- tag='engines',
- pack=pack,
- )
+ """
+ pack = {
+ "__salt__": functions,
+ "__runners__": runners,
+ "__proxy__": proxy,
+ "__utils__": utils,
+ }
+ return LazyLoader(_module_dirs(opts, "engines"), opts, tag="engines", pack=pack,)
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
- '''
+ """
Returns the proxy module for this salt-proxy-minion
- '''
+ """
ret = LazyLoader(
- _module_dirs(opts, 'proxy'),
+ _module_dirs(opts, "proxy"),
opts,
- tag='proxy',
- pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
+ tag="proxy",
+ pack={"__salt__": functions, "__ret__": returners, "__utils__": utils},
)
- ret.pack['__proxy__'] = ret
+ ret.pack["__proxy__"] = ret
return ret
def returners(opts, functions, whitelist=None, context=None, proxy=None):
- '''
+ """
Returns the returner modules
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'returners', 'returner'),
+ _module_dirs(opts, "returners", "returner"),
opts,
- tag='returner',
+ tag="returner",
whitelist=whitelist,
- pack={'__salt__': functions, '__context__': context, '__proxy__': proxy or {}},
+ pack={"__salt__": functions, "__context__": context, "__proxy__": proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=proxy):
- '''
+ """
Returns the utility modules
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'utils', ext_type_dirs='utils_dirs'),
+ _module_dirs(opts, "utils", ext_type_dirs="utils_dirs"),
opts,
- tag='utils',
+ tag="utils",
whitelist=whitelist,
- pack={'__context__': context, '__proxy__': proxy or {}},
+ pack={"__context__": context, "__proxy__": proxy or {}},
)
def pillars(opts, functions, context=None):
- '''
+ """
Returns the pillars modules
- '''
- ret = LazyLoader(_module_dirs(opts, 'pillar'),
- opts,
- tag='pillar',
- pack={'__salt__': functions,
- '__context__': context,
- '__utils__': utils(opts)})
- ret.pack['__ext_pillar__'] = ret
- return FilterDictWrapper(ret, '.ext_pillar')
+ """
+ ret = LazyLoader(
+ _module_dirs(opts, "pillar"),
+ opts,
+ tag="pillar",
+ pack={"__salt__": functions, "__context__": context, "__utils__": utils(opts)},
+ )
+ ret.pack["__ext_pillar__"] = ret
+ return FilterDictWrapper(ret, ".ext_pillar")
def tops(opts):
- '''
+ """
Returns the tops modules
- '''
- if 'master_tops' not in opts:
+ """
+ if "master_tops" not in opts:
return {}
- whitelist = list(opts['master_tops'].keys())
+ whitelist = list(opts["master_tops"].keys())
ret = LazyLoader(
- _module_dirs(opts, 'tops', 'top'),
- opts,
- tag='top',
- whitelist=whitelist,
+ _module_dirs(opts, "tops", "top"), opts, tag="top", whitelist=whitelist,
)
- return FilterDictWrapper(ret, '.top')
+ return FilterDictWrapper(ret, ".top")
def wheels(opts, whitelist=None, context=None):
- '''
+ """
Returns the wheels modules
- '''
+ """
if context is None:
context = {}
return LazyLoader(
- _module_dirs(opts, 'wheel'),
+ _module_dirs(opts, "wheel"),
opts,
- tag='wheel',
+ tag="wheel",
whitelist=whitelist,
- pack={'__context__': context},
+ pack={"__context__": context},
)
def outputters(opts):
- '''
+ """
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
- '''
+ """
ret = LazyLoader(
- _module_dirs(opts, 'output', ext_type_dirs='outputter_dirs'),
+ _module_dirs(opts, "output", ext_type_dirs="outputter_dirs"),
opts,
- tag='output',
+ tag="output",
)
- wrapped_ret = FilterDictWrapper(ret, '.output')
+ wrapped_ret = FilterDictWrapper(ret, ".output")
# TODO: this name seems terrible... __salt__ should always be execution mods
- ret.pack['__salt__'] = wrapped_ret
+ ret.pack["__salt__"] = wrapped_ret
return wrapped_ret
def serializers(opts):
- '''
+ """
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
- '''
- return LazyLoader(
- _module_dirs(opts, 'serializers'),
- opts,
- tag='serializers',
- )
+ """
+ return LazyLoader(_module_dirs(opts, "serializers"), opts, tag="serializers",)
def eauth_tokens(opts):
- '''
+ """
Returns the tokens modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
- '''
- return LazyLoader(
- _module_dirs(opts, 'tokens'),
- opts,
- tag='tokens',
- )
+ """
+ return LazyLoader(_module_dirs(opts, "tokens"), opts, tag="tokens",)
def auth(opts, whitelist=None):
- '''
+ """
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'auth'),
+ _module_dirs(opts, "auth"),
opts,
- tag='auth',
+ tag="auth",
whitelist=whitelist,
- pack={'__salt__': minion_mods(opts)},
+ pack={"__salt__": minion_mods(opts)},
)
def fileserver(opts, backends):
- '''
+ """
Returns the file server modules
- '''
- return LazyLoader(_module_dirs(opts, 'fileserver'),
- opts,
- tag='fileserver',
- whitelist=backends,
- pack={'__utils__': utils(opts)})
+ """
+ return LazyLoader(
+ _module_dirs(opts, "fileserver"),
+ opts,
+ tag="fileserver",
+ whitelist=backends,
+ pack={"__utils__": utils(opts)},
+ )
def roster(opts, runner=None, utils=None, whitelist=None):
- '''
+ """
Returns the roster modules
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'roster'),
+ _module_dirs(opts, "roster"),
opts,
- tag='roster',
+ tag="roster",
whitelist=whitelist,
- pack={
- '__runner__': runner,
- '__utils__': utils,
- },
+ pack={"__runner__": runner, "__utils__": utils},
)
def thorium(opts, functions, runners):
- '''
+ """
Load the thorium runtime modules
- '''
- pack = {'__salt__': functions, '__runner__': runners, '__context__': {}}
- ret = LazyLoader(_module_dirs(opts, 'thorium'),
- opts,
- tag='thorium',
- pack=pack)
- ret.pack['__thorium__'] = ret
+ """
+ pack = {"__salt__": functions, "__runner__": runners, "__context__": {}}
+ ret = LazyLoader(_module_dirs(opts, "thorium"), opts, tag="thorium", pack=pack)
+ ret.pack["__thorium__"] = ret
return ret
-def states(opts, functions, utils, serializers, whitelist=None, proxy=None, context=None):
- '''
+def states(
+ opts, functions, utils, serializers, whitelist=None, proxy=None, context=None
+):
+ """
Returns the state modules
:param dict opts: The Salt options dictionary
@@ -552,118 +539,119 @@ def states(opts, functions, utils, serializers, whitelist=None, proxy=None, cont
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
- '''
+ """
if context is None:
context = {}
ret = LazyLoader(
- _module_dirs(opts, 'states'),
+ _module_dirs(opts, "states"),
opts,
- tag='states',
- pack={'__salt__': functions, '__proxy__': proxy or {}},
+ tag="states",
+ pack={"__salt__": functions, "__proxy__": proxy or {}},
whitelist=whitelist,
)
- ret.pack['__states__'] = ret
- ret.pack['__utils__'] = utils
- ret.pack['__serializers__'] = serializers
- ret.pack['__context__'] = context
+ ret.pack["__states__"] = ret
+ ret.pack["__utils__"] = utils
+ ret.pack["__serializers__"] = serializers
+ ret.pack["__context__"] = context
return ret
def beacons(opts, functions, context=None, proxy=None):
- '''
+ """
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'beacons'),
+ _module_dirs(opts, "beacons"),
opts,
- tag='beacons',
- pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}},
+ tag="beacons",
+ pack={"__context__": context, "__salt__": functions, "__proxy__": proxy or {}},
virtual_funcs=[],
)
def log_handlers(opts):
- '''
+ """
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
- '''
+ """
ret = LazyLoader(
_module_dirs(
opts,
- 'log_handlers',
- int_type='handlers',
- base_path=os.path.join(SALT_BASE_PATH, 'log'),
+ "log_handlers",
+ int_type="handlers",
+ base_path=os.path.join(SALT_BASE_PATH, "log"),
),
opts,
- tag='log_handlers',
+ tag="log_handlers",
)
- return FilterDictWrapper(ret, '.setup_handlers')
+ return FilterDictWrapper(ret, ".setup_handlers")
def ssh_wrapper(opts, functions=None, context=None):
- '''
+ """
Returns the salt-ssh wrapper modules
- '''
+ """
return LazyLoader(
_module_dirs(
opts,
- 'wrapper',
- base_path=os.path.join(SALT_BASE_PATH, os.path.join('client', 'ssh')),
+ "wrapper",
+ base_path=os.path.join(SALT_BASE_PATH, os.path.join("client", "ssh")),
),
opts,
- tag='wrapper',
+ tag="wrapper",
pack={
- '__salt__': functions,
- '__grains__': opts.get('grains', {}),
- '__pillar__': opts.get('pillar', {}),
- '__context__': context,
- },
+ "__salt__": functions,
+ "__grains__": opts.get("grains", {}),
+ "__pillar__": opts.get("pillar", {}),
+ "__context__": context,
+ },
)
def render(opts, functions, states=None, proxy=None, context=None):
- '''
+ """
Returns the render modules
- '''
+ """
if context is None:
context = {}
- pack = {'__salt__': functions,
- '__grains__': opts.get('grains', {}),
- '__context__': context}
+ pack = {
+ "__salt__": functions,
+ "__grains__": opts.get("grains", {}),
+ "__context__": context,
+ }
if states:
- pack['__states__'] = states
- pack['__proxy__'] = proxy or {}
+ pack["__states__"] = states
+ pack["__proxy__"] = proxy or {}
ret = LazyLoader(
- _module_dirs(
- opts,
- 'renderers',
- 'render',
- ext_type_dirs='render_dirs',
- ),
+ _module_dirs(opts, "renderers", "render", ext_type_dirs="render_dirs",),
opts,
- tag='render',
+ tag="render",
pack=pack,
)
- rend = FilterDictWrapper(ret, '.render')
+ rend = FilterDictWrapper(ret, ".render")
- if not check_render_pipe_str(opts['renderer'], rend, opts['renderer_blacklist'], opts['renderer_whitelist']):
- err = ('The renderer {0} is unavailable, this error is often because '
- 'the needed software is unavailable'.format(opts['renderer']))
+ if not check_render_pipe_str(
+ opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"]
+ ):
+ err = (
+ "The renderer {0} is unavailable, this error is often because "
+ "the needed software is unavailable".format(opts["renderer"])
+ )
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts, proxy=None):
- '''
+ """
Returns the grain functions
.. code-block:: python
@@ -673,18 +661,13 @@ def grain_funcs(opts, proxy=None):
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
- '''
+ """
ret = LazyLoader(
- _module_dirs(
- opts,
- 'grains',
- 'grain',
- ext_type_dirs='grains_dirs',
- ),
+ _module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs",),
opts,
- tag='grains',
+ tag="grains",
)
- ret.pack['__utils__'] = utils(opts, proxy=proxy)
+ ret.pack["__utils__"] = utils(opts, proxy=proxy)
return ret
@@ -692,43 +675,46 @@ def _format_cached_grains(cached_grains):
"""
Returns cached grains with fixed types, like tuples.
"""
- if cached_grains.get('osrelease_info'):
- osrelease_info = cached_grains['osrelease_info']
+ if cached_grains.get("osrelease_info"):
+ osrelease_info = cached_grains["osrelease_info"]
if isinstance(osrelease_info, list):
- cached_grains['osrelease_info'] = tuple(osrelease_info)
+ cached_grains["osrelease_info"] = tuple(osrelease_info)
return cached_grains
def _load_cached_grains(opts, cfn):
- '''
+ """
Returns the grains cached in cfn, or None if the cache is too old or is
corrupted.
- '''
+ """
if not os.path.isfile(cfn):
- log.debug('Grains cache file does not exist.')
+ log.debug("Grains cache file does not exist.")
return None
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
- if grains_cache_age > opts.get('grains_cache_expiration', 300):
+ if grains_cache_age > opts.get("grains_cache_expiration", 300):
log.debug(
- 'Grains cache last modified %s seconds ago and cache '
- 'expiration is set to %s. Grains cache expired. '
- 'Refreshing.',
- grains_cache_age, opts.get('grains_cache_expiration', 300)
+ "Grains cache last modified %s seconds ago and cache "
+ "expiration is set to %s. Grains cache expired. "
+ "Refreshing.",
+ grains_cache_age,
+ opts.get("grains_cache_expiration", 300),
)
return None
- if opts.get('refresh_grains_cache', False):
- log.debug('refresh_grains_cache requested, Refreshing.')
+ if opts.get("refresh_grains_cache", False):
+ log.debug("refresh_grains_cache requested, Refreshing.")
return None
- log.debug('Retrieving grains from cache')
+ log.debug("Retrieving grains from cache")
try:
serial = salt.payload.Serial(opts)
- with salt.utils.files.fopen(cfn, 'rb') as fp_:
- cached_grains = salt.utils.data.decode(serial.load(fp_), preserve_tuples=True)
+ with salt.utils.files.fopen(cfn, "rb") as fp_:
+ cached_grains = salt.utils.data.decode(
+ serial.load(fp_), preserve_tuples=True
+ )
if not cached_grains:
- log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
+ log.debug("Cached grains are empty, cache might be corrupted. Refreshing.")
return None
return _format_cached_grains(cached_grains)
@@ -737,7 +723,7 @@ def _load_cached_grains(opts, cfn):
def grains(opts, force_refresh=False, proxy=None):
- '''
+ """
Return the functions for the dynamic grains and the values for the static
grains.
@@ -754,57 +740,58 @@ def grains(opts, force_refresh=False, proxy=None):
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
- '''
+ """
# Need to re-import salt.config, somehow it got lost when a minion is starting
import salt.config
+
# if we have no grains, let's try loading from disk (TODO: move to decorator?)
- cfn = os.path.join(
- opts['cachedir'],
- 'grains.cache.p'
- )
- if not force_refresh and opts.get('grains_cache', False):
+ cfn = os.path.join(opts["cachedir"], "grains.cache.p")
+ if not force_refresh and opts.get("grains_cache", False):
cached_grains = _load_cached_grains(opts, cfn)
if cached_grains:
return cached_grains
else:
- log.debug('Grains refresh requested. Refreshing grains.')
+ log.debug("Grains refresh requested. Refreshing grains.")
- if opts.get('skip_grains', False):
+ if opts.get("skip_grains", False):
return {}
- grains_deep_merge = opts.get('grains_deep_merge', False) is True
- if 'conf_file' in opts:
+ grains_deep_merge = opts.get("grains_deep_merge", False) is True
+ if "conf_file" in opts:
pre_opts = {}
- pre_opts.update(salt.config.load_config(
- opts['conf_file'], 'SALT_MINION_CONFIG',
- salt.config.DEFAULT_MINION_OPTS['conf_file']
- ))
- default_include = pre_opts.get(
- 'default_include', opts['default_include']
+ pre_opts.update(
+ salt.config.load_config(
+ opts["conf_file"],
+ "SALT_MINION_CONFIG",
+ salt.config.DEFAULT_MINION_OPTS["conf_file"],
+ )
)
- include = pre_opts.get('include', [])
- pre_opts.update(salt.config.include_config(
- default_include, opts['conf_file'], verbose=False
- ))
- pre_opts.update(salt.config.include_config(
- include, opts['conf_file'], verbose=True
- ))
- if 'grains' in pre_opts:
- opts['grains'] = pre_opts['grains']
+ default_include = pre_opts.get("default_include", opts["default_include"])
+ include = pre_opts.get("include", [])
+ pre_opts.update(
+ salt.config.include_config(
+ default_include, opts["conf_file"], verbose=False
+ )
+ )
+ pre_opts.update(
+ salt.config.include_config(include, opts["conf_file"], verbose=True)
+ )
+ if "grains" in pre_opts:
+ opts["grains"] = pre_opts["grains"]
else:
- opts['grains'] = {}
+ opts["grains"] = {}
else:
- opts['grains'] = {}
+ opts["grains"] = {}
grains_data = {}
- blist = opts.get('grains_blacklist', [])
+ blist = opts.get("grains_blacklist", [])
funcs = grain_funcs(opts, proxy=proxy)
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
- if not key.startswith('core.'):
+ if not key.startswith("core."):
continue
- log.trace('Loading %s grain', key)
+ log.trace("Loading %s grain", key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
@@ -813,7 +800,7 @@ def grains(opts, force_refresh=False, proxy=None):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
- log.trace('Filtering %s grain', key)
+ log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
@@ -823,7 +810,7 @@ def grains(opts, force_refresh=False, proxy=None):
# Run the rest of the grains
for key in funcs:
- if key.startswith('core.') or key == '_errors':
+ if key.startswith("core.") or key == "_errors":
continue
try:
# Grains are loaded too early to take advantage of the injected
@@ -832,21 +819,25 @@ def grains(opts, force_refresh=False, proxy=None):
# one parameter. Then the grains can have access to the
# proxymodule for retrieving information from the connected
# device.
- log.trace('Loading %s grain', key)
+ log.trace("Loading %s grain", key)
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
- if 'proxy' in parameters:
- kwargs['proxy'] = proxy
- if 'grains' in parameters:
- kwargs['grains'] = grains_data
+ if "proxy" in parameters:
+ kwargs["proxy"] = proxy
+ if "grains" in parameters:
+ kwargs["grains"] = grains_data
ret = funcs[key](**kwargs)
except Exception: # pylint: disable=broad-except
if salt.utils.platform.is_proxy():
- log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.')
+ log.info(
+ "The following CRITICAL message may not be an error; the proxy may not be completely established yet."
+ )
log.critical(
- 'Failed to load grains defined in grain file %s in '
- 'function %s, error:\n', key, funcs[key],
- exc_info=True
+ "Failed to load grains defined in grain file %s in "
+ "function %s, error:\n",
+ key,
+ funcs[key],
+ exc_info=True,
)
continue
if not isinstance(ret, dict):
@@ -856,7 +847,7 @@ def grains(opts, force_refresh=False, proxy=None):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
- log.trace('Filtering %s grain', key)
+ log.trace("Filtering %s grain", key)
if not ret:
continue
if grains_deep_merge:
@@ -864,44 +855,48 @@ def grains(opts, force_refresh=False, proxy=None):
else:
grains_data.update(ret)
- if opts.get('proxy_merge_grains_in_module', True) and proxy:
+ if opts.get("proxy_merge_grains_in_module", True) and proxy:
try:
- proxytype = proxy.opts['proxy']['proxytype']
- if proxytype + '.grains' in proxy:
- if proxytype + '.initialized' in proxy and proxy[proxytype + '.initialized']():
+ proxytype = proxy.opts["proxy"]["proxytype"]
+ if proxytype + ".grains" in proxy:
+ if (
+ proxytype + ".initialized" in proxy
+ and proxy[proxytype + ".initialized"]()
+ ):
try:
- proxytype = proxy.opts['proxy']['proxytype']
- ret = proxy[proxytype + '.grains']()
+ proxytype = proxy.opts["proxy"]["proxytype"]
+ ret = proxy[proxytype + ".grains"]()
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
except Exception: # pylint: disable=broad-except
- log.critical('Failed to run proxy\'s grains function!',
- exc_info=True
+ log.critical(
+ "Failed to run proxy's grains function!", exc_info=True
)
except KeyError:
pass
- grains_data.update(opts['grains'])
+ grains_data.update(opts["grains"])
# Write cache if enabled
- if opts.get('grains_cache', False):
+ if opts.get("grains_cache", False):
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
import salt.modules.cmdmod
+
# Make sure cache file isn't read-only
salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
- with salt.utils.files.fopen(cfn, 'w+b') as fp_:
+ with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
- log.error('Failed to serialize grains cache: %s', e)
+ log.error("Failed to serialize grains cache: %s", e)
raise # re-throw for cleanup
except Exception as e: # pylint: disable=broad-except
- log.error('Unable to write to grains cache file %s: %s', cfn, e)
+ log.error("Unable to write to grains cache file %s: %s", cfn, e)
# Based on the original exception, the file may or may not have been
# created. If it was, we will remove it now, as the exception means
# the serialized data is not to be trusted, no matter what the
@@ -910,176 +905,170 @@ def grains(opts, force_refresh=False, proxy=None):
os.unlink(cfn)
if grains_deep_merge:
- salt.utils.dictupdate.update(grains_data, opts['grains'])
+ salt.utils.dictupdate.update(grains_data, opts["grains"])
else:
- grains_data.update(opts['grains'])
+ grains_data.update(opts["grains"])
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
- '''
+ """
Directly call a function inside a loader directory
- '''
- args = kwargs.get('args', [])
- dirs = kwargs.get('dirs', [])
+ """
+ args = kwargs.get("args", [])
+ dirs = kwargs.get("dirs", [])
funcs = LazyLoader(
- [os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
+ [os.path.join(SALT_BASE_PATH, "modules")] + dirs,
None,
- tag='modules',
+ tag="modules",
virtual_enable=False,
)
return funcs[fun](*args)
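# Usage sketch (hedged; this bypasses __virtual__ and the normal loader
# plumbing, so it is mostly useful for one-off experiments):
#
#     import salt.loader
#     salt.loader.call("test.echo", args=["hello"])  # -> "hello"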
def runner(opts, utils=None, context=None, whitelist=None):
- '''
+ """
Returns the runner modules
- '''
+ """
if utils is None:
utils = {}
if context is None:
context = {}
ret = LazyLoader(
- _module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'),
+ _module_dirs(opts, "runners", "runner", ext_type_dirs="runner_dirs"),
opts,
- tag='runners',
- pack={'__utils__': utils, '__context__': context},
+ tag="runners",
+ pack={"__utils__": utils, "__context__": context},
whitelist=whitelist,
)
# TODO: change from __salt__ to something else, we overload __salt__ too much
- ret.pack['__salt__'] = ret
+ ret.pack["__salt__"] = ret
return ret
def queues(opts):
- '''
+ """
Returns the queue modules
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'queues', 'queue', ext_type_dirs='queue_dirs'),
+ _module_dirs(opts, "queues", "queue", ext_type_dirs="queue_dirs"),
opts,
- tag='queues',
+ tag="queues",
)
def sdb(opts, functions=None, whitelist=None, utils=None):
- '''
+ """
Make a very small database call
- '''
+ """
if utils is None:
utils = {}
return LazyLoader(
- _module_dirs(opts, 'sdb'),
+ _module_dirs(opts, "sdb"),
opts,
- tag='sdb',
+ tag="sdb",
pack={
- '__sdb__': functions,
- '__opts__': opts,
- '__utils__': utils,
- '__salt__': minion_mods(opts, utils=utils),
+ "__sdb__": functions,
+ "__opts__": opts,
+ "__utils__": utils,
+ "__salt__": minion_mods(opts, utils=utils),
},
whitelist=whitelist,
)
def pkgdb(opts):
- '''
+ """
Return modules for SPM's package database
.. versionadded:: 2015.8.0
- '''
+ """
return LazyLoader(
- _module_dirs(
- opts,
- 'pkgdb',
- base_path=os.path.join(SALT_BASE_PATH, 'spm')
- ),
+ _module_dirs(opts, "pkgdb", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
- tag='pkgdb'
+ tag="pkgdb",
)
def pkgfiles(opts):
- '''
+ """
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
- '''
+ """
return LazyLoader(
- _module_dirs(
- opts,
- 'pkgfiles',
- base_path=os.path.join(SALT_BASE_PATH, 'spm')
- ),
+ _module_dirs(opts, "pkgfiles", base_path=os.path.join(SALT_BASE_PATH, "spm")),
opts,
- tag='pkgfiles'
+ tag="pkgfiles",
)
def clouds(opts):
- '''
+ """
Return the cloud functions
- '''
+ """
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
- _module_dirs(opts,
- 'clouds',
- 'cloud',
- base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
- int_type='clouds'),
+ _module_dirs(
+ opts,
+ "clouds",
+ "cloud",
+ base_path=os.path.join(SALT_BASE_PATH, "cloud"),
+ int_type="clouds",
+ ),
opts,
- tag='clouds',
- pack={'__utils__': salt.loader.utils(opts),
- '__active_provider_name__': None},
+ tag="clouds",
+ pack={"__utils__": salt.loader.utils(opts), "__active_provider_name__": None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
- '\'%s\' has been marked as not supported. Removing from the '
- 'list of supported cloud functions', funcname
+ "'%s' has been marked as not supported. Removing from the "
+ "list of supported cloud functions",
+ funcname,
)
functions.pop(funcname, None)
return functions
def netapi(opts):
- '''
+ """
Return the network api functions
- '''
- return LazyLoader(
- _module_dirs(opts, 'netapi'),
- opts,
- tag='netapi',
- )
+ """
+ return LazyLoader(_module_dirs(opts, "netapi"), opts, tag="netapi",)
def executors(opts, functions=None, context=None, proxy=None):
- '''
+ """
Returns the executor modules
- '''
+ """
executors = LazyLoader(
- _module_dirs(opts, 'executors', 'executor'),
+ _module_dirs(opts, "executors", "executor"),
opts,
- tag='executor',
- pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}},
+ tag="executor",
+ pack={
+ "__salt__": functions,
+ "__context__": context or {},
+ "__proxy__": proxy or {},
+ },
)
- executors.pack['__executors__'] = executors
+ executors.pack["__executors__"] = executors
return executors
def cache(opts, serial):
- '''
+ """
Returns the cache modules
- '''
+ """
return LazyLoader(
- _module_dirs(opts, 'cache', 'cache'),
+ _module_dirs(opts, "cache", "cache"),
opts,
- tag='cache',
- pack={'__opts__': opts, '__context__': {'serial': serial}},
+ tag="cache",
+ pack={"__opts__": opts, "__context__": {"serial": serial}},
)
@@ -1087,7 +1076,7 @@ def _generate_module(name):
if name in sys.modules:
return
- code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
+ code = "'''Salt loaded {0} parent module'''".format(name.split(".")[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
@@ -1096,17 +1085,18 @@ def _generate_module(name):
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
- return 'int'
- return 'ext'
+ return "int"
+ return "ext"
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
- '''
+ """
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
- '''
+ """
+
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
@@ -1126,11 +1116,11 @@ class FilterDictWrapper(MutableMapping):
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
- yield key.replace(self.suffix, '')
+ yield key.replace(self.suffix, "")
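# Behavioral sketch (illustrative names): wrapping a loader whose keys include
# "stack.ext_pillar" with FilterDictWrapper(loader, ".ext_pillar") makes
# wrapper["stack"] resolve to loader["stack.ext_pillar"], while iteration
# yields only the suffix-stripped names.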
class LazyLoader(salt.utils.lazy.LazyDict):
- '''
+ """
A pseudo-dictionary whose keys are the module and function
names, delimited by a dot. When
the value of the key is accessed, the function is then loaded
@@ -1154,33 +1144,34 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# TODO:
- move modules_max_memory into here
- singletons (per tag)
- '''
+ """
mod_dict_class = salt.utils.odict.OrderedDict
- def __init__(self,
- module_dirs,
- opts=None,
- tag='module',
- loaded_base_name=None,
- mod_type_check=None,
- pack=None,
- whitelist=None,
- virtual_enable=True,
- static_modules=None,
- proxy=None,
- virtual_funcs=None,
- ): # pylint: disable=W0231
- '''
+ def __init__(
+ self,
+ module_dirs,
+ opts=None,
+ tag="module",
+ loaded_base_name=None,
+ mod_type_check=None,
+ pack=None,
+ whitelist=None,
+ virtual_enable=True,
+ static_modules=None,
+ proxy=None,
+ virtual_funcs=None,
+ ): # pylint: disable=W0231
+ """
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
- '''
+ """
self.inject_globals = {}
self.pack = {} if pack is None else pack
if opts is None:
opts = {}
- threadsafety = not opts.get('multiprocessing')
+ threadsafety = not opts.get("multiprocessing")
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
@@ -1189,13 +1180,15 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
- if '__context__' not in self.pack:
- self.pack['__context__'] = None
+ if "__context__" not in self.pack:
+ self.pack["__context__"] = None
for k, v in six.iteritems(self.pack):
if v is None:  # if the value of a pack is None, let's make an empty dict
self.context_dict.setdefault(k, {})
- self.pack[k] = salt.utils.context.NamespacedDictWrapper(self.context_dict, k)
+ self.pack[k] = salt.utils.context.NamespacedDictWrapper(
+ self.context_dict, k
+ )
self.whitelist = whitelist
self.virtual_enable = virtual_enable
@@ -1213,11 +1206,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.disabled = set(
self.opts.get(
- 'disable_{0}{1}'.format(
- self.tag,
- '' if self.tag[-1] == 's' else 's'
- ),
- []
+ "disable_{0}{1}".format(self.tag, "" if self.tag[-1] == "s" else "s"),
+ [],
)
)
@@ -1225,7 +1215,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
- self.suffix_order = ['']
+ self.suffix_order = [""]
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
@@ -1235,16 +1225,16 @@ class LazyLoader(salt.utils.lazy.LazyDict):
super(LazyLoader, self).__init__() # late init the lazy loader
# create all of the import namespaces
- _generate_module('{0}.int'.format(self.loaded_base_name))
- _generate_module('{0}.int.{1}'.format(self.loaded_base_name, tag))
- _generate_module('{0}.ext'.format(self.loaded_base_name))
- _generate_module('{0}.ext.{1}'.format(self.loaded_base_name, tag))
+ _generate_module("{0}.int".format(self.loaded_base_name))
+ _generate_module("{0}.int.{1}".format(self.loaded_base_name, tag))
+ _generate_module("{0}.ext".format(self.loaded_base_name))
+ _generate_module("{0}.ext.{1}".format(self.loaded_base_name, tag))
def __getitem__(self, item):
- '''
+ """
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
- '''
+ """
func = super(LazyLoader, self).__getitem__(item)
if self.inject_globals:
return global_injector_decorator(self.inject_globals)(func)
@@ -1252,11 +1242,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
return func
def __getattr__(self, mod_name):
- '''
+ """
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
- '''
- if mod_name in ('__getstate__', '__setstate__'):
+ """
+ if mod_name in ("__getstate__", "__setstate__"):
return object.__getattribute__(self, mod_name)
# if we have an attribute named that, let's return it.
@@ -1279,48 +1269,52 @@ class LazyLoader(salt.utils.lazy.LazyDict):
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
- '''
+ """
Return the error string for a missing function.
This can range from "not available" to "__virtual__ returned False".
- '''
- mod_name = function_name.split('.')[0]
+ """
+ mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
- return '\'{0}\' is not available.'.format(function_name)
+ return "'{0}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
- return '\'{0}\' is not available.'.format(function_name)
+ return "'{0}' is not available.".format(function_name)
else:
if reason is not None:
- return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
+ return "'{0}' __virtual__ returned False: {1}".format(
+ mod_name, reason
+ )
else:
- return '\'{0}\' __virtual__ returned False'.format(mod_name)
+ return "'{0}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
- '''
+ """
refresh the mapping of the FS on disk
- '''
+ """
# map of suffix to description for imp
- if self.opts.get('cython_enable', True) is True:
+ if self.opts.get("cython_enable", True) is True:
try:
global pyximport
- pyximport = __import__('pyximport') # pylint: disable=import-error
+ pyximport = __import__("pyximport") # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
- self.suffix_map['.pyx'] = tuple()
+ self.suffix_map[".pyx"] = tuple()
except ImportError:
- log.info('Cython is enabled in the options but not present '
- 'in the system path. Skipping Cython modules.')
+ log.info(
+ "Cython is enabled in the options but not present "
+ "in the system path. Skipping Cython modules."
+ )
# Allow for zipimport of modules
- if self.opts.get('enable_zip_modules', True) is True:
- self.suffix_map['.zip'] = tuple()
+ if self.opts.get("enable_zip_modules", True) is True:
+ self.suffix_map[".zip"] = tuple()
# allow for module dirs
if USE_IMPORTLIB:
- self.suffix_map[''] = ('', '', MODULE_KIND_PKG_DIRECTORY)
+ self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
else:
- self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
+ self.suffix_map[""] = ("", "", imp.PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
@@ -1329,29 +1323,29 @@ class LazyLoader(salt.utils.lazy.LazyDict):
opt_match = []
def _replace_pre_ext(obj):
- '''
+ """
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
- '''
+ """
opt_match.append(obj)
- return ''
+ return ""
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# expectable override results
- files = sorted(
- x for x in os.listdir(mod_dir) if x != '__pycache__'
- )
+ files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue # Next mod_dir
if six.PY3:
try:
pycache_files = [
- os.path.join('__pycache__', x) for x in
- sorted(os.listdir(os.path.join(mod_dir, '__pycache__')))
+ os.path.join("__pycache__", x)
+ for x in sorted(
+ os.listdir(os.path.join(mod_dir, "__pycache__"))
+ )
]
except OSError:
pass
@@ -1361,7 +1355,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
for filename in files:
try:
dirname, basename = os.path.split(filename)
- if basename.startswith('_'):
+ if basename.startswith("_"):
# skip private modules
# log messages omitted for brevity
continue # Next filename
@@ -1369,21 +1363,22 @@ class LazyLoader(salt.utils.lazy.LazyDict):
if six.PY3:
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
- opt_level = int(
- opt_match.pop().group(1).rsplit('-', 1)[-1]
- )
+ opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
- opt_index = self.opts['optimization_order'].index(opt_level)
+ opt_index = self.opts["optimization_order"].index(opt_level)
except KeyError:
log.trace(
- 'Disallowed optimization level %d for module '
- 'name \'%s\', skipping. Add %d to the '
- '\'optimization_order\' config option if you '
- 'do not want to ignore this optimization '
- 'level.', opt_level, f_noext, opt_level
+ "Disallowed optimization level %d for module "
+ "name '%s', skipping. Add %d to the "
+ "'optimization_order' config option if you "
+ "do not want to ignore this optimization "
+ "level.",
+ opt_level,
+ f_noext,
+ opt_level,
)
continue
else:
@@ -1395,19 +1390,18 @@ class LazyLoader(salt.utils.lazy.LazyDict):
continue # Next filename
if f_noext in self.disabled:
log.trace(
- 'Skipping %s, it is disabled by configuration',
- filename
+ "Skipping %s, it is disabled by configuration", filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
# if it's a directory, allow it to be loaded
- if ext == '':
+ if ext == "":
# is there an __init__ file?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
- if '' == suffix:
+ if "" == suffix:
continue # Next suffix (__init__ must have a suffix)
- init_file = '__init__{0}'.format(suffix)
+ init_file = "__init__{0}".format(suffix)
if init_file in subfiles:
break
else:
@@ -1419,25 +1413,27 @@ class LazyLoader(salt.utils.lazy.LazyDict):
except KeyError:
pass
else:
- if '' in (curr_ext, ext) and curr_ext != ext:
+ if "" in (curr_ext, ext) and curr_ext != ext:
log.error(
- 'Module/package collision: \'%s\' and \'%s\'',
+ "Module/package collision: '%s' and '%s'",
fpath,
- self.file_mapping[f_noext][0]
+ self.file_mapping[f_noext][0],
)
- if six.PY3 and ext == '.pyc' and curr_ext == '.pyc':
+ if six.PY3 and ext == ".pyc" and curr_ext == ".pyc":
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
- elif not curr_ext or self.suffix_order.index(ext) >= self.suffix_order.index(curr_ext):
+ elif not curr_ext or self.suffix_order.index(
+ ext
+ ) >= self.suffix_order.index(curr_ext):
# Match found but a higher-priority match already
# exists, so skip this.
continue
- if six.PY3 and not dirname and ext == '.pyc':
+ if six.PY3 and not dirname and ext == ".pyc":
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
@@ -1449,13 +1445,13 @@ class LazyLoader(salt.utils.lazy.LazyDict):
except OSError:
continue
for smod in self.static_modules:
- f_noext = smod.split('.')[-1]
- self.file_mapping[f_noext] = (smod, '.o', 0)
+ f_noext = smod.split(".")[-1]
+ self.file_mapping[f_noext] = (smod, ".o", 0)
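# Hedged sketch of the optimization-level parsing done above, assuming
# PY3_PRE_EXT matches the cached-bytecode tag (e.g. ".cpython-38.opt-1");
# the regex here is an approximation, not Salt's actual pattern.
import re

PRE_EXT = re.compile(r"\.cpython-\d+(\.opt-[1-9])?$")
match = PRE_EXT.search("mymod.cpython-38.opt-1")
opt_level = int(match.group(1).rsplit("-", 1)[-1]) if match.group(1) else 0
assert opt_level == 1  # then checked against opts["optimization_order"]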
def clear(self):
- '''
+ """
Clear the dict
- '''
+ """
with self._lock:
super(LazyLoader, self).clear() # clear the lazy loader
self.loaded_files = set()
@@ -1463,33 +1459,37 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
- if hasattr(self, 'opts'):
+ if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
- '''
+ """
Strip out of the opts any logger instance
- '''
- if '__grains__' not in self.pack:
- self.context_dict['grains'] = opts.get('grains', {})
- self.pack['__grains__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'grains')
+ """
+ if "__grains__" not in self.pack:
+ self.context_dict["grains"] = opts.get("grains", {})
+ self.pack["__grains__"] = salt.utils.context.NamespacedDictWrapper(
+ self.context_dict, "grains"
+ )
- if '__pillar__' not in self.pack:
- self.context_dict['pillar'] = opts.get('pillar', {})
- self.pack['__pillar__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'pillar')
+ if "__pillar__" not in self.pack:
+ self.context_dict["pillar"] = opts.get("pillar", {})
+ self.pack["__pillar__"] = salt.utils.context.NamespacedDictWrapper(
+ self.context_dict, "pillar"
+ )
mod_opts = {}
for key, val in list(opts.items()):
- if key == 'logger':
+ if key == "logger":
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
- '''
+ """
Iterate over all file_mapping files in order of closeness to mod_name
- '''
+ """
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
@@ -1506,14 +1506,15 @@ class LazyLoader(salt.utils.lazy.LazyDict):
def _reload_submodules(self, mod):
submodules = (
- getattr(mod, sname) for sname in dir(mod) if
- isinstance(getattr(mod, sname), mod.__class__)
+ getattr(mod, sname)
+ for sname in dir(mod)
+ if isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
- if submodule.__name__.startswith(mod.__name__ + '.'):
+ if submodule.__name__.startswith(mod.__name__ + "."):
reload_module(submodule)
self._reload_submodules(submodule)
@@ -1524,46 +1525,58 @@ class LazyLoader(salt.utils.lazy.LazyDict):
fpath_dirname = os.path.dirname(fpath)
try:
sys.path.append(fpath_dirname)
- if suffix == '.pyx':
+ if suffix == ".pyx":
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
- elif suffix == '.o':
+ elif suffix == ".o":
top_mod = __import__(fpath, globals(), locals(), [])
- comps = fpath.split('.')
+ comps = fpath.split(".")
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
- elif suffix == '.zip':
+ elif suffix == ".zip":
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
- mod_namespace = '.'.join((
- self.loaded_base_name,
- self.mod_type_check(fpath),
- self.tag,
- name))
+ mod_namespace = ".".join(
+ (
+ self.loaded_base_name,
+ self.mod_type_check(fpath),
+ self.tag,
+ name,
+ )
+ )
except TypeError:
- mod_namespace = '{0}.{1}.{2}.{3}'.format(
+ mod_namespace = "{0}.{1}.{2}.{3}".format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
- name)
- if suffix == '':
+ name,
+ )
+ if suffix == "":
if USE_IMPORTLIB:
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
- (importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
- (importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
- (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
+ (
+ importlib.machinery.SourceFileLoader,
+ importlib.machinery.SOURCE_SUFFIXES,
+ ),
+ (
+ importlib.machinery.SourcelessFileLoader,
+ importlib.machinery.BYTECODE_SUFFIXES,
+ ),
+ (
+ importlib.machinery.ExtensionFileLoader,
+ importlib.machinery.EXTENSION_SUFFIXES,
+ ),
]
file_finder = importlib.machinery.FileFinder(
- fpath_dirname,
- *loader_details
+ fpath_dirname, *loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
@@ -1598,8 +1611,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
- #mod = importlib.util.module_from_spec(spec)
- #spec.loader.exec_module(mod)
+ # mod = importlib.util.module_from_spec(spec)
+ # spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
@@ -1608,20 +1621,22 @@ class LazyLoader(salt.utils.lazy.LazyDict):
except IOError:
raise
except ImportError as exc:
- if 'magic number' in six.text_type(exc):
- error_msg = 'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name)
+ if "magic number" in six.text_type(exc):
+ error_msg = "Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.".format(
+ self.tag, name
+ )
log.warning(error_msg)
self.missing_modules[name] = error_msg
- log.debug(
- 'Failed to import %s %s:\n',
- self.tag, name, exc_info=True
- )
+ log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
self.missing_modules[name] = exc
return False
except Exception as error: # pylint: disable=broad-except
log.error(
- 'Failed to import %s %s, this is due most likely to a '
- 'syntax error:\n', self.tag, name, exc_info=True
+ "Failed to import %s %s, this is due most likely to a "
+ "syntax error:\n",
+ self.tag,
+ name,
+ exc_info=True,
)
self.missing_modules[name] = error
return False
@@ -1631,22 +1646,24 @@ class LazyLoader(salt.utils.lazy.LazyDict):
except Exception: # pylint: disable=broad-except
pass
else:
- tgt_fn = os.path.join('salt', 'utils', 'process.py')
- if fn_.endswith(tgt_fn) and '_handle_signals' in caller:
+ tgt_fn = os.path.join("salt", "utils", "process.py")
+ if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
# Race condition, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
- 'Failed to import %s %s as the module called exit()\n',
- self.tag, name, exc_info=True
+ "Failed to import %s %s as the module called exit()\n",
+ self.tag,
+ name,
+ exc_info=True,
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
- if hasattr(mod, '__opts__'):
+ if hasattr(mod, "__opts__"):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
@@ -1655,20 +1672,23 @@ class LazyLoader(salt.utils.lazy.LazyDict):
for p_name, p_value in six.iteritems(self.pack):
setattr(mod, p_name, p_value)
- module_name = mod.__name__.rsplit('.', 1)[-1]
+ module_name = mod.__name__.rsplit(".", 1)[-1]
# Call a module's initialization method if it exists
- module_init = getattr(mod, '__init__', None)
+ module_init = getattr(mod, "__init__", None)
if inspect.isfunction(module_init):
try:
module_init(self.opts)
except TypeError as e:
log.error(e)
except Exception: # pylint: disable=broad-except
- err_string = '__init__ failed'
+ err_string = "__init__ failed"
log.debug(
- 'Error loading %s.%s: %s',
- self.tag, module_name, err_string, exc_info=True
+ "Error loading %s.%s: %s",
+ self.tag,
+ module_name,
+ err_string,
+ exc_info=True,
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
@@ -1677,14 +1697,17 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
- virtual_funcs_to_process = ['__virtual__'] + self.virtual_funcs
+ virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
- virtual_ret, module_name, virtual_err, virtual_aliases = \
- self._process_virtual(mod, module_name, virtual_func)
+ (
+ virtual_ret,
+ module_name,
+ virtual_err,
+ virtual_aliases,
+ ) = self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
- 'Error loading %s.%s: %s',
- self.tag, module_name, virtual_err
+ "Error loading %s.%s: %s", self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
@@ -1702,32 +1725,33 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
- if 'proxy' in self.opts:
- if self.tag in ['grains', 'proxy']:
- if not hasattr(mod, '__proxyenabled__') or \
- (self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
- '*' not in mod.__proxyenabled__):
- err_string = 'not a proxy_minion enabled module'
+ if "proxy" in self.opts:
+ if self.tag in ["grains", "proxy"]:
+ if not hasattr(mod, "__proxyenabled__") or (
+ self.opts["proxy"]["proxytype"] not in mod.__proxyenabled__
+ and "*" not in mod.__proxyenabled__
+ ):
+ err_string = "not a proxy_minion enabled module"
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
- if getattr(mod, '__load__', False) is not False:
+ if getattr(mod, "__load__", False) is not False:
log.info(
- 'The functions from module \'%s\' are being loaded from the '
- 'provided __load__ attribute', module_name
+ "The functions from module '%s' are being loaded from the "
+ "provided __load__ attribute",
+ module_name,
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
- mod_dict = dict((
- (x, self.loaded_modules.get(x, self.mod_dict_class()))
- for x in mod_names
- ))
+ mod_dict = dict(
+ ((x, self.loaded_modules.get(x, self.mod_dict_class())) for x in mod_names)
+ )
- for attr in getattr(mod, '__load__', dir(mod)):
- if attr.startswith('_'):
+ for attr in getattr(mod, "__load__", dir(mod)):
+ if attr.startswith("_"):
# private functions are skipped
continue
func = getattr(mod, attr)
@@ -1741,12 +1765,12 @@ class LazyLoader(salt.utils.lazy.LazyDict):
#
# It defaults, of course, to the found callable attribute name
# if no alias is defined.
- funcname = getattr(mod, '__func_alias__', {}).get(attr, attr)
+ funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
for tgt_mod in mod_names:
try:
- full_funcname = '.'.join((tgt_mod, funcname))
+ full_funcname = ".".join((tgt_mod, funcname))
except TypeError:
- full_funcname = '{0}.{1}'.format(tgt_mod, funcname)
+ full_funcname = "{0}.{1}".format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
@@ -1761,8 +1785,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
Depends.enforce_dependencies(self._dict, self.tag, name)
except RuntimeError as exc:
log.info(
- 'Depends.enforce_dependencies() failed for the following '
- 'reason: %s', exc
+ "Depends.enforce_dependencies() failed for the following " "reason: %s",
+ exc,
)
for tgt_mod in mod_names:
@@ -1770,15 +1794,15 @@ class LazyLoader(salt.utils.lazy.LazyDict):
return True
def _load(self, key):
- '''
+ """
Load a single item if you have it
- '''
+ """
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, six.string_types):
- raise KeyError('The key must be a string.')
- if '.' not in key:
- raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
- mod_name, _ = key.split('.', 1)
+ raise KeyError("The key must be a string.")
+ if "." not in key:
+ raise KeyError("The key '{0}' should contain a '.'".format(key))
+ mod_name, _ = key.split(".", 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
@@ -1787,8 +1811,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
- 'Failed to load function %s because its module (%s) is '
- 'not in the whitelist: %s', key, mod_name, self.whitelist
+ "Failed to load function %s because its module (%s) is "
+ "not in the whitelist: %s",
+ key,
+ mod_name,
+ self.whitelist,
)
raise KeyError(key)
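# Hedged illustration of the key contract enforced above: lookup keys are
# "module.function", and only the module part is checked against the
# whitelist before any loading happens.
whitelist = {"test"}
mod_name, _ = "test.ping".split(".", 1)
assert mod_name in whitelist  # otherwise the loader raises KeyError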
@@ -1823,9 +1850,9 @@ class LazyLoader(salt.utils.lazy.LazyDict):
return ret
def _load_all(self):
- '''
+ """
Load all of them
- '''
+ """
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
@@ -1840,16 +1867,16 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self._load_all()
def _apply_outputter(self, func, mod):
- '''
+ """
Apply the __outputter__ variable to the functions
- '''
- if hasattr(mod, '__outputter__'):
+ """
+ if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
- def _process_virtual(self, mod, module_name, virtual_func='__virtual__'):
- '''
+ def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
+ """
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
@@ -1861,7 +1888,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
- '''
+ """
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
@@ -1874,30 +1901,33 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
- virtual_aliases = getattr(mod, '__virtual_aliases__', tuple())
+ virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
try:
error_reason = None
- if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__):
+ if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual = getattr(mod, virtual_func)()
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
- if self.opts.get('virtual_timer', False):
+ if self.opts.get("virtual_timer", False):
end = time.time() - start
- msg = 'Virtual function took {0} seconds for {1}'.format(
- end, module_name)
+ msg = "Virtual function took {0} seconds for {1}".format(
+ end, module_name
+ )
log.warning(msg)
except Exception as exc: # pylint: disable=broad-except
error_reason = (
- 'Exception raised when processing __virtual__ function'
- ' for {0}. Module will not be loaded: {1}'.format(
- mod.__name__, exc))
+ "Exception raised when processing __virtual__ function"
+ " for {0}. Module will not be loaded: {1}".format(
+ mod.__name__, exc
+ )
+ )
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
- virtualname = getattr(mod, '__virtualname__', virtual)
+ virtualname = getattr(mod, "__virtualname__", virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
@@ -1907,10 +1937,12 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# improperly loaded
if virtual is None:
log.warning(
- '%s.__virtual__() is wrongly returning `None`. '
- 'It should either return `True`, `False` or a new '
- 'name. If you\'re the developer of the module '
- '\'%s\', please fix this.', mod.__name__, module_name
+ "%s.__virtual__() is wrongly returning `None`. "
+ "It should either return `True`, `False` or a new "
+ "name. If you're the developer of the module "
+ "'%s', please fix this.",
+ mod.__name__,
+ module_name,
)
return (False, module_name, error_reason, virtual_aliases)
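# Hedged example of the __virtual__ contract processed above: a module
# returns its (virtual) name to load, or False -- optionally with a
# reason -- to be skipped. Module and dependency names are hypothetical.
__virtualname__ = "mymod"

def __virtual__():
    try:
        import required_lib  # hypothetical third-party dependency
    except ImportError:
        return (False, "required_lib is not installed")
    return __virtualname__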
@@ -1921,18 +1953,20 @@ class LazyLoader(salt.utils.lazy.LazyDict):
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
- log.trace('Loaded %s as virtual %s', module_name, virtual)
+ log.trace("Loaded %s as virtual %s", module_name, virtual)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
- 'The module \'%s\' is showing some bad usage. Its '
- '__virtualname__ attribute is set to \'%s\' yet the '
- '__virtual__() function is returning \'%s\'. These '
- 'values should match!',
- mod.__name__, virtualname, virtual
+ "The module '%s' is showing some bad usage. Its "
+ "__virtualname__ attribute is set to '%s' yet the "
+ "__virtual__() function is returning '%s'. These "
+ "values should match!",
+ mod.__name__,
+ virtualname,
+ virtual,
)
module_name = virtualname
@@ -1948,14 +1982,16 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
- log.debug('KeyError when loading %s', module_name, exc_info=True)
+ log.debug("KeyError when loading %s", module_name, exc_info=True)
except Exception: # pylint: disable=broad-except
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
- 'Failed to read the virtual function for %s: %s',
- self.tag, module_name, exc_info=True
+ "Failed to read the virtual function for %s: %s",
+ self.tag,
+ module_name,
+ exc_info=True,
)
return (False, module_name, error_reason, virtual_aliases)
@@ -1963,17 +1999,20 @@ class LazyLoader(salt.utils.lazy.LazyDict):
def global_injector_decorator(inject_globals):
- '''
+ """
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
- '''
+ """
+
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
+
return wrapper
+
return inner_decorator
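# Hedged usage sketch for global_injector_decorator, relying on the
# func_globals_inject context manager shown above: the injected names
# exist in the function's globals only for the duration of the call.
def probe():
    return __injected__  # noqa: F821 -- provided at call time

wrapped = global_injector_decorator({"__injected__": "hello"})(probe)
assert wrapped() == "hello"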
diff --git a/salt/log/__init__.py b/salt/log/__init__.py
index 2fbd2760794..e2514c1d0fb 100644
--- a/salt/log/__init__.py
+++ b/salt/log/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
@@ -8,7 +8,7 @@
This is where Salt's logging gets set up. Currently, the required imports
are made to ensure backwards compatibility.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import several classes/functions from salt.log.setup for backwards
@@ -20,8 +20,8 @@ from salt.log.setup import (
is_logfile_configured,
is_logging_configured,
is_temp_logging_configured,
- setup_temp_logger,
+ set_logger_level,
setup_console_logger,
setup_logfile_logger,
- set_logger_level,
+ setup_temp_logger,
)
diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py
index de262b97215..fe7002bd0b1 100644
--- a/salt/log/handlers/__init__.py
+++ b/salt/log/handlers/__init__.py
@@ -1,32 +1,36 @@
# -*- coding: utf-8 -*-
-'''
+"""
salt.log.handlers
~~~~~~~~~~~~~~~~~
.. versionadded:: 0.17.0
Custom logging handlers to be used in salt.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
-from salt._logging.handlers import TemporaryLoggingHandler
-from salt._logging.handlers import StreamHandler
-from salt._logging.handlers import FileHandler
-from salt._logging.handlers import SysLogHandler
-from salt._logging.handlers import RotatingFileHandler
-from salt._logging.handlers import WatchedFileHandler
-from salt._logging.handlers import QueueHandler
-#from salt.utils.versions import warn_until_date
-#warn_until_date(
+from salt._logging.handlers import (
+ FileHandler,
+ QueueHandler,
+ RotatingFileHandler,
+ StreamHandler,
+ SysLogHandler,
+ TemporaryLoggingHandler,
+ WatchedFileHandler,
+)
+
+# from salt.utils.versions import warn_until_date
+# warn_until_date(
# '20220101',
# 'Please stop using \'{name}\' and instead use \'salt._logging.handlers\'. '
# '\'{name}\' will go away after {{date}}.'.format(
# name=__name__
# )
-#)
+# )
NullHandler = logging.NullHandler
diff --git a/salt/log/handlers/fluent_mod.py b/salt/log/handlers/fluent_mod.py
index 78587f2d62f..3da4463396a 100644
--- a/salt/log/handlers/fluent_mod.py
+++ b/salt/log/handlers/fluent_mod.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Fluent Logging Handler
======================
@@ -71,52 +71,54 @@
.. _fluentd: http://www.fluentd.org
.. _`fluent-logger-python`: https://github.com/fluent/fluent-logger-python
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+import datetime
import logging
import logging.handlers
-import time
-import datetime
import socket
import threading
+import time
import types
-# Import salt libs
-from salt.log.setup import LOG_LEVELS
-from salt.log.mixins import NewStyleClassMixIn
import salt.utils.msgpack
import salt.utils.network
# Import Third party libs
from salt.ext import six
+from salt.log.mixins import NewStyleClassMixIn
+
+# Import salt libs
+from salt.log.setup import LOG_LEVELS
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'fluent'
+__virtualname__ = "fluent"
_global_sender = None
# Python logger's idea of "level" is wildly at variance with
# Graylog's (and, incidentally, the rest of the civilized world).
syslog_levels = {
- 'EMERG': 0,
- 'ALERT': 2,
- 'CRIT': 2,
- 'ERR': 3,
- 'WARNING': 4,
- 'NOTICE': 5,
- 'INFO': 6,
- 'DEBUG': 7
+ "EMERG": 0,
+ "ALERT": 2,
+ "CRIT": 2,
+ "ERR": 3,
+ "WARNING": 4,
+ "NOTICE": 5,
+ "INFO": 6,
+ "DEBUG": 7,
}
def setup(tag, **kwargs):
- host = kwargs.get('host', 'localhost')
- port = kwargs.get('port', 24224)
+ host = kwargs.get("host", "localhost")
+ port = kwargs.get("port", 24224)
global _global_sender
_global_sender = FluentSender(tag, host=host, port=port)
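# Hedged usage sketch, assuming a fluentd agent on the default port and
# that FluentSender keeps the upstream fluent-logger emit(label, data) API.
setup("salt", host="localhost", port=24224)
sender = get_global_sender()
sender.emit("minion", {"event": "highstate", "result": True})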
@@ -127,11 +129,11 @@ def get_global_sender():
def __virtual__():
- if not any(['fluent_handler' in __opts__]):
+ if not any(["fluent_handler" in __opts__]):
log.trace(
- 'The required configuration section, \'fluent_handler\', '
- 'was not found the in the configuration. Not loading the fluent '
- 'logging handlers module.'
+ "The required configuration section, 'fluent_handler', "
+ "was not found the in the configuration. Not loading the fluent "
+ "logging handlers module."
)
return False
return __virtualname__
@@ -140,35 +142,41 @@ def __virtual__():
def setup_handlers():
host = port = None
- if 'fluent_handler' in __opts__:
- host = __opts__['fluent_handler'].get('host', None)
- port = __opts__['fluent_handler'].get('port', None)
- payload_type = __opts__['fluent_handler'].get('payload_type', None)
+ if "fluent_handler" in __opts__:
+ host = __opts__["fluent_handler"].get("host", None)
+ port = __opts__["fluent_handler"].get("port", None)
+ payload_type = __opts__["fluent_handler"].get("payload_type", None)
# in general, you want the value of tag to ALSO be a member of tags
- tags = __opts__['fluent_handler'].get('tags', ['salt'])
- tag = tags[0] if tags else 'salt'
- if payload_type == 'graylog':
+ tags = __opts__["fluent_handler"].get("tags", ["salt"])
+ tag = tags[0] if tags else "salt"
+ if payload_type == "graylog":
version = 0
- elif payload_type == 'gelf':
+ elif payload_type == "gelf":
# We only support version 1.1 (the latest) of GELF...
version = 1.1
else:
# Default to logstash for backwards compat
- payload_type = 'logstash'
- version = __opts__['fluent_handler'].get('version', 1)
+ payload_type = "logstash"
+ version = __opts__["fluent_handler"].get("version", 1)
if host is None and port is None:
log.debug(
- 'The required \'fluent_handler\' configuration keys, '
- '\'host\' and/or \'port\', are not properly configured. Not '
- 'enabling the fluent logging handler.'
+ "The required 'fluent_handler' configuration keys, "
+ "'host' and/or 'port', are not properly configured. Not "
+ "enabling the fluent logging handler."
)
else:
- formatter = MessageFormatter(payload_type=payload_type, version=version, tags=tags)
+ formatter = MessageFormatter(
+ payload_type=payload_type, version=version, tags=tags
+ )
fluent_handler = FluentHandler(tag, host=host, port=port)
fluent_handler.setFormatter(formatter)
fluent_handler.setLevel(
- LOG_LEVELS[__opts__['fluent_handler'].get('log_level', __opts__.get('log_level', 'error'))]
+ LOG_LEVELS[
+ __opts__["fluent_handler"].get(
+ "log_level", __opts__.get("log_level", "error")
+ )
+ ]
)
yield fluent_handler
@@ -180,175 +188,247 @@ class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None):
self.payload_type = payload_type
self.version = version
- self.tag = tags[0] if tags else 'salt' # 'salt' for backwards compat
+ self.tag = tags[0] if tags else "salt" # 'salt' for backwards compat
self.tags = tags
self.msg_path = msg_path if msg_path else payload_type
self.msg_type = msg_type if msg_type else payload_type
- format_func = 'format_{0}_v{1}'.format(payload_type, version).replace('.', '_')
+ format_func = "format_{0}_v{1}".format(payload_type, version).replace(".", "_")
self.format = getattr(self, format_func)
super(MessageFormatter, self).__init__(fmt=None, datefmt=None)
def formatTime(self, record, datefmt=None):
- if self.payload_type == 'gelf': # GELF uses epoch times
+ if self.payload_type == "gelf": # GELF uses epoch times
return record.created
- return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'
+ return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + "Z"
def format_graylog_v0(self, record):
- '''
+ """
Graylog 'raw' format is essentially the raw record, minimally munged to provide
the bare minimum that td-agent requires to accept and route the event. This is
well suited to a config where the client td-agents log directly to Graylog.
- '''
+ """
message_dict = {
- 'message': record.getMessage(),
- 'timestamp': self.formatTime(record),
+ "message": record.getMessage(),
+ "timestamp": self.formatTime(record),
# Graylog uses syslog levels, not whatever it is Python does...
- 'level': syslog_levels.get(record.levelname, 'ALERT'),
- 'tag': self.tag
+ "level": syslog_levels.get(record.levelname, "ALERT"),
+ "tag": self.tag,
}
if record.exc_info:
exc_info = self.formatException(record.exc_info)
- message_dict.update({'full_message': exc_info})
+ message_dict.update({"full_message": exc_info})
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
- 'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
- 'msecs', 'message', 'msg', 'relativeCreated', 'version'):
+ if key in (
+ "args",
+ "asctime",
+ "bracketlevel",
+ "bracketname",
+ "bracketprocess",
+ "created",
+ "exc_info",
+ "exc_text",
+ "id",
+ "levelname",
+ "levelno",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "relativeCreated",
+ "version",
+ ):
# These are already handled above or explicitly pruned.
continue
- if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
+ # pylint: disable=incompatible-py3-code
+ if isinstance(
+ value, (six.string_types, bool, dict, float, int, list, types.NoneType)
+ ):
val = value
+ # pylint: enable=incompatible-py3-code
else:
val = repr(value)
- message_dict.update({'{0}'.format(key): val})
+ message_dict.update({"{0}".format(key): val})
return message_dict
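# Hedged illustration of the translation above: Python level names map to
# syslog severities, and unknown names fall back to the literal string
# "ALERT" (the dict's default argument above), not its numeric value.
assert syslog_levels["WARNING"] == 4
assert syslog_levels.get("TRACE", "ALERT") == "ALERT"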
def format_gelf_v1_1(self, record):
- '''
+ """
If your agent is (or can be) configured to forward pre-formed GELF to Graylog
with ZERO fluent processing, this function is for YOU, pal...
- '''
+ """
message_dict = {
- 'version': self.version,
- 'host': salt.utils.network.get_fqhostname(),
- 'short_message': record.getMessage(),
- 'timestamp': self.formatTime(record),
- 'level': syslog_levels.get(record.levelname, 'ALERT'),
- "_tag": self.tag
+ "version": self.version,
+ "host": salt.utils.network.get_fqhostname(),
+ "short_message": record.getMessage(),
+ "timestamp": self.formatTime(record),
+ "level": syslog_levels.get(record.levelname, "ALERT"),
+ "_tag": self.tag,
}
if record.exc_info:
exc_info = self.formatException(record.exc_info)
- message_dict.update({'full_message': exc_info})
+ message_dict.update({"full_message": exc_info})
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
- 'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
- 'msecs', 'message', 'msg', 'relativeCreated', 'version'):
+ if key in (
+ "args",
+ "asctime",
+ "bracketlevel",
+ "bracketname",
+ "bracketprocess",
+ "created",
+ "exc_info",
+ "exc_text",
+ "id",
+ "levelname",
+ "levelno",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "relativeCreated",
+ "version",
+ ):
# These are already handled above or explicitly avoided.
continue
- if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
+ # pylint: disable=incompatible-py3-code
+ if isinstance(
+ value, (six.string_types, bool, dict, float, int, list, types.NoneType)
+ ):
val = value
+ # pylint: enable=incompatible-py3-code
else:
val = repr(value)
# GELF spec require "non-standard" fields to be prefixed with '_' (underscore).
- message_dict.update({'_{0}'.format(key): val})
+ message_dict.update({"_{0}".format(key): val})
return message_dict
def format_logstash_v0(self, record):
- '''
+ """
Messages are formatted in logstash's expected format.
- '''
+ """
host = salt.utils.network.get_fqhostname()
message_dict = {
- '@timestamp': self.formatTime(record),
- '@fields': {
- 'levelname': record.levelname,
- 'logger': record.name,
- 'lineno': record.lineno,
- 'pathname': record.pathname,
- 'process': record.process,
- 'threadName': record.threadName,
- 'funcName': record.funcName,
- 'processName': record.processName
+ "@timestamp": self.formatTime(record),
+ "@fields": {
+ "levelname": record.levelname,
+ "logger": record.name,
+ "lineno": record.lineno,
+ "pathname": record.pathname,
+ "process": record.process,
+ "threadName": record.threadName,
+ "funcName": record.funcName,
+ "processName": record.processName,
},
- '@message': record.getMessage(),
- '@source': '{0}://{1}/{2}'.format(
- self.msg_type,
- host,
- self.msg_path
- ),
- '@source_host': host,
- '@source_path': self.msg_path,
- '@tags': self.tags,
- '@type': self.msg_type,
+ "@message": record.getMessage(),
+ "@source": "{0}://{1}/{2}".format(self.msg_type, host, self.msg_path),
+ "@source_host": host,
+ "@source_path": self.msg_path,
+ "@tags": self.tags,
+ "@type": self.msg_type,
}
if record.exc_info:
- message_dict['@fields']['exc_info'] = self.formatException(
- record.exc_info
- )
+ message_dict["@fields"]["exc_info"] = self.formatException(record.exc_info)
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
- 'filename', 'funcName', 'id', 'levelname', 'levelno',
- 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
- 'name', 'pathname', 'process', 'processName',
- 'relativeCreated', 'thread', 'threadName'):
+ if key in (
+ "args",
+ "asctime",
+ "created",
+ "exc_info",
+ "exc_text",
+ "filename",
+ "funcName",
+ "id",
+ "levelname",
+ "levelno",
+ "lineno",
+ "module",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "name",
+ "pathname",
+ "process",
+ "processName",
+ "relativeCreated",
+ "thread",
+ "threadName",
+ ):
# These are already handled above or not handled at all
continue
if value is None:
- message_dict['@fields'][key] = value
+ message_dict["@fields"][key] = value
continue
if isinstance(value, (six.string_types, bool, dict, float, int, list)):
- message_dict['@fields'][key] = value
+ message_dict["@fields"][key] = value
continue
- message_dict['@fields'][key] = repr(value)
+ message_dict["@fields"][key] = repr(value)
return message_dict
def format_logstash_v1(self, record):
- '''
+ """
Messages are formatted in logstash's expected format.
- '''
+ """
message_dict = {
- '@version': 1,
- '@timestamp': self.formatTime(record),
- 'host': salt.utils.network.get_fqhostname(),
- 'levelname': record.levelname,
- 'logger': record.name,
- 'lineno': record.lineno,
- 'pathname': record.pathname,
- 'process': record.process,
- 'threadName': record.threadName,
- 'funcName': record.funcName,
- 'processName': record.processName,
- 'message': record.getMessage(),
- 'tags': self.tags,
- 'type': self.msg_type
+ "@version": 1,
+ "@timestamp": self.formatTime(record),
+ "host": salt.utils.network.get_fqhostname(),
+ "levelname": record.levelname,
+ "logger": record.name,
+ "lineno": record.lineno,
+ "pathname": record.pathname,
+ "process": record.process,
+ "threadName": record.threadName,
+ "funcName": record.funcName,
+ "processName": record.processName,
+ "message": record.getMessage(),
+ "tags": self.tags,
+ "type": self.msg_type,
}
if record.exc_info:
- message_dict['exc_info'] = self.formatException(
- record.exc_info
- )
+ message_dict["exc_info"] = self.formatException(record.exc_info)
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
- 'filename', 'funcName', 'id', 'levelname', 'levelno',
- 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
- 'name', 'pathname', 'process', 'processName',
- 'relativeCreated', 'thread', 'threadName'):
+ if key in (
+ "args",
+ "asctime",
+ "created",
+ "exc_info",
+ "exc_text",
+ "filename",
+ "funcName",
+ "id",
+ "levelname",
+ "levelno",
+ "lineno",
+ "module",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "name",
+ "pathname",
+ "process",
+ "processName",
+ "relativeCreated",
+ "thread",
+ "threadName",
+ ):
# These are already handled above or not handled at all
continue
@@ -365,20 +445,16 @@ class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
class FluentHandler(logging.Handler):
- '''
+ """
Logging Handler for fluent.
- '''
- def __init__(self,
- tag,
- host='localhost',
- port=24224,
- timeout=3.0,
- verbose=False):
+ """
+
+ def __init__(self, tag, host="localhost", port=24224, timeout=3.0, verbose=False):
self.tag = tag
- self.sender = FluentSender(tag,
- host=host, port=port,
- timeout=timeout, verbose=verbose)
+ self.sender = FluentSender(
+ tag, host=host, port=port, timeout=timeout, verbose=verbose
+ )
logging.Handler.__init__(self)
def emit(self, record):
@@ -395,13 +471,15 @@ class FluentHandler(logging.Handler):
class FluentSender(object):
- def __init__(self,
- tag,
- host='localhost',
- port=24224,
- bufmax=1 * 1024 * 1024,
- timeout=3.0,
- verbose=False):
+ def __init__(
+ self,
+ tag,
+ host="localhost",
+ port=24224,
+ bufmax=1 * 1024 * 1024,
+ timeout=3.0,
+ verbose=False,
+ ):
self.tag = tag
self.host = host
@@ -430,7 +508,7 @@ class FluentSender(object):
def _make_packet(self, label, timestamp, data):
if label:
- tag = '.'.join((self.tag, label))
+ tag = ".".join((self.tag, label))
else:
tag = self.tag
packet = (tag, timestamp, data)
@@ -472,10 +550,10 @@ class FluentSender(object):
def _reconnect(self):
if not self.socket:
- if self.host.startswith('unix://'):
+ if self.host.startswith("unix://"):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
- sock.connect(self.host[len('unix://'):])
+ sock.connect(self.host[len("unix://") :])
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
diff --git a/salt/log/handlers/log4mongo_mod.py b/salt/log/handlers/log4mongo_mod.py
index 2a75b89418d..a54bd497148 100644
--- a/salt/log/handlers/log4mongo_mod.py
+++ b/salt/log/handlers/log4mongo_mod.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Log4Mongo Logging Handler
=========================
@@ -33,11 +33,12 @@
This work was inspired by the Salt logging handlers for LogStash and
Sentry and by the log4mongo Python implementation.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import socket
+
import logging
+import socket
# Import salt libs
from salt.ext import six
@@ -47,11 +48,12 @@ from salt.log.setup import LOG_LEVELS
# Import third party libs
try:
from log4mongo.handlers import MongoHandler, MongoFormatter
+
HAS_MONGO = True
except ImportError:
HAS_MONGO = False
-__virtualname__ = 'mongo'
+__virtualname__ = "mongo"
def __virtual__():
@@ -64,38 +66,32 @@ class FormatterWithHost(logging.Formatter, NewStyleClassMixIn):
def format(self, record):
mongoformatter = MongoFormatter()
document = mongoformatter.format(record)
- document['hostname'] = socket.gethostname()
+ document["hostname"] = socket.gethostname()
return document
def setup_handlers():
- handler_id = 'log4mongo_handler'
+ handler_id = "log4mongo_handler"
if handler_id in __opts__:
config_fields = {
- 'host': 'host',
- 'port': 'port',
- 'database_name': 'database_name',
- 'collection': 'collection',
- 'username': 'username',
- 'password': 'password',
- 'write_concern': 'w'
+ "host": "host",
+ "port": "port",
+ "database_name": "database_name",
+ "collection": "collection",
+ "username": "username",
+ "password": "password",
+ "write_concern": "w",
}
config_opts = {}
for config_opt, arg_name in six.iteritems(config_fields):
config_opts[arg_name] = __opts__[handler_id].get(config_opt)
- config_opts['level'] = LOG_LEVELS[
- __opts__[handler_id].get(
- 'log_level',
- __opts__.get('log_level', 'error')
- )
+ config_opts["level"] = LOG_LEVELS[
+ __opts__[handler_id].get("log_level", __opts__.get("log_level", "error"))
]
- handler = MongoHandler(
- formatter=FormatterWithHost(),
- **config_opts
- )
+ handler = MongoHandler(formatter=FormatterWithHost(), **config_opts)
yield handler
else:
yield False
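# Hedged example of the opts shape consumed above; keys follow
# config_fields, and all values here are purely illustrative.
__opts__ = {
    "log4mongo_handler": {
        "host": "mongo.example.com",
        "port": 27017,
        "database_name": "logs",
        "collection": "salt",
        "username": "logger",
        "password": "secret",
        "write_concern": 1,  # passed to MongoHandler as 'w'
        "log_level": "warning",
    }
}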
diff --git a/salt/log/handlers/logstash_mod.py b/salt/log/handlers/logstash_mod.py
index 5f003446a23..8fdba7a5f46 100644
--- a/salt/log/handlers/logstash_mod.py
+++ b/salt/log/handlers/logstash_mod.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Logstash Logging Handler
========================
@@ -153,24 +153,27 @@
.. _`ZeroMQ input`: http://logstash.net/docs/latest/inputs/zeromq
.. _`high water mark`: http://api.zeromq.org/3-2:zmq-setsockopt
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
+
+import datetime
import logging
import logging.handlers
-import datetime
+import os
-# Import salt libs
-from salt.log.setup import LOG_LEVELS
-from salt.log.mixins import NewStyleClassMixIn
import salt.utils.json
import salt.utils.network
import salt.utils.stringutils
# Import Third party libs
from salt.ext import six
+from salt.log.mixins import NewStyleClassMixIn
+
+# Import salt libs
+from salt.log.setup import LOG_LEVELS
+
try:
import zmq
except ImportError:
@@ -179,17 +182,18 @@ except ImportError:
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'logstash'
+__virtualname__ = "logstash"
def __virtual__():
- if not any(['logstash_udp_handler' in __opts__,
- 'logstash_zmq_handler' in __opts__]):
+ if not any(
+ ["logstash_udp_handler" in __opts__, "logstash_zmq_handler" in __opts__]
+ ):
log.trace(
- 'None of the required configuration sections, '
- '\'logstash_udp_handler\' and \'logstash_zmq_handler\', '
- 'were found in the configuration. Not loading the Logstash '
- 'logging handlers module.'
+ "None of the required configuration sections, "
+ "'logstash_udp_handler' and 'logstash_zmq_handler', "
+ "were found in the configuration. Not loading the Logstash "
+ "logging handlers module."
)
return False
return __virtualname__
@@ -198,17 +202,17 @@ def __virtual__():
def setup_handlers():
host = port = address = None
- if 'logstash_udp_handler' in __opts__:
- host = __opts__['logstash_udp_handler'].get('host', None)
- port = __opts__['logstash_udp_handler'].get('port', None)
- version = __opts__['logstash_udp_handler'].get('version', 0)
- msg_type = __opts__['logstash_udp_handler'].get('msg_type', 'logstash')
+ if "logstash_udp_handler" in __opts__:
+ host = __opts__["logstash_udp_handler"].get("host", None)
+ port = __opts__["logstash_udp_handler"].get("port", None)
+ version = __opts__["logstash_udp_handler"].get("version", 0)
+ msg_type = __opts__["logstash_udp_handler"].get("msg_type", "logstash")
if host is None and port is None:
log.debug(
- 'The required \'logstash_udp_handler\' configuration keys, '
- '\'host\' and/or \'port\', are not properly configured. Not '
- 'configuring the logstash UDP logging handler.'
+ "The required 'logstash_udp_handler' configuration keys, "
+ "'host' and/or 'port', are not properly configured. Not "
+ "configuring the logstash UDP logging handler."
)
else:
logstash_formatter = LogstashFormatter(msg_type=msg_type, version=version)
@@ -216,30 +220,30 @@ def setup_handlers():
udp_handler.setFormatter(logstash_formatter)
udp_handler.setLevel(
LOG_LEVELS[
- __opts__['logstash_udp_handler'].get(
- 'log_level',
+ __opts__["logstash_udp_handler"].get(
+ "log_level",
# Not set? Get the main salt log_level setting on the
# configuration file
__opts__.get(
- 'log_level',
+ "log_level",
# Also not set?! Default to 'error'
- 'error'
- )
+ "error",
+ ),
)
]
)
yield udp_handler
- if 'logstash_zmq_handler' in __opts__:
- address = __opts__['logstash_zmq_handler'].get('address', None)
- zmq_hwm = __opts__['logstash_zmq_handler'].get('hwm', 1000)
- version = __opts__['logstash_zmq_handler'].get('version', 0)
+ if "logstash_zmq_handler" in __opts__:
+ address = __opts__["logstash_zmq_handler"].get("address", None)
+ zmq_hwm = __opts__["logstash_zmq_handler"].get("hwm", 1000)
+ version = __opts__["logstash_zmq_handler"].get("version", 0)
if address is None:
log.debug(
- 'The required \'logstash_zmq_handler\' configuration key, '
- '\'address\', is not properly configured. Not '
- 'configuring the logstash ZMQ logging handler.'
+ "The required 'logstash_zmq_handler' configuration key, "
+ "'address', is not properly configured. Not "
+ "configuring the logstash ZMQ logging handler."
)
else:
logstash_formatter = LogstashFormatter(version=version)
@@ -247,15 +251,15 @@ def setup_handlers():
zmq_handler.setFormatter(logstash_formatter)
zmq_handler.setLevel(
LOG_LEVELS[
- __opts__['logstash_zmq_handler'].get(
- 'log_level',
+ __opts__["logstash_zmq_handler"].get(
+ "log_level",
# Not set? Get the main salt log_level setting on the
# configuration file
__opts__.get(
- 'log_level',
+ "log_level",
# Also not set?! Default to 'error'
- 'error'
- )
+ "error",
+ ),
)
]
)
@@ -266,98 +270,130 @@ def setup_handlers():
class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
- def __init__(self, msg_type='logstash', msg_path='logstash', version=0):
+ def __init__(self, msg_type="logstash", msg_path="logstash", version=0):
self.msg_path = msg_path
self.msg_type = msg_type
self.version = version
- self.format = getattr(self, 'format_v{0}'.format(version))
+ self.format = getattr(self, "format_v{0}".format(version))
super(LogstashFormatter, self).__init__(fmt=None, datefmt=None)
def formatTime(self, record, datefmt=None):
- return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'
+ return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + "Z"
def format_v0(self, record):
host = salt.utils.network.get_fqhostname()
message_dict = {
- '@timestamp': self.formatTime(record),
- '@fields': {
- 'levelname': record.levelname,
- 'logger': record.name,
- 'lineno': record.lineno,
- 'pathname': record.pathname,
- 'process': record.process,
- 'threadName': record.threadName,
- 'funcName': record.funcName,
- 'processName': record.processName
+ "@timestamp": self.formatTime(record),
+ "@fields": {
+ "levelname": record.levelname,
+ "logger": record.name,
+ "lineno": record.lineno,
+ "pathname": record.pathname,
+ "process": record.process,
+ "threadName": record.threadName,
+ "funcName": record.funcName,
+ "processName": record.processName,
},
- '@message': record.getMessage(),
- '@source': '{0}://{1}/{2}'.format(
- self.msg_type,
- host,
- self.msg_path
- ),
- '@source_host': host,
- '@source_path': self.msg_path,
- '@tags': ['salt'],
- '@type': self.msg_type,
+ "@message": record.getMessage(),
+ "@source": "{0}://{1}/{2}".format(self.msg_type, host, self.msg_path),
+ "@source_host": host,
+ "@source_path": self.msg_path,
+ "@tags": ["salt"],
+ "@type": self.msg_type,
}
if record.exc_info:
- message_dict['@fields']['exc_info'] = self.formatException(
- record.exc_info
- )
+ message_dict["@fields"]["exc_info"] = self.formatException(record.exc_info)
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
- 'filename', 'funcName', 'id', 'levelname', 'levelno',
- 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
- 'name', 'pathname', 'process', 'processName',
- 'relativeCreated', 'thread', 'threadName'):
+ if key in (
+ "args",
+ "asctime",
+ "created",
+ "exc_info",
+ "exc_text",
+ "filename",
+ "funcName",
+ "id",
+ "levelname",
+ "levelno",
+ "lineno",
+ "module",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "name",
+ "pathname",
+ "process",
+ "processName",
+ "relativeCreated",
+ "thread",
+ "threadName",
+ ):
# These are already handled above or not handled at all
continue
if value is None:
- message_dict['@fields'][key] = value
+ message_dict["@fields"][key] = value
continue
if isinstance(value, (six.string_types, bool, dict, float, int, list)):
- message_dict['@fields'][key] = value
+ message_dict["@fields"][key] = value
continue
- message_dict['@fields'][key] = repr(value)
+ message_dict["@fields"][key] = repr(value)
return salt.utils.json.dumps(message_dict)
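# Hedged sketch of the v0 payload shape serialized above (field values
# are illustrative, not captured from a real record).
example_v0 = {
    "@timestamp": "2020-01-01T00:00:00.000Z",
    "@fields": {"levelname": "ERROR", "logger": "salt.minion", "lineno": 42},
    "@message": "An error occurred",
    "@source": "logstash://minion1/logstash",
    "@source_host": "minion1",
    "@source_path": "logstash",
    "@tags": ["salt"],
    "@type": "logstash",
}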
def format_v1(self, record):
message_dict = {
- '@version': 1,
- '@timestamp': self.formatTime(record),
- 'host': salt.utils.network.get_fqhostname(),
- 'levelname': record.levelname,
- 'logger': record.name,
- 'lineno': record.lineno,
- 'pathname': record.pathname,
- 'process': record.process,
- 'threadName': record.threadName,
- 'funcName': record.funcName,
- 'processName': record.processName,
- 'message': record.getMessage(),
- 'tags': ['salt'],
- 'type': self.msg_type
+ "@version": 1,
+ "@timestamp": self.formatTime(record),
+ "host": salt.utils.network.get_fqhostname(),
+ "levelname": record.levelname,
+ "logger": record.name,
+ "lineno": record.lineno,
+ "pathname": record.pathname,
+ "process": record.process,
+ "threadName": record.threadName,
+ "funcName": record.funcName,
+ "processName": record.processName,
+ "message": record.getMessage(),
+ "tags": ["salt"],
+ "type": self.msg_type,
}
if record.exc_info:
- message_dict['exc_info'] = self.formatException(
- record.exc_info
- )
+ message_dict["exc_info"] = self.formatException(record.exc_info)
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
- if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
- 'filename', 'funcName', 'id', 'levelname', 'levelno',
- 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
- 'name', 'pathname', 'process', 'processName',
- 'relativeCreated', 'thread', 'threadName'):
+ if key in (
+ "args",
+ "asctime",
+ "created",
+ "exc_info",
+ "exc_text",
+ "filename",
+ "funcName",
+ "id",
+ "levelname",
+ "levelno",
+ "lineno",
+ "module",
+ "msecs",
+ "msecs",
+ "message",
+ "msg",
+ "name",
+ "pathname",
+ "process",
+ "processName",
+ "relativeCreated",
+ "thread",
+ "threadName",
+ ):
# These are already handled above or not handled at all
continue
@@ -374,18 +410,18 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
class DatagramLogstashHandler(logging.handlers.DatagramHandler):
- '''
+ """
Logstash UDP logging handler.
- '''
+ """
def makePickle(self, record):
return salt.utils.stringutils.to_bytes(self.format(record))
class ZMQLogstashHander(logging.Handler, NewStyleClassMixIn):
- '''
+ """
Logstash ZMQ logging handler.
- '''
+ """
def __init__(self, address, level=logging.NOTSET, zmq_hwm=1000):
super(ZMQLogstashHander, self).__init__(level=level)
@@ -397,7 +433,7 @@ class ZMQLogstashHander(logging.Handler, NewStyleClassMixIn):
@property
def publisher(self):
current_pid = os.getpid()
- if not getattr(self, '_publisher') or self._pid != current_pid:
+ if not getattr(self, "_publisher", None) or self._pid != current_pid:
# We forked? Multiprocessing? Recreate!!!
self._pid = current_pid
self._context = zmq.Context()
@@ -423,10 +459,10 @@ class ZMQLogstashHander(logging.Handler, NewStyleClassMixIn):
def close(self):
if self._context is not None:
# One second to send any queued messages
- if hasattr(self._context, 'destroy'):
+ if hasattr(self._context, "destroy"):
self._context.destroy(1 * 1000)
else:
- if getattr(self, '_publisher', None) is not None:
+ if getattr(self, "_publisher", None) is not None:
self._publisher.setsockopt(zmq.LINGER, 1 * 1000)
self._publisher.close()
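# The `publisher` property above is a fork-aware lazy singleton: the socket is
# cached per-PID and rebuilt whenever os.getpid() changes, since ZMQ contexts
# must not be shared across a fork. A sketch of the same pattern with a
# placeholder resource standing in for the real ZMQ PUB socket:
import os

class ForkAwareResource(object):
    _resource = None
    _pid = None

    @property
    def resource(self):
        current_pid = os.getpid()
        if self._resource is None or self._pid != current_pid:
            # We forked? Multiprocessing? Recreate!
            self._pid = current_pid
            self._resource = object()  # stand-in for zmq.Context().socket(...)
        return self._resource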
diff --git a/salt/log/handlers/sentry_mod.py b/salt/log/handlers/sentry_mod.py
index 5f9974c19f2..15b444a52f9 100644
--- a/salt/log/handlers/sentry_mod.py
+++ b/salt/log/handlers/sentry_mod.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Sentry Logging Handler
======================
@@ -84,7 +84,7 @@
.. _`Sentry`: https://getsentry.com
.. _`Raven`: https://raven.readthedocs.io
.. _`Raven client documentation`: https://raven.readthedocs.io/en/latest/config/index.html#client-arguments
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -99,6 +99,7 @@ from salt.log import LOG_LEVELS
try:
import raven
from raven.handlers.logging import SentryHandler
+
HAS_RAVEN = True
except ImportError:
HAS_RAVEN = False
@@ -108,7 +109,7 @@ __grains__ = {}
__salt__ = {}
# Define the module's virtual name
-__virtualname__ = 'sentry'
+__virtualname__ = "sentry"
def __virtual__():
@@ -118,38 +119,40 @@ def __virtual__():
def setup_handlers():
- '''
+ """
sets up the sentry handler
- '''
+ """
__grains__ = salt.loader.grains(__opts__)
__salt__ = salt.loader.minion_mods(__opts__)
- if 'sentry_handler' not in __opts__:
- log.debug('No \'sentry_handler\' key was found in the configuration')
+ if "sentry_handler" not in __opts__:
+ log.debug("No 'sentry_handler' key was found in the configuration")
return False
options = {}
- dsn = get_config_value('dsn')
+ dsn = get_config_value("dsn")
if dsn is not None:
try:
# support raven ver 5.5.0
from raven.transport import TransportRegistry, default_transports
from raven.utils.urlparse import urlparse
+
transport_registry = TransportRegistry(default_transports)
url = urlparse(dsn)
if not transport_registry.supported_scheme(url.scheme):
- raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
+ raise ValueError(
+ "Unsupported Sentry DSN scheme: {0}".format(url.scheme)
+ )
except ValueError as exc:
- log.info(
- 'Raven failed to parse the configuration provided DSN: %s', exc
- )
+ log.info("Raven failed to parse the configuration provided DSN: %s", exc)
if not dsn:
- for key in ('project', 'servers', 'public_key', 'secret_key'):
+ for key in ("project", "servers", "public_key", "secret_key"):
config_value = get_config_value(key)
if config_value is None and key not in options:
log.debug(
- 'The required \'sentry_handler\' configuration key, '
- '\'%s\', is not properly configured. Not configuring '
- 'the sentry logging handler.', key
+ "The required 'sentry_handler' configuration key, "
+ "'%s', is not properly configured. Not configuring "
+ "the sentry logging handler.",
+ key,
)
return
elif config_value is None:
@@ -157,67 +160,60 @@ def setup_handlers():
options[key] = config_value
# site: An optional, arbitrary string to identify this client installation.
- options.update({
- # site: An optional, arbitrary string to identify this client
- # installation
- 'site': get_config_value('site'),
-
- # name: This will override the server_name value for this installation.
- # Defaults to socket.gethostname()
- 'name': get_config_value('name'),
-
- # exclude_paths: Extending this allow you to ignore module prefixes
- # when sentry attempts to discover which function an error comes from
- 'exclude_paths': get_config_value('exclude_paths', ()),
-
- # include_paths: For example, in Django this defaults to your list of
- # INSTALLED_APPS, and is used for drilling down where an exception is
- # located
- 'include_paths': get_config_value('include_paths', ()),
-
- # list_max_length: The maximum number of items a list-like container
- # should store.
- 'list_max_length': get_config_value('list_max_length'),
-
- # string_max_length: The maximum characters of a string that should be
- # stored.
- 'string_max_length': get_config_value('string_max_length'),
-
- # auto_log_stacks: Should Raven automatically log frame stacks
- # (including locals) all calls as it would for exceptions.
- 'auto_log_stacks': get_config_value('auto_log_stacks'),
-
- # timeout: If supported, the timeout value for sending messages to
- # remote.
- 'timeout': get_config_value('timeout', 1),
-
- # processors: A list of processors to apply to events before sending
- # them to the Sentry server. Useful for sending additional global state
- # data or sanitizing data that you want to keep off of the server.
- 'processors': get_config_value('processors'),
-
- # dsn: Ensure the DSN is passed into the client
- 'dsn': dsn
- })
+ options.update(
+ {
+ # site: An optional, arbitrary string to identify this client
+ # installation
+ "site": get_config_value("site"),
+ # name: This will override the server_name value for this installation.
+ # Defaults to socket.gethostname()
+ "name": get_config_value("name"),
+ # exclude_paths: Extending this allows you to ignore module prefixes
+ # when sentry attempts to discover which function an error comes from
+ "exclude_paths": get_config_value("exclude_paths", ()),
+ # include_paths: For example, in Django this defaults to your list of
+ # INSTALLED_APPS, and is used for drilling down where an exception is
+ # located
+ "include_paths": get_config_value("include_paths", ()),
+ # list_max_length: The maximum number of items a list-like container
+ # should store.
+ "list_max_length": get_config_value("list_max_length"),
+ # string_max_length: The maximum number of characters of a string
+ # that should be stored.
+ "string_max_length": get_config_value("string_max_length"),
+ # auto_log_stacks: Should Raven automatically log frame stacks
+ # (including locals) all calls as it would for exceptions.
+ "auto_log_stacks": get_config_value("auto_log_stacks"),
+ # timeout: If supported, the timeout value for sending messages to
+ # remote.
+ "timeout": get_config_value("timeout", 1),
+ # processors: A list of processors to apply to events before sending
+ # them to the Sentry server. Useful for sending additional global state
+ # data or sanitizing data that you want to keep off of the server.
+ "processors": get_config_value("processors"),
+ # dsn: Ensure the DSN is passed into the client
+ "dsn": dsn,
+ }
+ )
client = raven.Client(**options)
- context = get_config_value('context')
+ context = get_config_value("context")
context_dict = {}
if context is not None:
for tag in context:
try:
tag_value = __grains__[tag]
except KeyError:
- log.debug('Sentry tag \'%s\' not found in grains.', tag)
+ log.debug("Sentry tag '%s' not found in grains.", tag)
continue
if tag_value:
context_dict[tag] = tag_value
if context_dict:
- client.context.merge({'tags': context_dict})
+ client.context.merge({"tags": context_dict})
try:
handler = SentryHandler(client)
- exclude_patterns = get_config_value('exclude_patterns', None)
+ exclude_patterns = get_config_value("exclude_patterns", None)
if exclude_patterns:
filter_regexes = [re.compile(pattern) for pattern in exclude_patterns]
@@ -229,14 +225,14 @@ def setup_handlers():
handler.addFilter(FilterExcludedMessages())
- handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
+ handler.setLevel(LOG_LEVELS[get_config_value("log_level", "error")])
return handler
except ValueError as exc:
- log.debug('Failed to setup the sentry logging handler', exc_info=True)
+ log.debug("Failed to setup the sentry logging handler", exc_info=True)
def get_config_value(name, default=None):
- '''
+ """
returns a configuration option for the sentry_handler
- '''
- return __opts__['sentry_handler'].get(name, default)
+ """
+ return __opts__["sentry_handler"].get(name, default)
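# `get_config_value` is a thin wrapper over the `sentry_handler` section of
# the loaded configuration. A standalone sketch of the lookup semantics, with
# a hand-built `__opts__` (the DSN value is illustrative only):
__opts__ = {"sentry_handler": {"dsn": "https://key@sentry.example.com/1"}}

def get_config_value(name, default=None):
    return __opts__["sentry_handler"].get(name, default)

assert get_config_value("dsn").startswith("https://")
assert get_config_value("timeout", 1) == 1  # falls back to the default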
diff --git a/salt/log/mixins.py b/salt/log/mixins.py
index 6e8645d96dd..47477c9964b 100644
--- a/salt/log/mixins.py
+++ b/salt/log/mixins.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
@@ -9,25 +9,28 @@
.. versionadded:: 0.17.0
Some mix-in classes to be used in salt's logging
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
# pylint: disable=unused-import
-from salt._logging.mixins import NewStyleClassMixin as NewStyleClassMixIn
-from salt._logging.mixins import LoggingProfileMixin as LoggingProfileMixIn
-from salt._logging.mixins import LoggingTraceMixin as LoggingTraceMixIn
+from salt._logging.mixins import (
+ ExcInfoOnLogLevelFormatMixin as ExcInfoOnLogLevelFormatMixIn,
+)
from salt._logging.mixins import LoggingGarbageMixin as LoggingGarbageMixIn
from salt._logging.mixins import LoggingMixinMeta as LoggingMixInMeta
-from salt._logging.mixins import ExcInfoOnLogLevelFormatMixin as ExcInfoOnLogLevelFormatMixIn
+from salt._logging.mixins import LoggingProfileMixin as LoggingProfileMixIn
+from salt._logging.mixins import LoggingTraceMixin as LoggingTraceMixIn
+from salt._logging.mixins import NewStyleClassMixin as NewStyleClassMixIn
+
# pylint: enable=unused-import
-#from salt.utils.versions import warn_until_date
-#warn_until_date(
+# from salt.utils.versions import warn_until_date
+# warn_until_date(
# '20220101',
# 'Please stop using \'{name}\' and instead use \'salt._logging.mixins\'. '
# '\'{name}\' will go away after {{date}}.'.format(
# name=__name__
# )
-#)
+# )
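# The commented-out block above is a deprecation shim: once enabled, importing
# this module would tell callers to switch to `salt._logging.mixins` before a
# cutoff date. A generic sketch of the same idea using only the stdlib (the
# helper name here is hypothetical; salt's own helper is `warn_until_date`):
import warnings

def warn_module_moved(old_name, new_name, date):
    warnings.warn(
        "Please stop using '{0}' and instead use '{1}'. "
        "'{0}' will go away after {2}.".format(old_name, new_name, date),
        DeprecationWarning,
        stacklevel=2,
    )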
diff --git a/salt/log/setup.py b/salt/log/setup.py
index 6f9fa63cda2..7d02e315e3d 100644
--- a/salt/log/setup.py
+++ b/salt/log/setup.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
@@ -11,42 +11,52 @@
This module should be imported as soon as possible, preferably the first
module salt or any salt depending library imports so any new logging
logger instance uses our ``salt.log.setup.SaltLoggingClass``.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
-import sys
-import time
-import types
-import socket
+
import logging
import logging.handlers
-import traceback
import multiprocessing
-
-# Import 3rd-party libs
-from salt.ext import six
-from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=import-error,no-name-in-module
+import os
+import socket
+import sys
+import time
+import traceback
+import types
# Import salt libs
# pylint: disable=unused-import
-from salt._logging import (LOG_COLORS,
- LOG_LEVELS,
- LOG_VALUES_TO_LEVELS,
- SORTED_LEVEL_NAMES)
-from salt._logging.impl import (SaltLogRecord,
- SaltColorLogRecord,
- LOGGING_NULL_HANDLER,
- LOGGING_STORE_HANDLER,
- LOGGING_TEMP_HANDLER)
+from salt._logging import (
+ LOG_COLORS,
+ LOG_LEVELS,
+ LOG_VALUES_TO_LEVELS,
+ SORTED_LEVEL_NAMES,
+)
+from salt._logging.handlers import (
+ FileHandler,
+ QueueHandler,
+ RotatingFileHandler,
+ StreamHandler,
+ SysLogHandler,
+ WatchedFileHandler,
+)
+from salt._logging.impl import (
+ LOGGING_NULL_HANDLER,
+ LOGGING_STORE_HANDLER,
+ LOGGING_TEMP_HANDLER,
+ SaltColorLogRecord,
+ SaltLogRecord,
+)
from salt._logging.impl import set_log_record_factory as setLogRecordFactory
-from salt._logging.handlers import (StreamHandler,
- SysLogHandler,
- FileHandler,
- WatchedFileHandler,
- RotatingFileHandler,
- QueueHandler)
+
+# Import 3rd-party libs
+from salt.ext import six
+from salt.ext.six.moves.urllib.parse import ( # pylint: disable=import-error,no-name-in-module
+ urlparse,
+)
+
# pylint: enable=unused-import
__CONSOLE_CONFIGURED = False
@@ -61,7 +71,7 @@ __MP_LOGGING_QUEUE = None
__MP_LOGGING_LEVEL = logging.GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
-__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
+__MP_IN_MAINPROCESS = multiprocessing.current_process().name == "MainProcess"
__MP_MAINPROCESS_ID = None
@@ -94,34 +104,32 @@ def is_extended_logging_configured():
class SaltLogQueueHandler(QueueHandler):
- '''
+ """
Subclassed just to differentiate when debugging
- '''
+ """
def getLogger(name): # pylint: disable=C0103
- '''
+ """
This function is just a helper, an alias to:
logging.getLogger(name)
Although you might find it useful, there's no reason why you should not be
using the aliased method.
- '''
+ """
return logging.getLogger(name)
-def setup_temp_logger(log_level='error'):
- '''
+def setup_temp_logger(log_level="error"):
+ """
Setup the temporary console logger
- '''
+ """
if is_temp_logging_configured():
- logging.getLogger(__name__).warning(
- 'Temporary logging is already configured'
- )
+ logging.getLogger(__name__).warning("Temporary logging is already configured")
return
if log_level is None:
- log_level = 'warning'
+ log_level = "warning"
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
@@ -130,7 +138,7 @@ def setup_temp_logger(log_level='error'):
if handler in (LOGGING_NULL_HANDLER, LOGGING_STORE_HANDLER):
continue
- if not hasattr(handler, 'stream'):
+ if not hasattr(handler, "stream"):
# Not a stream handler, continue
continue
@@ -142,9 +150,7 @@ def setup_temp_logger(log_level='error'):
handler.setLevel(level)
# Set the default temporary console formatter config
- formatter = logging.Formatter(
- '[%(levelname)-8s] %(message)s', datefmt='%H:%M:%S'
- )
+ formatter = logging.Formatter("[%(levelname)-8s] %(message)s", datefmt="%H:%M:%S")
handler.setFormatter(formatter)
logging.root.addHandler(handler)
@@ -153,8 +159,7 @@ def setup_temp_logger(log_level='error'):
LOGGING_NULL_HANDLER.sync_with_handlers([handler])
else:
logging.getLogger(__name__).debug(
- 'LOGGING_NULL_HANDLER is already None, can\'t sync messages '
- 'with it'
+ "LOGGING_NULL_HANDLER is already None, can't sync messages " "with it"
)
# Remove the temporary null logging handler
@@ -164,19 +169,19 @@ def setup_temp_logger(log_level='error'):
__TEMP_LOGGING_CONFIGURED = True
-def setup_console_logger(log_level='error', log_format=None, date_format=None):
- '''
+def setup_console_logger(log_level="error", log_format=None, date_format=None):
+ """
Setup the console logger
- '''
+ """
if is_console_configured():
- logging.getLogger(__name__).warning('Console logging already configured')
+ logging.getLogger(__name__).warning("Console logging already configured")
return
# Remove the temporary logging handler
__remove_temp_logging_handler()
if log_level is None:
- log_level = 'warning'
+ log_level = "warning"
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
@@ -187,7 +192,7 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
if handler is LOGGING_STORE_HANDLER:
continue
- if not hasattr(handler, 'stream'):
+ if not hasattr(handler, "stream"):
# Not a stream handler, continue
continue
@@ -200,9 +205,9 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
# Set the default console formatter config
if not log_format:
- log_format = '[%(levelname)-8s] %(message)s'
+ log_format = "[%(levelname)-8s] %(message)s"
if not date_format:
- date_format = '%H:%M:%S'
+ date_format = "%H:%M:%S"
formatter = logging.Formatter(log_format, datefmt=date_format)
@@ -215,9 +220,15 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
__LOGGING_CONSOLE_HANDLER = handler
-def setup_logfile_logger(log_path, log_level='error', log_format=None,
- date_format=None, max_bytes=0, backup_count=0):
- '''
+def setup_logfile_logger(
+ log_path,
+ log_level="error",
+ log_format=None,
+ date_format=None,
+ max_bytes=0,
+ backup_count=0,
+):
+ """
Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
@@ -241,15 +252,15 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
The preferred way to do remote logging is to set up a local syslog, point
salt's logging to the local syslog (a unix socket is much faster) and then
have the local syslog forward the log messages to the remote syslog.
- '''
+ """
if is_logfile_configured():
- logging.getLogger(__name__).warning('Logfile logging already configured')
+ logging.getLogger(__name__).warning("Logfile logging already configured")
return
if log_path is None:
logging.getLogger(__name__).warning(
- 'log_path setting is set to `None`. Nothing else to do'
+ "log_path setting is set to `None`. Nothing else to do"
)
return
@@ -257,7 +268,7 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
__remove_temp_logging_handler()
if log_level is None:
- log_level = 'warning'
+ log_level = "warning"
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
@@ -265,77 +276,71 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
root_logger = logging.getLogger()
- if parsed_log_path.scheme in ('tcp', 'udp', 'file'):
+ if parsed_log_path.scheme in ("tcp", "udp", "file"):
syslog_opts = {
- 'facility': SysLogHandler.LOG_USER,
- 'socktype': socket.SOCK_DGRAM
+ "facility": SysLogHandler.LOG_USER,
+ "socktype": socket.SOCK_DGRAM,
}
- if parsed_log_path.scheme == 'file' and parsed_log_path.path:
+ if parsed_log_path.scheme == "file" and parsed_log_path.path:
facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
- if not facility_name.startswith('LOG_'):
+ if not facility_name.startswith("LOG_"):
# The user is not specifying a syslog facility
- facility_name = 'LOG_USER' # Syslog default
- syslog_opts['address'] = parsed_log_path.path
+ facility_name = "LOG_USER" # Syslog default
+ syslog_opts["address"] = parsed_log_path.path
else:
# The user has set a syslog facility, let's update the path to
# the logging socket
- syslog_opts['address'] = os.sep.join(
+ syslog_opts["address"] = os.sep.join(
parsed_log_path.path.split(os.sep)[:-1]
)
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
facility_name = parsed_log_path.path.lstrip(os.sep).upper()
- if not facility_name.startswith('LOG_'):
+ if not facility_name.startswith("LOG_"):
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise RuntimeError(
- 'The syslog facility \'{0}\' is not known'.format(
- facility_name
- )
+ "The syslog facility '{0}' is not known".format(facility_name)
)
else:
# This is the case of udp or tcp without a facility specified
- facility_name = 'LOG_USER' # Syslog default
+ facility_name = "LOG_USER" # Syslog default
- facility = getattr(
- SysLogHandler, facility_name, None
- )
+ facility = getattr(SysLogHandler, facility_name, None)
if facility is None:
# This python syslog version does not know about the user provided
# facility name
raise RuntimeError(
- 'The syslog facility \'{0}\' is not known'.format(
- facility_name
- )
+ "The syslog facility '{0}' is not known".format(facility_name)
)
- syslog_opts['facility'] = facility
+ syslog_opts["facility"] = facility
- if parsed_log_path.scheme == 'tcp':
+ if parsed_log_path.scheme == "tcp":
# tcp syslog support was only added on python versions >= 2.7
if sys.version_info < (2, 7):
raise RuntimeError(
- 'Python versions lower than 2.7 do not support logging '
- 'to syslog using tcp sockets'
+ "Python versions lower than 2.7 do not support logging "
+ "to syslog using tcp sockets"
)
- syslog_opts['socktype'] = socket.SOCK_STREAM
+ syslog_opts["socktype"] = socket.SOCK_STREAM
- if parsed_log_path.scheme in ('tcp', 'udp'):
- syslog_opts['address'] = (
+ if parsed_log_path.scheme in ("tcp", "udp"):
+ syslog_opts["address"] = (
parsed_log_path.hostname,
- parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT
+ parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT,
)
- if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file':
+ if sys.version_info < (2, 7) or parsed_log_path.scheme == "file":
# There's no socktype support on python versions lower than 2.7
- syslog_opts.pop('socktype', None)
+ syslog_opts.pop("socktype", None)
try:
# Et voilà! Finally our syslog handler instance
handler = SysLogHandler(**syslog_opts)
except socket.error as err:
logging.getLogger(__name__).error(
- 'Failed to setup the Syslog logging handler: %s', err
+ "Failed to setup the Syslog logging handler: %s", err
)
shutdown_multiprocessing_logging_listener()
sys.exit(2)
@@ -344,13 +349,13 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
logging.getLogger(__name__).info(
- 'Log directory not found, trying to create it: %s', log_dir
+ "Log directory not found, trying to create it: %s", log_dir
)
try:
os.makedirs(log_dir, mode=0o700)
except OSError as ose:
logging.getLogger(__name__).warning(
- 'Failed to create directory for log file: %s (%s)', log_dir, ose
+ "Failed to create directory for log file: %s (%s)", log_dir, ose
)
return
try:
@@ -359,17 +364,22 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
# user is not using plain ASCII, their system should be ready to
# handle UTF-8.
if max_bytes > 0:
- handler = RotatingFileHandler(log_path,
- mode='a',
- maxBytes=max_bytes,
- backupCount=backup_count,
- encoding='utf-8',
- delay=0)
+ handler = RotatingFileHandler(
+ log_path,
+ mode="a",
+ maxBytes=max_bytes,
+ backupCount=backup_count,
+ encoding="utf-8",
+ delay=0,
+ )
else:
- handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
+ handler = WatchedFileHandler(
+ log_path, mode="a", encoding="utf-8", delay=0
+ )
except (IOError, OSError):
logging.getLogger(__name__).warning(
- 'Failed to open log file, do you have permission to write to %s?', log_path
+ "Failed to open log file, do you have permission to write to %s?",
+ log_path,
)
# Do not proceed with any more configuration since it will fail, we
# have the console logging already setup and the user should see
@@ -380,9 +390,9 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
# Set the default console formatter config
if not log_format:
- log_format = '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'
+ log_format = "%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s"
if not date_format:
- date_format = '%Y-%m-%d %H:%M:%S'
+ date_format = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(log_format, datefmt=date_format)
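# How the `log_path` URL is dissected above: urlparse yields the scheme
# ("tcp", "udp" or "file") plus a path whose last component may name a syslog
# facility. A standalone sketch of that parsing; the real code uses
# six.moves' urlparse and os.sep, a POSIX "/" is assumed here:
from urllib.parse import urlparse

url = urlparse("file:///dev/log/LOG_LOCAL3")
facility_name = url.path.split("/")[-1].upper()  # "LOG_LOCAL3"
address = "/".join(url.path.split("/")[:-1])     # "/dev/log"
assert (url.scheme, facility_name, address) == ("file", "LOG_LOCAL3", "/dev/log")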
@@ -396,9 +406,9 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
def setup_extended_logging(opts):
- '''
+ """
Setup any additional logging handlers, internal or external
- '''
+ """
if is_extended_logging_configured() is True:
# Don't re-configure external loggers
return
@@ -417,7 +427,7 @@ def setup_extended_logging(opts):
additional_handlers = []
for name, get_handlers_func in six.iteritems(providers):
- logging.getLogger(__name__).info('Processing `log_handlers.%s`', name)
+ logging.getLogger(__name__).info("Processing `log_handlers.%s`", name)
# Keep a reference to the logging handlers count before getting the
# possible additional ones.
initial_handlers_count = len(logging.root.handlers)
@@ -429,9 +439,10 @@ def setup_extended_logging(opts):
# A false return value means not configuring any logging handler on
# purpose
logging.getLogger(__name__).info(
- 'The `log_handlers.%s.setup_handlers()` function returned '
- '`False` which means no logging handler was configured on '
- 'purpose. Continuing...', name
+ "The `log_handlers.%s.setup_handlers()` function returned "
+ "`False` which means no logging handler was configured on "
+ "purpose. Continuing...",
+ name,
)
continue
else:
@@ -439,19 +450,19 @@ def setup_extended_logging(opts):
handlers = [handlers]
for handler in handlers:
- if not handler and \
- len(logging.root.handlers) == initial_handlers_count:
+ if not handler and len(logging.root.handlers) == initial_handlers_count:
logging.getLogger(__name__).info(
- 'The `log_handlers.%s`, did not return any handlers '
- 'and the global handlers count did not increase. This '
- 'could be a sign of `log_handlers.%s` not working as '
- 'supposed', name, name
+ "The `log_handlers.%s`, did not return any handlers "
+ "and the global handlers count did not increase. This "
+ "could be a sign of `log_handlers.%s` not working as "
+ "supposed",
+ name,
+ name,
)
continue
logging.getLogger(__name__).debug(
- 'Adding the \'%s\' provided logging handler: \'%s\'',
- name, handler
+ "Adding the '%s' provided logging handler: '%s'", name, handler
)
additional_handlers.append(handler)
logging.root.addHandler(handler)
@@ -466,8 +477,7 @@ def setup_extended_logging(opts):
LOGGING_STORE_HANDLER.sync_with_handlers(additional_handlers)
else:
logging.getLogger(__name__).debug(
- 'LOGGING_STORE_HANDLER is already None, can\'t sync messages '
- 'with it'
+ "LOGGING_STORE_HANDLER is already None, can't sync messages " "with it"
)
# Remove the temporary queue logging handler
@@ -515,20 +525,18 @@ def set_multiprocessing_logging_level(log_level):
def set_multiprocessing_logging_level_by_opts(opts):
- '''
+ """
This will set the multiprocessing logging level to the lowest
logging level of all the types of logging that are configured.
- '''
+ """
global __MP_LOGGING_LEVEL
log_levels = [
- LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR),
- LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
+ LOG_LEVELS.get(opts.get("log_level", "").lower(), logging.ERROR),
+ LOG_LEVELS.get(opts.get("log_level_logfile", "").lower(), logging.ERROR),
]
- for level in six.itervalues(opts.get('log_granular_levels', {})):
- log_levels.append(
- LOG_LEVELS.get(level.lower(), logging.ERROR)
- )
+ for level in six.itervalues(opts.get("log_granular_levels", {})):
+ log_levels.append(LOG_LEVELS.get(level.lower(), logging.ERROR))
__MP_LOGGING_LEVEL = min(log_levels)
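# Why min() is correct here: numerically lower level values are more verbose
# (DEBUG=10 < WARNING=30 < ERROR=40), so the minimum across the console
# level, the logfile level and every granular level is the most verbose level
# any configured consumer needs. A two-line sketch:
import logging
levels = [logging.ERROR, logging.WARNING, logging.DEBUG]
assert min(levels) == logging.DEBUG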
@@ -552,7 +560,7 @@ def setup_multiprocessing_logging_listener(opts, queue=None):
__MP_MAINPROCESS_ID = os.getpid()
__MP_LOGGING_QUEUE_PROCESS = multiprocessing.Process(
target=__process_multiprocessing_logging_queue,
- args=(opts, queue or get_multiprocessing_logging_queue(),)
+ args=(opts, queue or get_multiprocessing_logging_queue(),),
)
__MP_LOGGING_QUEUE_PROCESS.daemon = True
__MP_LOGGING_QUEUE_PROCESS.start()
@@ -560,10 +568,10 @@ def setup_multiprocessing_logging_listener(opts, queue=None):
def setup_multiprocessing_logging(queue=None):
- '''
+ """
This code should be called from within a running multiprocessing
process instance.
- '''
+ """
from salt.utils.platform import is_windows
global __MP_LOGGING_CONFIGURED
@@ -594,15 +602,19 @@ def setup_multiprocessing_logging(queue=None):
__remove_queue_logging_handler()
# Let's add a queue handler to the logging root handlers
- __MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
+ __MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(
+ queue or get_multiprocessing_logging_queue()
+ )
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
- 'Multiprocessing queue logging configured for the process running '
- 'under PID: %s at log level %s', os.getpid(), log_level
+ "Multiprocessing queue logging configured for the process running "
+ "under PID: %s at log level %s",
+ os.getpid(),
+ log_level,
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal
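# The pattern wired up above: every forked process gets a single queue
# handler on the root logger, and one listener process drains the shared
# queue and does the actual I/O. A compact sketch of the same wiring using
# the Python 3 stdlib QueueHandler (salt ships its own SaltLogQueueHandler
# and a dedicated listener process):
import logging
import logging.handlers
import multiprocessing

queue = multiprocessing.Queue()
logging.root.addHandler(logging.handlers.QueueHandler(queue))
# In the listener: record = queue.get(); a None sentinel stops the loop,
# anything else is passed to logging.getLogger(record.name).handle(record).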
@@ -695,15 +707,17 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False):
return
if __MP_LOGGING_QUEUE_PROCESS.is_alive():
- logging.getLogger(__name__).debug('Stopping the multiprocessing logging queue listener')
+ logging.getLogger(__name__).debug(
+ "Stopping the multiprocessing logging queue listener"
+ )
try:
# Send a None sentinel to stop the logging processing queue
__MP_LOGGING_QUEUE.put(None)
# Let's join the multiprocessing logging handle thread
time.sleep(0.5)
- logging.getLogger(__name__).debug('closing multiprocessing queue')
+ logging.getLogger(__name__).debug("closing multiprocessing queue")
__MP_LOGGING_QUEUE.close()
- logging.getLogger(__name__).debug('joining multiprocessing queue thread')
+ logging.getLogger(__name__).debug("joining multiprocessing queue thread")
__MP_LOGGING_QUEUE.join_thread()
__MP_LOGGING_QUEUE = None
__MP_LOGGING_QUEUE_PROCESS.join(1)
@@ -717,22 +731,24 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False):
__MP_LOGGING_QUEUE_PROCESS.terminate()
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_LISTENER_CONFIGURED = False
- logging.getLogger(__name__).debug('Stopped the multiprocessing logging queue listener')
+ logging.getLogger(__name__).debug(
+ "Stopped the multiprocessing logging queue listener"
+ )
-def set_logger_level(logger_name, log_level='error'):
- '''
+def set_logger_level(logger_name, log_level="error"):
+ """
Tweak a specific logger's logging level
- '''
+ """
logging.getLogger(logger_name).setLevel(
LOG_LEVELS.get(log_level.lower(), logging.ERROR)
)
def patch_python_logging_handlers():
- '''
+ """
Patch the python logging handlers with our mixed-in classes
- '''
+ """
logging.StreamHandler = StreamHandler
logging.FileHandler = FileHandler
logging.handlers.SysLogHandler = SysLogHandler
@@ -745,32 +761,35 @@ def patch_python_logging_handlers():
def __process_multiprocessing_logging_queue(opts, queue):
# Avoid circular import
import salt.utils.process
- salt.utils.process.appendproctitle('MultiprocessingLoggingQueue')
+
+ salt.utils.process.appendproctitle("MultiprocessingLoggingQueue")
# Assign UID/GID of user to proc if set
from salt.utils.verify import check_user
- user = opts.get('user')
+
+ user = opts.get("user")
if user:
check_user(user)
from salt.utils.platform import is_windows
+
if is_windows():
# On Windows, creating a new process doesn't fork (copy the parent
# process image). Due to this, we need to setup all of our logging
# inside this process.
setup_temp_logger()
setup_console_logger(
- log_level=opts.get('log_level'),
- log_format=opts.get('log_fmt_console'),
- date_format=opts.get('log_datefmt_console')
+ log_level=opts.get("log_level"),
+ log_format=opts.get("log_fmt_console"),
+ date_format=opts.get("log_datefmt_console"),
)
setup_logfile_logger(
- opts.get('log_file'),
- log_level=opts.get('log_level_logfile'),
- log_format=opts.get('log_fmt_logfile'),
- date_format=opts.get('log_datefmt_logfile'),
- max_bytes=opts.get('log_rotate_max_bytes', 0),
- backup_count=opts.get('log_rotate_backup_count', 0)
+ opts.get("log_file"),
+ log_level=opts.get("log_level_logfile"),
+ log_format=opts.get("log_fmt_logfile"),
+ date_format=opts.get("log_datefmt_logfile"),
+ max_bytes=opts.get("log_rotate_max_bytes", 0),
+ backup_count=opts.get("log_rotate_backup_count", 0),
)
setup_extended_logging(opts)
while True:
@@ -787,16 +806,18 @@ def __process_multiprocessing_logging_queue(opts, queue):
break
except Exception as exc: # pylint: disable=broad-except
logging.getLogger(__name__).warning(
- 'An exception occurred in the multiprocessing logging '
- 'queue thread: %r', exc, exc_info_on_loglevel=logging.DEBUG
+ "An exception occurred in the multiprocessing logging "
+ "queue thread: %r",
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
)
def __remove_null_logging_handler():
- '''
+ """
This function will run once the temporary logging has been configured. It
just removes the NullHandler from the logging handlers.
- '''
+ """
global LOGGING_NULL_HANDLER
if LOGGING_NULL_HANDLER is None:
# Already removed
@@ -813,10 +834,10 @@ def __remove_null_logging_handler():
def __remove_queue_logging_handler():
- '''
+ """
This function will run once the additional loggers have been synchronized.
It just removes the QueueLoggingHandler from the logging handlers.
- '''
+ """
global LOGGING_STORE_HANDLER
if LOGGING_STORE_HANDLER is None:
# Already removed
@@ -833,10 +854,10 @@ def __remove_queue_logging_handler():
def __remove_temp_logging_handler():
- '''
+ """
This function will run once logging has been configured. It just removes
the temporary stream Handler from the logging handlers.
- '''
+ """
if is_logging_configured():
# In this case, the temporary logging handler has been removed, return!
return
@@ -861,9 +882,9 @@ def __remove_temp_logging_handler():
def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
- '''
+ """
This function will log all un-handled python exceptions.
- '''
+ """
if exc_type.__name__ == "KeyboardInterrupt":
# Do not log the exception or display the traceback on Keyboard Interrupt
# Stop the logging queue listener thread
@@ -872,13 +893,13 @@ def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
else:
# Log the exception
logging.getLogger(__name__).error(
- 'An un-handled exception was caught by salt\'s global exception '
- 'handler:\n%s: %s\n%s',
+ "An un-handled exception was caught by salt's global exception "
+ "handler:\n%s: %s\n%s",
exc_type.__name__,
exc_value,
- ''.join(traceback.format_exception(
- exc_type, exc_value, exc_traceback
- )).strip()
+ "".join(
+ traceback.format_exception(exc_type, exc_value, exc_traceback)
+ ).strip(),
)
# Call the original sys.excepthook
sys.__excepthook__(exc_type, exc_value, exc_traceback)
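# `__global_logging_exception_handler` has the (exc_type, exc_value,
# exc_traceback) signature that sys.excepthook expects, and it chains to the
# original hook as the last line above does; presumably the module assigns it
# to sys.excepthook elsewhere. A minimal sketch of that pattern:
import logging
import sys
import traceback

def log_unhandled(exc_type, exc_value, exc_traceback):
    logging.getLogger(__name__).error(
        "Unhandled exception:\n%s",
        "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)),
    )
    sys.__excepthook__(exc_type, exc_value, exc_traceback)

sys.excepthook = log_unhandled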
diff --git a/salt/master.py b/salt/master.py
index fb2e0c35bf0..359ffc7de81 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -1,53 +1,47 @@
# -*- coding: utf-8 -*-
-'''
+"""
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
-'''
+"""
# Import python libs
-from __future__ import absolute_import, with_statement, print_function, unicode_literals
+from __future__ import absolute_import, print_function, unicode_literals, with_statement
+
+import collections
import copy
import ctypes
import functools
+import logging
+import multiprocessing
import os
import re
-import sys
-import time
import signal
import stat
-import logging
-import collections
-import multiprocessing
+import sys
import threading
-import salt.serializers.msgpack
+import time
-# pylint: disable=import-error,no-name-in-module,redefined-builtin
-from salt.ext import six
-from salt.ext.six.moves import range
-from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
-# pylint: enable=import-error,no-name-in-module,redefined-builtin
-
-import salt.ext.tornado.gen # pylint: disable=F0401
+import salt.acl
+import salt.auth
+import salt.client
+import salt.client.ssh.client
# Import salt libs
import salt.crypt
-import salt.client
-import salt.client.ssh.client
-import salt.exceptions
-import salt.payload
-import salt.pillar
-import salt.state
-import salt.runner
-import salt.auth
-import salt.wheel
-import salt.minion
-import salt.key
-import salt.acl
-import salt.engines
import salt.daemons.masterapi
import salt.defaults.exitcodes
-import salt.transport.server
+import salt.engines
+import salt.exceptions
+import salt.ext.tornado.gen # pylint: disable=F0401
+import salt.key
import salt.log.setup
+import salt.minion
+import salt.payload
+import salt.pillar
+import salt.runner
+import salt.serializers.msgpack
+import salt.state
+import salt.transport.server
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
@@ -67,17 +61,31 @@ import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
+import salt.wheel
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
+
+# pylint: disable=import-error,no-name-in-module,redefined-builtin
+from salt.ext import six
+from salt.ext.six.moves import range
+from salt.ext.tornado.stack_context import StackContext
from salt.transport import iter_transport_opts
+from salt.utils.ctx import RequestContext
from salt.utils.debug import (
- enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
+ enable_sigusr1_handler,
+ enable_sigusr2_handler,
+ inspect_stack,
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
+from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
+
+# pylint: enable=import-error,no-name-in-module,redefined-builtin
+
try:
import resource
+
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
@@ -86,29 +94,30 @@ except ImportError:
# Import halite libs
try:
import halite # pylint: disable=import-error
+
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
-from salt.ext.tornado.stack_context import StackContext
-from salt.utils.ctx import RequestContext
-
log = logging.getLogger(__name__)
class SMaster(object):
- '''
+ """
Create a simple salt-master, this will generate the top-level master
- '''
- secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
+ """
+
+ secrets = (
+ {}
+ ) # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
- '''
+ """
Create a salt master server instance
:param dict opts: The salt options dictionary
- '''
+ """
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
@@ -119,39 +128,42 @@ class SMaster(object):
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
- self.opts = state['opts']
- self.master_key = state['master_key']
- self.key = state['key']
- SMaster.secrets = state['secrets']
+ self.opts = state["opts"]
+ self.master_key = state["master_key"]
+ self.key = state["key"]
+ SMaster.secrets = state["secrets"]
def __getstate__(self):
- return {'opts': self.opts,
- 'master_key': self.master_key,
- 'key': self.key,
- 'secrets': SMaster.secrets}
+ return {
+ "opts": self.opts,
+ "master_key": self.master_key,
+ "key": self.key,
+ "secrets": SMaster.secrets,
+ }
def __prep_key(self):
- '''
+ """
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
- '''
+ """
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingProcess):
- '''
+ """
A generalized maintenance process which performs maintenance routines.
- '''
+ """
+
def __init__(self, opts, **kwargs):
- '''
+ """
Create a maintenance instance
:param dict opts: The salt options
- '''
+ """
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
- self.loop_interval = int(self.opts['loop_interval'])
+ self.loop_interval = int(self.opts["loop_interval"])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
@@ -162,47 +174,49 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
- state['opts'],
- log_queue=state['log_queue'],
- log_queue_level=state['log_queue_level']
+ state["opts"],
+ log_queue=state["log_queue"],
+ log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
- 'opts': self.opts,
- 'log_queue': self.log_queue,
- 'log_queue_level': self.log_queue_level
+ "opts": self.opts,
+ "log_queue": self.log_queue,
+ "log_queue_level": self.log_queue_level,
}
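# On Windows, multiprocessing spawns rather than forks, so process objects
# are pickled into the child; __getstate__/__setstate__ round-trip just the
# constructor arguments and re-run __init__ on the other side. The same
# pattern with a toy class:
class Spawnable(object):
    def __init__(self, opts):
        self.opts = opts

    def __getstate__(self):
        return {"opts": self.opts}

    def __setstate__(self, state):
        self.__init__(state["opts"])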
def _post_fork_init(self):
- '''
+ """
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
- '''
+ """
# Load Runners
ropts = dict(self.opts)
- ropts['quiet'] = True
+ ropts["quiet"] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
- self.schedule = salt.utils.schedule.Schedule(self.opts,
- runner_client.functions_dict(),
- returners=self.returners)
+ self.schedule = salt.utils.schedule.Schedule(
+ self.opts, runner_client.functions_dict(), returners=self.returners
+ )
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
- self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
+ self.event = salt.utils.event.get_master_event(
+ self.opts, self.opts["sock_dir"], listen=False
+ )
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
- if self.opts.get('presence_events', False):
+ if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
- if transport != 'tcp':
+ if transport != "tcp":
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
@@ -210,13 +224,13 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
self.presence_events = True
def run(self):
- '''
+ """
This is the general passive maintenance process controller for the Salt
master.
This is where any data on the master that needs to be cleanly
maintained is taken care of.
- '''
+ """
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
@@ -226,7 +240,7 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
last = int(time.time())
last_git_pillar_update = last
- git_pillar_update_interval = self.opts.get('git_pillar_update_interval', 0)
+ git_pillar_update_interval = self.opts.get("git_pillar_update_interval", 0)
old_present = set()
while True:
now = int(time.time())
@@ -246,36 +260,42 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
time.sleep(self.loop_interval)
def handle_key_cache(self):
- '''
+ """
Evaluate accepted keys and create a msgpack file
which contains a list
- '''
- if self.opts['key_cache'] == 'sched':
+ """
+ if self.opts["key_cache"] == "sched":
keys = []
- #TODO DRY from CKMinions
- if self.opts['transport'] in ('zeromq', 'tcp'):
- acc = 'minions'
+ # TODO DRY from CKMinions
+ if self.opts["transport"] in ("zeromq", "tcp"):
+ acc = "minions"
else:
- acc = 'accepted'
+ acc = "accepted"
- for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
- if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
+ for fn_ in os.listdir(os.path.join(self.opts["pki_dir"], acc)):
+ if not fn_.startswith(".") and os.path.isfile(
+ os.path.join(self.opts["pki_dir"], acc, fn_)
+ ):
keys.append(fn_)
- log.debug('Writing master key cache')
+ log.debug("Writing master key cache")
# Write a temporary file securely
if six.PY2:
- with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
+ with salt.utils.atomicfile.atomic_open(
+ os.path.join(self.opts["pki_dir"], acc, ".key_cache")
+ ) as cache_file:
self.serial.dump(keys, cache_file)
else:
- with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
+ with salt.utils.atomicfile.atomic_open(
+ os.path.join(self.opts["pki_dir"], acc, ".key_cache"), mode="wb"
+ ) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
- '''
+ """
Rotate the AES key when rotation is due
- '''
+ """
to_rotate = False
- dfn = os.path.join(self.opts['cachedir'], '.dfn')
+ dfn = os.path.join(self.opts["cachedir"], ".dfn")
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
@@ -287,44 +307,46 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
elif stats.st_mode == 0o100400:
to_rotate = True
else:
- log.error('Found dropfile with incorrect permissions, ignoring...')
+ log.error("Found dropfile with incorrect permissions, ignoring...")
os.remove(dfn)
except os.error:
pass
- if self.opts.get('publish_session'):
- if now - self.rotate >= self.opts['publish_session']:
+ if self.opts.get("publish_session"):
+ if now - self.rotate >= self.opts["publish_session"]:
to_rotate = True
if to_rotate:
- log.info('Rotating master AES key')
+ log.info("Rotating master AES key")
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
- with secret_map['secret'].get_lock():
- secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
- self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
+ with secret_map["secret"].get_lock():
+ secret_map["secret"].value = salt.utils.stringutils.to_bytes(
+ secret_map["reload"]()
+ )
+ self.event.fire_event(
+ {"rotate_{0}_key".format(secret_key): True}, tag="key"
+ )
self.rotate = now
- if self.opts.get('ping_on_rotate'):
+ if self.opts.get("ping_on_rotate"):
# Ping all minions to get them to pick up the new key
- log.debug('Pinging all connected minions '
- 'due to key rotation')
+ log.debug("Pinging all connected minions " "due to key rotation")
salt.utils.master.ping_all_connected_minions(self.opts)
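# The dropfile check above compares the full st_mode (file-type bits plus
# permission bits): 0o100400 is a regular file readable only by its owner.
# A sketch creating such a dropfile and testing it the same way (POSIX only;
# as the code notes, Windows permissions don't distinguish these cases):
import os
import tempfile

dfn = os.path.join(tempfile.mkdtemp(), ".dfn")
with open(dfn, "w"):
    pass
os.chmod(dfn, 0o400)
assert os.stat(dfn).st_mode == 0o100400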
def handle_git_pillar(self):
- '''
+ """
Update git pillar
- '''
+ """
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception caught while updating git_pillar',
- exc_info=True)
+ log.error("Exception caught while updating git_pillar", exc_info=True)
def handle_schedule(self):
- '''
+ """
Evaluate the scheduler
- '''
+ """
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
@@ -332,13 +354,13 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception %s occurred in scheduled job', exc)
+ log.error("Exception %s occurred in scheduled job", exc)
self.schedule.cleanup_subprocesses()
def handle_presence(self, old_present):
- '''
+ """
Fire presence events if enabled
- '''
+ """
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
@@ -347,25 +369,26 @@ class Maintenance(salt.utils.process.SignalHandlingProcess):
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
- data = {'new': list(new),
- 'lost': list(lost)}
- self.event.fire_event(data, tagify('change', 'presence'))
- data = {'present': list(present)}
- self.event.fire_event(data, tagify('present', 'presence'))
+ data = {"new": list(new), "lost": list(lost)}
+ self.event.fire_event(data, tagify("change", "presence"))
+ data = {"present": list(present)}
+ self.event.fire_event(data, tagify("present", "presence"))
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
- '''
+ """
A process from which to update any dynamic fileserver backends
- '''
+ """
+
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
+
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
@@ -374,32 +397,29 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
- state['opts'],
- log_queue=state['log_queue'],
+ state["opts"], log_queue=state["log_queue"],
)
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue,
+ return {
+ "opts": self.opts,
+ "log_queue": self.log_queue,
}
def fill_buckets(self):
- '''
+ """
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
- '''
+ """
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
- fstr = '{0}.update'.format(backend)
+ fstr = "{0}.update".format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
- log.debug(
- 'No update function for the %s filserver backend',
- backend
- )
+ log.debug("No update function for the %s filserver backend", backend)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
@@ -408,8 +428,9 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
- 'An update_interval of 0 is not supported, '
- 'falling back to %s', interval
+ "An update_interval of 0 is not supported, "
+ "falling back to %s",
+ interval,
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
@@ -424,50 +445,58 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
- interval_key = '{0}_update_interval'.format(backend)
+ interval_key = "{0}_update_interval".format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
- '%s key missing from configuration. Falling back to '
- 'default interval of %d seconds',
- interval_key, interval
+ "%s key missing from configuration. Falling back to "
+ "default interval of %d seconds",
+ interval_key,
+ interval,
)
- self.buckets.setdefault(
- interval, OrderedDict())[(backend, update_func)] = None
+ self.buckets.setdefault(interval, OrderedDict())[
+ (backend, update_func)
+ ] = None
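# After fill_buckets() the schedule is a two-level mapping: update interval
# -> OrderedDict keyed by (backend_name, update_func), valued with the
# per-remote args (or None when the backend takes none). A hand-built example
# of the resulting shape, with a placeholder callable:
from collections import OrderedDict

def update_roots():  # stand-in for fileserver.servers["roots.update"]
    pass

buckets = {60: OrderedDict([(("roots", update_roots), None)])}
for interval, backends in buckets.items():
    for (backend_name, update_func), update_args in backends.items():
        update_func(*(update_args or ()))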
def update_fileserver(self, interval, backends):
- '''
+ """
Threading target which handles all updates for a given wait interval
- '''
+ """
+
def _do_update():
log.debug(
- 'Performing fileserver updates for items with an update '
- 'interval of %d', interval
+ "Performing fileserver updates for items with an update "
+ "interval of %d",
+ interval,
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
- 'Updating %s fileserver cache for the following '
- 'targets: %s', backend_name, update_args
+ "Updating %s fileserver cache for the following "
+ "targets: %s",
+ backend_name,
+ update_args,
)
args = (update_args,)
else:
- log.debug('Updating %s fileserver cache', backend_name)
+ log.debug("Updating %s fileserver cache", backend_name)
args = ()
update_func(*args)
except Exception as exc: # pylint: disable=broad-except
log.exception(
- 'Uncaught exception while updating %s fileserver '
- 'cache', backend_name
+ "Uncaught exception while updating %s fileserver " "cache",
+ backend_name,
)
log.debug(
- 'Completed fileserver updates for items with an update '
- 'interval of %d, waiting %d seconds', interval, interval
+ "Completed fileserver updates for items with an update "
+ "interval of %d, waiting %d seconds",
+ interval,
+ interval,
)
condition = threading.Condition()
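# The wait loop that follows is elided between these hunks; a plausible shape,
# under the assumption that Condition.wait() is used purely as an
# interruptible sleep so the thread re-runs _do_update() every `interval`
# seconds:
import threading

def run_every(interval, do_update):
    condition = threading.Condition()
    while True:
        do_update()
        with condition:
            condition.wait(timeout=interval)  # nothing notifies; acts as a sleep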
@@ -478,17 +507,16 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
_do_update()
def run(self):
- '''
+ """
Start the update threads
- '''
+ """
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
- target=self.update_fileserver,
- args=(interval, self.buckets[interval]),
+ target=self.update_fileserver, args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
@@ -498,21 +526,22 @@ class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
class Master(SMaster):
- '''
+ """
The salt master server
- '''
+ """
+
def __init__(self, opts):
- '''
+ """
Create a salt master server instance
:param dict: The salt options
- '''
+ """
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
- 'You have a version of ZMQ less than ZMQ 3.2! There are '
- 'known connection keep-alive issues with ZMQ < 3.2 which '
- 'may result in loss of contact with minions. Please '
- 'upgrade your ZMQ!'
+ "You have a version of ZMQ less than ZMQ 3.2! There are "
+ "known connection keep-alive issues with ZMQ < 3.2 which "
+ "may result in loss of contact with minions. Please "
+ "upgrade your ZMQ!"
)
SMaster.__init__(self, opts)
@@ -526,64 +555,67 @@ class Master(SMaster):
# hard limit,but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
- 'Current values for max open files soft/hard setting: %s/%s',
- mof_s, mof_h
+ "Current values for max open files soft/hard setting: %s/%s", mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
- mof_c = self.opts['max_open_files']
+ mof_c = self.opts["max_open_files"]
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
- 'The value for the \'max_open_files\' setting, %s, is higher '
- 'than the highest value the user running salt is allowed to '
- 'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
+ "The value for the 'max_open_files' setting, %s, is higher "
+ "than the highest value the user running salt is allowed to "
+ "set (%s). Defaulting to %s.",
+ mof_c,
+ mof_h,
+ mof_h,
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
- log.info('Raising max open files value to %s', mof_c)
+ log.info("Raising max open files value to %s", mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
- 'New values for max open files soft/hard values: %s/%s',
- mof_s, mof_h
+ "New values for max open files soft/hard values: %s/%s",
+ mof_s,
+ mof_h,
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
- 'Failed to raise max open files setting to %s. If this '
- 'value is too low, the salt-master will most likely fail '
- 'to run properly.', mof_c
+ "Failed to raise max open files setting to %s. If this "
+ "value is too low, the salt-master will most likely fail "
+ "to run properly.",
+ mof_c,
)
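# getrlimit/setrlimit work on (soft, hard) pairs: the soft limit can be
# raised up to the hard limit without privileges, which is exactly the
# clamping done above. A POSIX-only sketch (the resource module is absent on
# Windows, per the import guard earlier; 100000 is the default the comments
# above mention):
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
wanted = 100000
if hard != resource.RLIM_INFINITY and wanted > hard:
    wanted = hard  # the configured value is clamped to the hard limit
if soft < wanted:
    resource.setrlimit(resource.RLIMIT_NOFILE, (wanted, hard))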
def _pre_flight(self):
- '''
+ """
Run pre flight checks. If anything in this method fails then the master
should not start up.
- '''
+ """
errors = []
critical_errors = []
try:
- os.chdir('/')
+ os.chdir("/")
except OSError as err:
- errors.append(
- 'Cannot change to root directory ({0})'.format(err)
- )
+ errors.append("Cannot change to root directory ({0})".format(err))
- if self.opts.get('fileserver_verify_config', True):
+ if self.opts.get("fileserver_verify_config", True):
# Avoid circular import
import salt.fileserver
+
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
- 'Failed to load fileserver backends, the configured backends '
- 'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
+ "Failed to load fileserver backends, the configured backends "
+ "are: {0}".format(", ".join(self.opts["fileserver_backend"]))
)
else:
# Run init() for all backends which support the function, to
@@ -591,46 +623,50 @@ class Master(SMaster):
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
- critical_errors.append('{0}'.format(exc))
+ critical_errors.append("{0}".format(exc))
- if not self.opts['fileserver_backend']:
- errors.append('No fileserver backends are configured')
+ if not self.opts["fileserver_backend"]:
+ errors.append("No fileserver backends are configured")
# Check to see if we need to create a pillar cache dir
- if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
+ if self.opts["pillar_cache"] and not os.path.isdir(
+ os.path.join(self.opts["cachedir"], "pillar_cache")
+ ):
try:
with salt.utils.files.set_umask(0o077):
- os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
+ os.mkdir(os.path.join(self.opts["cachedir"], "pillar_cache"))
except OSError:
pass
- if self.opts.get('git_pillar_verify_config', True):
+ if self.opts.get("git_pillar_verify_config", True):
try:
git_pillars = [
- x for x in self.opts.get('ext_pillar', [])
- if 'git' in x
- and not isinstance(x['git'], six.string_types)
+ x
+ for x in self.opts.get("ext_pillar", [])
+ if "git" in x and not isinstance(x["git"], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
- 'Invalid ext_pillar configuration. It is likely that the '
- 'external pillar type was not specified for one or more '
- 'external pillars.'
+ "Invalid ext_pillar configuration. It is likely that the "
+ "external pillar type was not specified for one or more "
+ "external pillars."
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
+
for repo in git_pillars:
- new_opts['ext_pillar'] = [repo]
+ new_opts["ext_pillar"] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
- repo['git'],
+ repo["git"],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
- global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
+ global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
+ )
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
@@ -641,15 +677,15 @@ class Master(SMaster):
log.error(error)
for error in critical_errors:
log.critical(error)
- log.critical('Master failed pre flight checks, exiting\n')
+ log.critical("Master failed pre flight checks, exiting\n")
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
- '''
+ """
Turn on the master server components
- '''
+ """
self._pre_flight()
- log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
+ log.info("salt-master is starting as user '%s'", salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
@@ -663,105 +699,119 @@ class Master(SMaster):
# Setup the secrets here because the PubServerChannel may need
# them as well.
- SMaster.secrets['aes'] = {
- 'secret': multiprocessing.Array(
+ SMaster.secrets["aes"] = {
+ "secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
- )
+ ),
),
- 'reload': salt.crypt.Crypticle.generate_key_string
+ "reload": salt.crypt.Crypticle.generate_key_string,
}
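# The AES key is kept in a multiprocessing.Array so that every forked
# master process shares the same secret; 'reload' holds the generator
# used to produce a fresh key when it is rotated.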
- log.info('Creating master process manager')
+ log.info("Creating master process manager")
# Since there are children that have their own ProcessManager, we should allow more time for kill.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
- log.info('Creating master publisher process')
+ log.info("Creating master publisher process")
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
- chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
+ chan.pre_fork(self.process_manager, kwargs={"log_queue": log_queue})
pub_channels.append(chan)
- log.info('Creating master event publisher process')
- self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
+ log.info("Creating master event publisher process")
+ self.process_manager.add_process(
+ salt.utils.event.EventPublisher, args=(self.opts,)
+ )
- if self.opts.get('reactor'):
- if isinstance(self.opts['engines'], list):
+ if self.opts.get("reactor"):
+ if isinstance(self.opts["engines"], list):
rine = False
- for item in self.opts['engines']:
- if 'reactor' in item:
+ for item in self.opts["engines"]:
+ if "reactor" in item:
rine = True
break
if not rine:
- self.opts['engines'].append({'reactor': {}})
+ self.opts["engines"].append({"reactor": {}})
else:
- if 'reactor' not in self.opts['engines']:
- log.info('Enabling the reactor engine')
- self.opts['engines']['reactor'] = {}
+ if "reactor" not in self.opts["engines"]:
+ log.info("Enabling the reactor engine")
+ self.opts["engines"]["reactor"] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
- log.info('Creating master maintenance process')
+ log.info("Creating master maintenance process")
self.process_manager.add_process(Maintenance, args=(self.opts,))
- if self.opts.get('event_return'):
- log.info('Creating master event return process')
- self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
+ if self.opts.get("event_return"):
+ log.info("Creating master event return process")
+ self.process_manager.add_process(
+ salt.utils.event.EventReturn, args=(self.opts,)
+ )
- ext_procs = self.opts.get('ext_processes', [])
+ ext_procs = self.opts.get("ext_processes", [])
for proc in ext_procs:
- log.info('Creating ext_processes process: %s', proc)
+ log.info("Creating ext_processes process: %s", proc)
try:
- mod = '.'.join(proc.split('.')[:-1])
- cls = proc.split('.')[-1]
+ mod = ".".join(proc.split(".")[:-1])
+ cls = proc.split(".")[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception: # pylint: disable=broad-except
- log.error('Error creating ext_processes process: %s', proc)
+ log.error("Error creating ext_processes process: %s", proc)
- if HAS_HALITE and 'halite' in self.opts:
- log.info('Creating master halite process')
- self.process_manager.add_process(Halite, args=(self.opts['halite'],))
+ if HAS_HALITE and "halite" in self.opts:
+ log.info("Creating master halite process")
+ self.process_manager.add_process(Halite, args=(self.opts["halite"],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
- if self.opts['con_cache']:
- log.info('Creating master concache process')
- self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
+ if self.opts["con_cache"]:
+ log.info("Creating master concache process")
+ self.process_manager.add_process(
+ salt.utils.master.ConnectedCache, args=(self.opts,)
+ )
# workaround for issue #16315, race condition
- log.debug('Sleeping for two seconds to let concache rest')
+ log.debug("Sleeping for two seconds to let concache rest")
time.sleep(2)
- log.info('Creating master request server process')
+ log.info("Creating master request server process")
kwargs = {}
if salt.utils.platform.is_windows():
- kwargs['log_queue'] = log_queue
- kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
- kwargs['secrets'] = SMaster.secrets
+ kwargs["log_queue"] = log_queue
+ kwargs[
+ "log_queue_level"
+ ] = salt.log.setup.get_multiprocessing_logging_level()
+ kwargs["secrets"] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
- name='ReqServer')
+ name="ReqServer",
+ )
- self.process_manager.add_process(
- FileserverUpdate,
- args=(self.opts,))
+ self.process_manager.add_process(FileserverUpdate, args=(self.opts,))
# Fire up SSDP discovery publisher
- if self.opts['discovery']:
+ if self.opts["discovery"]:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
- self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
- port=self.opts['discovery']['port'],
- listen_ip=self.opts['interface'],
- answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
+ self.process_manager.add_process(
+ salt.utils.ssdp.SSDPDiscoveryServer(
+ port=self.opts["discovery"]["port"],
+ listen_ip=self.opts["interface"],
+ answer={
+ "mapping": self.opts["discovery"].get("mapping", {})
+ },
+ ).run
+ )
else:
- log.error('Unable to load SSDP: asynchronous IO is not available.')
+ log.error("Unable to load SSDP: asynchronous IO is not available.")
if sys.version_info.major == 2:
- log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
+ log.error(
+ 'You are using Python 2, please install "trollius" module to enable SSDP discovery.'
+ )
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
@@ -785,15 +835,16 @@ class Master(SMaster):
class Halite(salt.utils.process.SignalHandlingProcess):
- '''
+ """
Manage the Halite server
- '''
+ """
+
def __init__(self, hopts, **kwargs):
- '''
+ """
Create a halite instance
:param dict hopts: The halite options
- '''
+ """
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
@@ -802,33 +853,34 @@ class Halite(salt.utils.process.SignalHandlingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
- state['hopts'],
- log_queue=state['log_queue'],
- log_queue_level=state['log_queue_level']
+ state["hopts"],
+ log_queue=state["log_queue"],
+ log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
- 'hopts': self.hopts,
- 'log_queue': self.log_queue,
- 'log_queue_level': self.log_queue_level
+ "hopts": self.hopts,
+ "log_queue": self.log_queue,
+ "log_queue_level": self.log_queue_level,
}
def run(self):
- '''
+ """
Fire up halite!
- '''
+ """
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingProcess):
- '''
+ """
Starts up the master request server; minions send results to this
interface.
- '''
+ """
+
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
- '''
+ """
Create a request server
:param dict opts: The salt options dictionary
@@ -837,7 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
:rtype: ReqServer
:returns: Request server
- '''
+ """
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
@@ -850,22 +902,22 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
- state['opts'],
- state['key'],
- state['mkey'],
- secrets=state['secrets'],
- log_queue=state['log_queue'],
- log_queue_level=state['log_queue_level']
+ state["opts"],
+ state["key"],
+ state["mkey"],
+ secrets=state["secrets"],
+ log_queue=state["log_queue"],
+ log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
- 'opts': self.opts,
- 'key': self.key,
- 'mkey': self.master_key,
- 'secrets': self.secrets,
- 'log_queue': self.log_queue,
- 'log_queue_level': self.log_queue_level
+ "opts": self.opts,
+ "key": self.key,
+ "mkey": self.master_key,
+ "secrets": self.secrets,
+ "log_queue": self.log_queue,
+ "log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
@@ -873,9 +925,9 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
- '''
+ """
Binds the reply server
- '''
+ """
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
@@ -884,7 +936,7 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
if self.secrets is not None:
SMaster.secrets = self.secrets
- dfn = os.path.join(self.opts['cachedir'], '.dfn')
+ dfn = os.path.join(self.opts["cachedir"], ".dfn")
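+ # '.dfn' is the drop file (written by salt.crypt.dropfile) that signals
+ # an AES key rotation; clear any stale copy from a previous run before
+ # binding.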
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
@@ -895,8 +947,9 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
pass
# Wait for kill should be less than the parent's ProcessManager.
- self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
- wait_for_kill=1)
+ self.process_manager = salt.utils.process.ProcessManager(
+ name="ReqServer_ProcessManager", wait_for_kill=1
+ )
req_channels = []
tcp_only = True
@@ -904,45 +957,45 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
- if transport != 'tcp':
+ if transport != "tcp":
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
- kwargs['log_queue'] = self.log_queue
- kwargs['log_queue_level'] = self.log_queue_level
+ kwargs["log_queue"] = self.log_queue
+ kwargs["log_queue_level"] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
- if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
- log.warning('TCP transport supports only 1 worker on Windows '
- 'when using Python 2.')
- self.opts['worker_threads'] = 1
+ if tcp_only and six.PY2 and int(self.opts["worker_threads"]) != 1:
+ log.warning(
+ "TCP transport supports only 1 worker on Windows "
+ "when using Python 2."
+ )
+ self.opts["worker_threads"] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
- for ind in range(int(self.opts['worker_threads'])):
- name = 'MWorker-{0}'.format(ind)
- self.process_manager.add_process(MWorker,
- args=(self.opts,
- self.master_key,
- self.key,
- req_channels,
- name),
- kwargs=kwargs,
- name=name)
+ for ind in range(int(self.opts["worker_threads"])):
+ name = "MWorker-{0}".format(ind)
+ self.process_manager.add_process(
+ MWorker,
+ args=(self.opts, self.master_key, self.key, req_channels, name),
+ kwargs=kwargs,
+ name=name,
+ )
self.process_manager.run()
def run(self):
- '''
+ """
Start up the ReqServer
- '''
+ """
self.__bind()
def destroy(self, signum=signal.SIGTERM):
- if hasattr(self, 'process_manager'):
+ if hasattr(self, "process_manager"):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
@@ -950,22 +1003,18 @@ class ReqServer(salt.utils.process.SignalHandlingProcess):
# pylint: disable=W1701
def __del__(self):
self.destroy()
+
# pylint: enable=W1701
class MWorker(salt.utils.process.SignalHandlingProcess):
- '''
+ """
The worker multiprocess instance to manage the backend operations for the
salt master.
- '''
- def __init__(self,
- opts,
- mkey,
- key,
- req_channels,
- name,
- **kwargs):
- '''
+ """
+
+ def __init__(self, opts, mkey, key, req_channels, name, **kwargs):
+ """
Create a salt master worker process
:param dict opts: The salt options
@@ -974,8 +1023,8 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
:rtype: MWorker
:return: Master worker
- '''
- kwargs['name'] = name
+ """
+ kwargs["name"] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
@@ -984,7 +1033,7 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
self.mkey = mkey
self.key = key
self.k_mtime = 0
- self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
+ self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
@@ -994,43 +1043,44 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
# non-Windows platforms.
def __setstate__(self, state):
super(MWorker, self).__init__(
- log_queue=state['log_queue'],
- log_queue_level=state['log_queue_level']
+ log_queue=state["log_queue"], log_queue_level=state["log_queue_level"]
)
- self.opts = state['opts']
- self.req_channels = state['req_channels']
- self.mkey = state['mkey']
- self.key = state['key']
- self.k_mtime = state['k_mtime']
- SMaster.secrets = state['secrets']
+ self.opts = state["opts"]
+ self.req_channels = state["req_channels"]
+ self.mkey = state["mkey"]
+ self.key = state["key"]
+ self.k_mtime = state["k_mtime"]
+ SMaster.secrets = state["secrets"]
def __getstate__(self):
return {
- 'opts': self.opts,
- 'req_channels': self.req_channels,
- 'mkey': self.mkey,
- 'key': self.key,
- 'k_mtime': self.k_mtime,
- 'secrets': SMaster.secrets,
- 'log_queue': self.log_queue,
- 'log_queue_level': self.log_queue_level
+ "opts": self.opts,
+ "req_channels": self.req_channels,
+ "mkey": self.mkey,
+ "key": self.key,
+ "k_mtime": self.k_mtime,
+ "secrets": SMaster.secrets,
+ "log_queue": self.log_queue,
+ "log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe):
- for channel in getattr(self, 'req_channels', ()):
+ for channel in getattr(self, "req_channels", ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
- '''
+ """
Bind to the local port
- '''
+ """
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
- req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
+ req_channel.post_fork(
+ self._handle_payload, io_loop=self.io_loop
+ ) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
@@ -1039,7 +1089,7 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload):
- '''
+ """
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
@@ -1058,86 +1108,91 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
- '''
- key = payload['enc']
- load = payload['load']
- ret = {'aes': self._handle_aes,
- 'clear': self._handle_clear}[key](load)
+ """
+ key = payload["enc"]
+ load = payload["load"]
+ ret = {"aes": self._handle_aes, "clear": self._handle_clear}[key](load)
raise salt.ext.tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
- '''
+ """
Calculate the master stats and fire events with stat info
- '''
+ """
end = time.time()
duration = end - start
- self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs']
- if end - self.stat_clock > self.opts['master_stats_event_iter']:
+ self.stats[cmd]["mean"] = (
+ self.stats[cmd]["mean"] * (self.stats[cmd]["runs"] - 1) + duration
+ ) / self.stats[cmd]["runs"]
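+ # The update above is a streaming mean: the caller has already
+ # incremented 'runs', so new_mean = (old_mean * (runs - 1) + duration) / runs.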
+ if end - self.stat_clock > self.opts["master_stats_event_iter"]:
# Fire the event with the stats and wipe the tracker
- self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats'))
- self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
+ self.aes_funcs.event.fire_event(
+ {
+ "time": end - self.stat_clock,
+ "worker": self.name,
+ "stats": self.stats,
+ },
+ tagify(self.name, "stats"),
+ )
+ self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = end
def _handle_clear(self, load):
- '''
+ """
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
- '''
- log.trace('Clear payload received with command %s', load['cmd'])
- cmd = load['cmd']
- if cmd.startswith('__'):
+ """
+ log.trace("Clear payload received with command %s", load["cmd"])
+ cmd = load["cmd"]
+ if cmd.startswith("__"):
return False
- if self.opts['master_stats']:
+ if self.opts["master_stats"]:
start = time.time()
- self.stats[cmd]['runs'] += 1
- ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
- if self.opts['master_stats']:
+ self.stats[cmd]["runs"] += 1
+ ret = getattr(self.clear_funcs, cmd)(load), {"fun": "send_clear"}
+ if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
- '''
+ """
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
- '''
- if 'cmd' not in data:
- log.error('Received malformed command %s', data)
+ """
+ if "cmd" not in data:
+ log.error("Received malformed command %s", data)
return {}
- cmd = data['cmd']
- log.trace('AES payload received with command %s', data['cmd'])
- if cmd.startswith('__'):
+ cmd = data["cmd"]
+ log.trace("AES payload received with command %s", data["cmd"])
+ if cmd.startswith("__"):
return False
- if self.opts['master_stats']:
+ if self.opts["master_stats"]:
start = time.time()
- self.stats[cmd]['runs'] += 1
+ self.stats[cmd]["runs"] += 1
def run_func(data):
- return self.aes_funcs.run_func(data['cmd'], data)
+ return self.aes_funcs.run_func(data["cmd"], data)
- with StackContext(functools.partial(RequestContext,
- {'data': data,
- 'opts': self.opts})):
+ with StackContext(
+ functools.partial(RequestContext, {"data": data, "opts": self.opts})
+ ):
ret = run_func(data)
- if self.opts['master_stats']:
+ if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def run(self):
- '''
+ """
Start a Master Worker
- '''
+ """
salt.utils.process.appendproctitle(self.name)
- self.clear_funcs = ClearFuncs(
- self.opts,
- self.key,
- )
+ self.clear_funcs = ClearFuncs(self.opts, self.key,)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
@@ -1145,42 +1200,43 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
- '''
+ """
Set up functions that are available when the load is encrypted with AES
- '''
+ """
+
# The AES Functions:
#
def __init__(self, opts):
- '''
+ """
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
- '''
+ """
self.opts = opts
- self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
+ self.event = salt.utils.event.get_master_event(
+ self.opts, self.opts["sock_dir"], listen=False
+ )
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
- self.local = salt.client.get_local_client(self.opts['conf_file'])
+ self.local = salt.client.get_local_client(self.opts["conf_file"])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
- self.opts,
- states=False,
- rend=False,
- ignore_config_errors=True
+ self.opts, states=False, rend=False, ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
- '''
+ """
Set the local file objects from the file server interface
- '''
+ """
# Avoid circular import
import salt.fileserver
+
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
@@ -1193,7 +1249,7 @@ class AESFuncs(object):
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
- '''
+ """
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
@@ -1202,36 +1258,37 @@ class AESFuncs(object):
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
- '''
+ """
if not salt.utils.verify.valid_id(self.opts, id_):
return False
- pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
+ pub_path = os.path.join(self.opts["pki_dir"], "minions", id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
- 'Salt minion claiming to be %s attempted to communicate with '
- 'master, but key could not be read and verification was denied.',
- id_
+ "Salt minion claiming to be %s attempted to communicate with "
+ "master, but key could not be read and verification was denied.",
+ id_,
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
return False  # the minion cannot be verified without a parsable public key
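# The token is the literal string 'salt' encrypted with the minion's
# private key; decrypting it with the cached public key proves the
# minion's identity.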
try:
- if salt.crypt.public_decrypt(pub, token) == b'salt':
+ if salt.crypt.public_decrypt(pub, token) == b"salt":
return True
except ValueError as err:
- log.error('Unable to decrypt token: %s', err)
+ log.error("Unable to decrypt token: %s", err)
log.error(
- 'Salt minion claiming to be %s has attempted to communicate with '
- 'the master and could not be verified', id_
+ "Salt minion claiming to be %s has attempted to communicate with "
+ "the master and could not be verified",
+ id_,
)
return False
def verify_minion(self, id_, token):
- '''
+ """
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
@@ -1240,63 +1297,67 @@ class AESFuncs(object):
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
- '''
+ """
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
- '''
+ """
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
- '''
+ """
# Verify that the load is valid
- if 'peer' not in self.opts:
+ if "peer" not in self.opts:
return False
- if not isinstance(self.opts['peer'], dict):
+ if not isinstance(self.opts["peer"], dict):
return False
- if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
+ if any(
+ key not in clear_load for key in ("fun", "arg", "tgt", "ret", "tok", "id")
+ ):
return False
# If the command would make a recursive publish, don't run it
- if clear_load['fun'].startswith('publish.'):
+ if clear_load["fun"].startswith("publish."):
return False
# Check the permissions for this minion
- if not self.__verify_minion(clear_load['id'], clear_load['tok']):
+ if not self.__verify_minion(clear_load["id"], clear_load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
- 'Minion id %s is not who it says it is and is attempting '
- 'to issue a peer command', clear_load['id']
+ "Minion id %s is not who it says it is and is attempting "
+ "to issue a peer command",
+ clear_load["id"],
)
return False
- clear_load.pop('tok')
+ clear_load.pop("tok")
perms = []
- for match in self.opts['peer']:
- if re.match(match, clear_load['id']):
+ for match in self.opts["peer"]:
+ if re.match(match, clear_load["id"]):
# This is the list of funcs/modules!
- if isinstance(self.opts['peer'][match], list):
- perms.extend(self.opts['peer'][match])
- if ',' in clear_load['fun']:
+ if isinstance(self.opts["peer"][match], list):
+ perms.extend(self.opts["peer"][match])
+ if "," in clear_load["fun"]:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
- clear_load['fun'] = clear_load['fun'].split(',')
+ clear_load["fun"] = clear_load["fun"].split(",")
arg_ = []
- for arg in clear_load['arg']:
+ for arg in clear_load["arg"]:
arg_.append(arg.split())
- clear_load['arg'] = arg_
+ clear_load["arg"] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
- clear_load['fun'],
- clear_load['arg'],
- clear_load['tgt'],
- clear_load.get('tgt_type', 'glob'),
- publish_validate=True)
+ clear_load["fun"],
+ clear_load["arg"],
+ clear_load["tgt"],
+ clear_load.get("tgt_type", "glob"),
+ publish_validate=True,
+ )
def __verify_load(self, load, verify_keys):
- '''
+ """
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
@@ -1307,35 +1368,37 @@ class AESFuncs(object):
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
- '''
+ """
if any(key not in load for key in verify_keys):
return False
- if 'tok' not in load:
+ if "tok" not in load:
log.error(
- 'Received incomplete call from %s for \'%s\', missing \'%s\'',
- load['id'], inspect_stack()['co_name'], 'tok'
+ "Received incomplete call from %s for '%s', missing '%s'",
+ load["id"],
+ inspect_stack()["co_name"],
+ "tok",
)
return False
- if not self.__verify_minion(load['id'], load['tok']):
+ if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
- log.warning('Minion id %s is not who it says it is!', load['id'])
+ log.warning("Minion id %s is not who it says it is!", load["id"])
return False
- if 'tok' in load:
- load.pop('tok')
+ if "tok" in load:
+ load.pop("tok")
return load
def _master_tops(self, load):
- '''
+ """
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
- '''
- load = self.__verify_load(load, ('id', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
@@ -1344,138 +1407,142 @@ class AESFuncs(object):
_ext_nodes = _master_tops
def _master_opts(self, load):
- '''
+ """
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
- '''
+ """
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
- mopts['file_roots'] = file_roots
- mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
- mopts['env_order'] = self.opts['env_order']
- mopts['default_top'] = self.opts['default_top']
- if load.get('env_only'):
+ mopts["file_roots"] = file_roots
+ mopts["top_file_merging_strategy"] = self.opts["top_file_merging_strategy"]
+ mopts["env_order"] = self.opts["env_order"]
+ mopts["default_top"] = self.opts["default_top"]
+ if load.get("env_only"):
return mopts
- mopts['renderer'] = self.opts['renderer']
- mopts['failhard'] = self.opts['failhard']
- mopts['state_top'] = self.opts['state_top']
- mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
- mopts['nodegroups'] = self.opts['nodegroups']
- mopts['state_auto_order'] = self.opts['state_auto_order']
- mopts['state_events'] = self.opts['state_events']
- mopts['state_aggregate'] = self.opts['state_aggregate']
- mopts['jinja_env'] = self.opts['jinja_env']
- mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
- mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
- mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
+ mopts["renderer"] = self.opts["renderer"]
+ mopts["failhard"] = self.opts["failhard"]
+ mopts["state_top"] = self.opts["state_top"]
+ mopts["state_top_saltenv"] = self.opts["state_top_saltenv"]
+ mopts["nodegroups"] = self.opts["nodegroups"]
+ mopts["state_auto_order"] = self.opts["state_auto_order"]
+ mopts["state_events"] = self.opts["state_events"]
+ mopts["state_aggregate"] = self.opts["state_aggregate"]
+ mopts["jinja_env"] = self.opts["jinja_env"]
+ mopts["jinja_sls_env"] = self.opts["jinja_sls_env"]
+ mopts["jinja_lstrip_blocks"] = self.opts["jinja_lstrip_blocks"]
+ mopts["jinja_trim_blocks"] = self.opts["jinja_trim_blocks"]
return mopts
def _mine_get(self, load):
- '''
+ """
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
- '''
- load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "tgt", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
- '''
+ """
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
- '''
- load = self.__verify_load(load, ('id', 'data', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "data", "tok"))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
- '''
+ """
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
- '''
- load = self.__verify_load(load, ('id', 'fun', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
- '''
+ """
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
- '''
- load = self.__verify_load(load, ('id', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
- '''
+ """
Allows minions to send files to the master; files are sent to the
master file cache
- '''
- if any(key not in load for key in ('id', 'path', 'loc')):
+ """
+ if any(key not in load for key in ("id", "path", "loc")):
return False
- if not isinstance(load['path'], list):
+ if not isinstance(load["path"], list):
return False
- if not self.opts['file_recv']:
+ if not self.opts["file_recv"]:
return False
- if not salt.utils.verify.valid_id(self.opts, load['id']):
+ if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
- file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
+ file_recv_max_size = 1024 * 1024 * self.opts["file_recv_max_size"]
- if 'loc' in load and load['loc'] < 0:
- log.error('Invalid file pointer: load[loc] < 0')
+ if "loc" in load and load["loc"] < 0:
+ log.error("Invalid file pointer: load[loc] < 0")
return False
- if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
+ if len(load["data"]) + load.get("loc", 0) > file_recv_max_size:
log.error(
- 'file_recv_max_size limit of %d MB exceeded! %s will be '
- 'truncated. To successfully push this file, adjust '
- 'file_recv_max_size to an integer (in MB) large enough to '
- 'accommodate it.', file_recv_max_size, load['path']
+ "file_recv_max_size limit of %d MB exceeded! %s will be "
+ "truncated. To successfully push this file, adjust "
+ "file_recv_max_size to an integer (in MB) large enough to "
+ "accommodate it.",
+ self.opts["file_recv_max_size"],  # log the configured limit in MB, not bytes
+ load["path"],
)
return False
- if 'tok' not in load:
+ if "tok" not in load:
log.error(
- 'Received incomplete call from %s for \'%s\', missing \'%s\'',
- load['id'], inspect_stack()['co_name'], 'tok'
+ "Received incomplete call from %s for '%s', missing '%s'",
+ load["id"],
+ inspect_stack()["co_name"],
+ "tok",
)
return False
- if not self.__verify_minion(load['id'], load['tok']):
+ if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
- log.warning('Minion id %s is not who it says it is!', load['id'])
+ log.warning("Minion id %s is not who it says it is!", load["id"])
return {}
- load.pop('tok')
+ load.pop("tok")
# Join path
- sep_path = os.sep.join(load['path'])
+ sep_path = os.sep.join(load["path"])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
@@ -1483,21 +1550,19 @@ class AESFuncs(object):
# Ensure that this safety check is done after the path
# has been normalized.
- if os.path.isabs(normpath) or '../' in load['path']:
+ if os.path.isabs(normpath) or "../" in load["path"]:
# Can overwrite master files!!
return False
cpath = os.path.join(
- self.opts['cachedir'],
- 'minions',
- load['id'],
- 'files',
- normpath)
+ self.opts["cachedir"], "minions", load["id"], "files", normpath
+ )
# One last safety check here
- if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
+ if not os.path.normpath(cpath).startswith(self.opts["cachedir"]):
log.warning(
- 'Attempt to write received file outside of master cache '
- 'directory! Requested path: %s. Access denied.', cpath
+ "Attempt to write received file outside of master cache "
+ "directory! Requested path: %s. Access denied.",
+ cpath,
)
return False
cdir = os.path.dirname(cpath)
@@ -1506,60 +1571,65 @@ class AESFuncs(object):
os.makedirs(cdir)
except os.error:
pass
- if os.path.isfile(cpath) and load['loc'] != 0:
- mode = 'ab'
+ if os.path.isfile(cpath) and load["loc"] != 0:
+ mode = "ab"
else:
- mode = 'wb'
+ mode = "wb"
with salt.utils.files.fopen(cpath, mode) as fp_:
- if load['loc']:
- fp_.seek(load['loc'])
+ if load["loc"]:
+ fp_.seek(load["loc"])
- fp_.write(salt.utils.stringutils.to_bytes(load['data']))
+ fp_.write(salt.utils.stringutils.to_bytes(load["data"]))
return True
def _pillar(self, load):
- '''
+ """
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
- '''
- if any(key not in load for key in ('id', 'grains')):
+ """
+ if any(key not in load for key in ("id", "grains")):
return False
- if not salt.utils.verify.valid_id(self.opts, load['id']):
+ if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
- load['grains']['id'] = load['id']
+ load["grains"]["id"] = load["id"]
pillar = salt.pillar.get_pillar(
self.opts,
- load['grains'],
- load['id'],
- load.get('saltenv', load.get('env')),
- ext=load.get('ext'),
- pillar_override=load.get('pillar_override', {}),
- pillarenv=load.get('pillarenv'),
- extra_minion_data=load.get('extra_minion_data'))
+ load["grains"],
+ load["id"],
+ load.get("saltenv", load.get("env")),
+ ext=load.get("ext"),
+ pillar_override=load.get("pillar_override", {}),
+ pillarenv=load.get("pillarenv"),
+ extra_minion_data=load.get("extra_minion_data"),
+ )
data = pillar.compile_pillar()
self.fs_.update_opts()
- if self.opts.get('minion_data_cache', False):
- self.masterapi.cache.store('minions/{0}'.format(load['id']),
- 'data',
- {'grains': load['grains'],
- 'pillar': data})
- if self.opts.get('minion_data_cache_events') is True:
- self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
+ if self.opts.get("minion_data_cache", False):
+ self.masterapi.cache.store(
+ "minions/{0}".format(load["id"]),
+ "data",
+ {"grains": load["grains"], "pillar": data},
+ )
+ if self.opts.get("minion_data_cache_events") is True:
+ self.event.fire_event(
+ {"Minion data cache refresh": load["id"]},
+ tagify(load["id"], "refresh", "minion"),
+ )
return data
def _minion_event(self, load):
- '''
+ """
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
- '''
- load = self.__verify_load(load, ('id', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
# Route to master event bus
@@ -1568,38 +1638,33 @@ class AESFuncs(object):
self._handle_minion_event(load)
def _handle_minion_event(self, load):
- '''
+ """
Act on specific events from minions
- '''
- id_ = load['id']
- if load.get('tag', '') == '_salt_error':
+ """
+ id_ = load["id"]
+ if load.get("tag", "") == "_salt_error":
log.error(
- 'Received minion error from [%s]: %s',
- id_, load['data']['message']
+ "Received minion error from [%s]: %s", id_, load["data"]["message"]
)
- for event in load.get('events', []):
- event_data = event.get('data', {})
- if 'minions' in event_data:
- jid = event_data.get('jid')
+ for event in load.get("events", []):
+ event_data = event.get("data", {})
+ if "minions" in event_data:
+ jid = event_data.get("jid")
if not jid:
continue
- minions = event_data['minions']
+ minions = event_data["minions"]
try:
salt.utils.job.store_minions(
- self.opts,
- jid,
- minions,
- mminion=self.mminion,
- syndic_id=id_)
+ self.opts, jid, minions, mminion=self.mminion, syndic_id=id_
+ )
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
- 'Could not add minion(s) %s for job %s: %s',
- minions, jid, exc
+ "Could not add minion(s) %s for job %s: %s", minions, jid, exc
)
def _return(self, load):
- '''
+ """
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
@@ -1607,100 +1672,110 @@ class AESFuncs(object):
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
- '''
- if self.opts['require_minion_sign_messages'] and 'sig' not in load:
+ """
+ if self.opts["require_minion_sign_messages"] and "sig" not in load:
log.critical(
- '_return: Master is requiring minions to sign their '
- 'messages, but there is no signature in this payload from '
- '%s.', load['id']
+ "_return: Master is requiring minions to sign their "
+ "messages, but there is no signature in this payload from "
+ "%s.",
+ load["id"],
)
return False
- if 'sig' in load:
- log.trace('Verifying signed event publish from minion')
- sig = load.pop('sig')
- this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
+ if "sig" in load:
+ log.trace("Verifying signed event publish from minion")
+ sig = load.pop("sig")
+ this_minion_pubkey = os.path.join(
+ self.opts["pki_dir"], "minions/{0}".format(load["id"])
+ )
serialized_load = salt.serializers.msgpack.serialize(load)
- if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
- log.info('Failed to verify event signature from minion %s.', load['id'])
- if self.opts['drop_messages_signature_fail']:
+ if not salt.crypt.verify_signature(
+ this_minion_pubkey, serialized_load, sig
+ ):
+ log.info("Failed to verify event signature from minion %s.", load["id"])
+ if self.opts["drop_messages_signature_fail"]:
log.critical(
- 'drop_messages_signature_fail is enabled, dropping '
- 'message from %s', load['id']
+ "drop_messages_signature_fail is enabled, dropping "
+ "message from %s",
+ load["id"],
)
return False
else:
- log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
- load['sig'] = sig
+ log.info(
+ "But 'drop_message_signature_fail' is disabled, so message is still accepted."
+ )
+ load["sig"] = sig
try:
salt.utils.job.store_job(
- self.opts, load, event=self.event, mminion=self.mminion)
+ self.opts, load, event=self.event, mminion=self.mminion
+ )
except salt.exceptions.SaltCacheError:
- log.error('Could not store job information for load: %s', load)
+ log.error("Could not store job information for load: %s", load)
def _syndic_return(self, load):
- '''
+ """
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
- '''
- loads = load.get('load')
+ """
+ loads = load.get("load")
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
- if any(key not in load for key in ('return', 'jid', 'id')):
+ if any(key not in load for key in ("return", "jid", "id")):
continue
# if we have a load, save it
- if load.get('load'):
- fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
- self.mminion.returners[fstr](load['jid'], load['load'])
+ if load.get("load"):
+ fstr = "{0}.save_load".format(self.opts["master_job_cache"])
+ self.mminion.returners[fstr](load["jid"], load["load"])
# Register the syndic
- syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
+ syndic_cache_path = os.path.join(
+ self.opts["cachedir"], "syndics", load["id"]
+ )
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
- with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
- wfh.write('')
+ with salt.utils.files.fopen(syndic_cache_path, "w") as wfh:
+ wfh.write("")
# Format individual return loads
- for key, item in six.iteritems(load['return']):
- ret = {'jid': load['jid'],
- 'id': key}
+ for key, item in six.iteritems(load["return"]):
+ ret = {"jid": load["jid"], "id": key}
ret.update(item)
- if 'master_id' in load:
- ret['master_id'] = load['master_id']
- if 'fun' in load:
- ret['fun'] = load['fun']
- if 'arg' in load:
- ret['fun_args'] = load['arg']
- if 'out' in load:
- ret['out'] = load['out']
- if 'sig' in load:
- ret['sig'] = load['sig']
+ if "master_id" in load:
+ ret["master_id"] = load["master_id"]
+ if "fun" in load:
+ ret["fun"] = load["fun"]
+ if "arg" in load:
+ ret["fun_args"] = load["arg"]
+ if "out" in load:
+ ret["out"] = load["out"]
+ if "sig" in load:
+ ret["sig"] = load["sig"]
self._return(ret)
def minion_runner(self, clear_load):
- '''
+ """
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
- '''
- load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
+ """
+ load = self.__verify_load(clear_load, ("fun", "arg", "id", "tok"))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
- '''
+ """
Request the return data from a specific jid; only allowed
if the requesting minion also initiated the execution.
@@ -1708,25 +1783,23 @@ class AESFuncs(object):
:rtype: dict
:return: Return data corresponding to a given JID
- '''
- load = self.__verify_load(load, ('jid', 'id', 'tok'))
+ """
+ load = self.__verify_load(load, ("jid", "id", "tok"))
if load is False:
return {}
# Check that this minion can access this data
- auth_cache = os.path.join(
- self.opts['cachedir'],
- 'publish_auth')
+ auth_cache = os.path.join(self.opts["cachedir"], "publish_auth")
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
- jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
- with salt.utils.files.fopen(jid_fn, 'r') as fp_:
- if not load['id'] == fp_.read():
+ jid_fn = os.path.join(auth_cache, six.text_type(load["jid"]))
+ with salt.utils.files.fopen(jid_fn, "r") as fp_:
+ if not load["id"] == fp_.read():
return {}
# Grab the latest and return
- return self.local.get_cache_returns(load['jid'])
+ return self.local.get_cache_returns(load["jid"])
def minion_pub(self, clear_load):
- '''
+ """
Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
@@ -1754,14 +1827,14 @@ class AESFuncs(object):
execute commands from the test module.
:param dict clear_load: The minion payload
- '''
+ """
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
- '''
+ """
Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
@@ -1789,14 +1862,14 @@ class AESFuncs(object):
execute commands from the test module.
:param dict clear_load: The minion payload
- '''
+ """
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
- '''
+ """
Allow a minion to request revocation of its own key
:param dict load: The minion payload
@@ -1806,13 +1879,14 @@ class AESFuncs(object):
:rtype: bool
:return: True if key was revoked, False if not
- '''
- load = self.__verify_load(load, ('id', 'tok'))
+ """
+ load = self.__verify_load(load, ("id", "tok"))
- if not self.opts.get('allow_minion_key_revoke', False):
+ if not self.opts.get("allow_minion_key_revoke", False):
log.warning(
- 'Minion %s requested key revoke, but allow_minion_key_revoke '
- 'is set to False', load['id']
+ "Minion %s requested key revoke, but allow_minion_key_revoke "
+ "is set to False",
+ load["id"],
)
return load
@@ -1822,52 +1896,53 @@ class AESFuncs(object):
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
- '''
+ """
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
- '''
+ """
# Don't honor private functions
- if func.startswith('__'):
+ if func.startswith("__"):
# TODO: return some error? Seems odd to return {}
- return {}, {'fun': 'send'}
+ return {}, {"fun": "send"}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
- 'Master function call %s took %s seconds',
- func, time.time() - start
+ "Master function call %s took %s seconds", func, time.time() - start
)
except Exception: # pylint: disable=broad-except
- ret = ''
- log.error('Error in function %s:\n', func, exc_info=True)
+ ret = ""
+ log.error("Error in function %s:\n", func, exc_info=True)
else:
log.error(
- 'Received function %s which is unavailable on the master, '
- 'returning False', func
+ "Received function %s which is unavailable on the master, "
+ "returning False",
+ func,
)
- return False, {'fun': 'send'}
+ return False, {"fun": "send"}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
- if func == '_return':
- return ret, {'fun': 'send'}
- if func == '_pillar' and 'id' in load:
- if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
+ if func == "_return":
+ return ret, {"fun": "send"}
+ if func == "_pillar" and "id" in load:
+ if load.get("ver") != "2" and self.opts["pillar_version"] == 1:
# Authorized to return old pillar proto
- return ret, {'fun': 'send'}
- return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
+ return ret, {"fun": "send"}
+ return ret, {"fun": "send_private", "key": "pillar", "tgt": load["id"]}
# Encrypt the return
- return ret, {'fun': 'send'}
+ return ret, {"fun": "send"}
class ClearFuncs(object):
- '''
+ """
Set up functions that are safe to execute when commands are sent to the
without encryption and authentication
- '''
+ """
+
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
@@ -1876,19 +1951,18 @@ class ClearFuncs(object):
self.opts = opts
self.key = key
# Create the event manager
- self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
+ self.event = salt.utils.event.get_master_event(
+ self.opts, self.opts["sock_dir"], listen=False
+ )
# Make a client
- self.local = salt.client.get_local_client(self.opts['conf_file'])
+ self.local = salt.client.get_local_client(self.opts["conf_file"])
# Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
- self.opts,
- states=False,
- rend=False,
- ignore_config_errors=True
+ self.opts, states=False, rend=False, ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
@@ -1896,33 +1970,37 @@ class ClearFuncs(object):
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
- '''
+ """
Send a master control function back to the runner system
- '''
+ """
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
- error = auth_check.get('error')
+ error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
- return {'error': error}
+ return {"error": error}
# Authorize
- username = auth_check.get('username')
- if auth_type != 'user':
+ username = auth_check.get("username")
+ if auth_type != "user":
runner_check = self.ckminions.runner_check(
- auth_check.get('auth_list', []),
- clear_load['fun'],
- clear_load.get('kwarg', {})
+ auth_check.get("auth_list", []),
+ clear_load["fun"],
+ clear_load.get("kwarg", {}),
)
if not runner_check:
- return {'error': {'name': err_name,
- 'message': 'Authentication failure of type "{0}" occurred for '
- 'user {1}.'.format(auth_type, username)}}
- elif isinstance(runner_check, dict) and 'error' in runner_check:
+ return {
+ "error": {
+ "name": err_name,
+ "message": 'Authentication failure of type "{0}" occurred for '
+ "user {1}.".format(auth_type, username),
+ }
+ }
+ elif isinstance(runner_check, dict) and "error" in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
@@ -1930,54 +2008,62 @@ class ClearFuncs(object):
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
- if 'user' in clear_load:
- username = clear_load['user']
+ if "user" in clear_load:
+ username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
- username = self.opts.get('user', 'root')
+ username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
- fun = clear_load.pop('fun')
+ fun = clear_load.pop("fun")
runner_client = salt.runner.RunnerClient(self.opts)
- return runner_client.asynchronous(fun,
- clear_load.get('kwarg', {}),
- username)
+ return runner_client.asynchronous(
+ fun, clear_load.get("kwarg", {}), username
+ )
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception occurred while introspecting %s: %s', fun, exc)
- return {'error': {'name': exc.__class__.__name__,
- 'args': exc.args,
- 'message': six.text_type(exc)}}
+ log.error("Exception occurred while introspecting %s: %s", fun, exc)
+ return {
+ "error": {
+ "name": exc.__class__.__name__,
+ "args": exc.args,
+ "message": six.text_type(exc),
+ }
+ }
def wheel(self, clear_load):
- '''
+ """
Send a master control function back to the wheel system
- '''
+ """
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
- error = auth_check.get('error')
+ error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
- return {'error': error}
+ return {"error": error}
# Authorize
- username = auth_check.get('username')
- if auth_type != 'user':
+ username = auth_check.get("username")
+ if auth_type != "user":
wheel_check = self.ckminions.wheel_check(
- auth_check.get('auth_list', []),
- clear_load['fun'],
- clear_load.get('kwarg', {})
+ auth_check.get("auth_list", []),
+ clear_load["fun"],
+ clear_load.get("kwarg", {}),
)
if not wheel_check:
- return {'error': {'name': err_name,
- 'message': 'Authentication failure of type "{0}" occurred for '
- 'user {1}.'.format(auth_type, username)}}
- elif isinstance(wheel_check, dict) and 'error' in wheel_check:
+ return {
+ "error": {
+ "name": err_name,
+ "message": 'Authentication failure of type "{0}" occurred for '
+ "user {1}.".format(auth_type, username),
+ }
+ }
+ elif isinstance(wheel_check, dict) and "error" in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
@@ -1985,156 +2071,179 @@ class ClearFuncs(object):
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
- if 'user' in clear_load:
- username = clear_load['user']
+ if "user" in clear_load:
+ username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
- username = self.opts.get('user', 'root')
+ username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
- fun = clear_load.pop('fun')
- tag = tagify(jid, prefix='wheel')
- data = {'fun': "wheel.{0}".format(fun),
- 'jid': jid,
- 'tag': tag,
- 'user': username}
+ fun = clear_load.pop("fun")
+ tag = tagify(jid, prefix="wheel")
+ data = {
+ "fun": "wheel.{0}".format(fun),
+ "jid": jid,
+ "tag": tag,
+ "user": username,
+ }
- self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
+ self.event.fire_event(data, tagify([jid, "new"], "wheel"))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
- data['return'] = ret['return']
- data['success'] = ret['success']
- self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
- return {'tag': tag,
- 'data': data}
+ data["return"] = ret["return"]
+ data["success"] = ret["success"]
+ self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
+ return {"tag": tag, "data": data}
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception occurred while introspecting %s: %s', fun, exc)
- data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
- fun,
- exc.__class__.__name__,
- exc,
+ log.error("Exception occurred while introspecting %s: %s", fun, exc)
+ data["return"] = "Exception occurred in wheel {0}: {1}: {2}".format(
+ fun, exc.__class__.__name__, exc,
)
- data['success'] = False
- self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
- return {'tag': tag,
- 'data': data}
+ data["success"] = False
+ self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
+ return {"tag": tag, "data": data}
def mk_token(self, clear_load):
- '''
+ """
        Create and return an authentication token; the clear load needs to
contain the eauth key and the needed authentication creds.
- '''
+ """
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
- return ''
+ return ""
return token
def get_token(self, clear_load):
- '''
+ """
Return the name associated with a token or False if the token is invalid
- '''
- if 'token' not in clear_load:
+ """
+ if "token" not in clear_load:
return False
- return self.loadauth.get_tok(clear_load['token'])
+ return self.loadauth.get_tok(clear_load["token"])
def publish(self, clear_load):
- '''
+ """
        This method sends out publications to the minions; it can only be used
by the LocalClient.
- '''
- extra = clear_load.get('kwargs', {})
+ """
+ extra = clear_load.get("kwargs", {})
- publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
+ publisher_acl = salt.acl.PublisherACL(self.opts["publisher_acl_blacklist"])
- if publisher_acl.user_is_blacklisted(clear_load['user']) or \
- publisher_acl.cmd_is_blacklisted(clear_load['fun']):
+ if publisher_acl.user_is_blacklisted(
+ clear_load["user"]
+ ) or publisher_acl.cmd_is_blacklisted(clear_load["fun"]):
log.error(
- '%s does not have permissions to run %s. Please contact '
- 'your local administrator if you believe this is in '
- 'error.\n', clear_load['user'], clear_load['fun']
+ "%s does not have permissions to run %s. Please contact "
+ "your local administrator if you believe this is in "
+ "error.\n",
+ clear_load["user"],
+ clear_load["fun"],
)
- return {'error': {'name': 'AuthorizationError',
- 'message': 'Authorization error occurred.'}}
+ return {
+ "error": {
+ "name": "AuthorizationError",
+ "message": "Authorization error occurred.",
+ }
+ }
# Retrieve the minions list
- delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
+ delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
- clear_load['tgt'],
- clear_load.get('tgt_type', 'glob'),
- delimiter
+ clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter
)
- minions = _res.get('minions', list())
- missing = _res.get('missing', list())
- ssh_minions = _res.get('ssh_minions', False)
+ minions = _res.get("minions", list())
+ missing = _res.get("missing", list())
+ ssh_minions = _res.get("ssh_minions", False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
- if auth_type == 'user':
- auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
+ if auth_type == "user":
+ auth_check = self.loadauth.check_authentication(
+ clear_load, auth_type, key=key
+ )
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
- auth_list = auth_check.get('auth_list', [])
+ auth_list = auth_check.get("auth_list", [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
- if auth_check.get('error'):
+ if auth_check.get("error"):
# Authentication error occurred: do not continue.
log.warning(err_msg)
- return {'error': {'name': 'AuthenticationError',
- 'message': 'Authentication error occurred.'}}
+ return {
+ "error": {
+ "name": "AuthenticationError",
+ "message": "Authentication error occurred.",
+ }
+ }
# All Token, Eauth, and non-root users must pass the authorization check
- if auth_type != 'user' or (auth_type == 'user' and auth_list):
+ if auth_type != "user" or (auth_type == "user" and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
- clear_load['fun'],
- clear_load['arg'],
- clear_load['tgt'],
- clear_load.get('tgt_type', 'glob'),
+ clear_load["fun"],
+ clear_load["arg"],
+ clear_load["tgt"],
+ clear_load.get("tgt_type", "glob"),
minions=minions,
# always accept find_job
- whitelist=['saltutil.find_job'],
+ whitelist=["saltutil.find_job"],
)
if not authorized:
# Authorization error occurred. Do not continue.
- if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
- log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
+ if (
+ auth_type == "eauth"
+ and not auth_list
+ and "username" in extra
+ and "eauth" in extra
+ ):
+ log.debug(
+ 'Auth configuration for eauth "%s" and user "%s" is empty',
+ extra["eauth"],
+ extra["username"],
+ )
log.warning(err_msg)
- return {'error': {'name': 'AuthorizationError',
- 'message': 'Authorization error occurred.'}}
+ return {
+ "error": {
+ "name": "AuthorizationError",
+ "message": "Authorization error occurred.",
+ }
+ }
# Perform some specific auth_type tasks after the authorization check
- if auth_type == 'token':
- username = auth_check.get('username')
- clear_load['user'] = username
+ if auth_type == "token":
+ username = auth_check.get("username")
+ clear_load["user"] = username
log.debug('Minion tokenized user = "%s"', username)
- elif auth_type == 'eauth':
+ elif auth_type == "eauth":
# The username we are attempting to auth with
- clear_load['user'] = self.loadauth.load_name(extra)
+ clear_load["user"] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
- if not self.opts.get('order_masters'):
+ if not self.opts.get("order_masters"):
# Check for no minions
if not minions:
return {
- 'enc': 'clear',
- 'load': {
- 'jid': None,
- 'minions': minions,
- 'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
- }
+ "enc": "clear",
+ "load": {
+ "jid": None,
+ "minions": minions,
+ "error": "Master could not resolve minions for target {0}".format(
+ clear_load["tgt"]
+ ),
+ },
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
- return {'enc': 'clear',
- 'load': {'error': 'Master failed to assign jid'}}
+ return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
@@ -2142,156 +2251,152 @@ class ClearFuncs(object):
self._send_pub(payload)
return {
- 'enc': 'clear',
- 'load': {
- 'jid': clear_load['jid'],
- 'minions': minions,
- 'missing': missing
- }
+ "enc": "clear",
+ "load": {"jid": clear_load["jid"], "minions": minions, "missing": missing},
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
- if 'token' in clear_load:
- auth_type = 'token'
- err_name = 'TokenAuthenticationError'
- sensitive_load_keys = ['token']
- elif 'eauth' in clear_load:
- auth_type = 'eauth'
- err_name = 'EauthAuthenticationError'
- sensitive_load_keys = ['username', 'password']
+ if "token" in clear_load:
+ auth_type = "token"
+ err_name = "TokenAuthenticationError"
+ sensitive_load_keys = ["token"]
+ elif "eauth" in clear_load:
+ auth_type = "eauth"
+ err_name = "EauthAuthenticationError"
+ sensitive_load_keys = ["username", "password"]
else:
- auth_type = 'user'
- err_name = 'UserAuthenticationError'
+ auth_type = "user"
+ err_name = "UserAuthenticationError"
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
- '''
+ """
Return a jid for this publication
- '''
+ """
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
- passed_jid = clear_load['jid'] if clear_load.get('jid') else None
- nocache = extra.get('nocache', False)
+ passed_jid = clear_load["jid"] if clear_load.get("jid") else None
+ nocache = extra.get("nocache", False)
# Retrieve the jid
- fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
+ fstr = "{0}.prep_jid".format(self.opts["master_job_cache"])
try:
# Retrieve the jid
- jid = self.mminion.returners[fstr](nocache=nocache,
- passed_jid=passed_jid)
+ jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
- 'Failed to allocate a jid. The requested returner \'{0}\' '
- 'could not be loaded.'.format(fstr.split('.')[0])
+ "Failed to allocate a jid. The requested returner '{0}' "
+ "could not be loaded.".format(fstr.split(".")[0])
)
log.error(msg)
- return {'error': msg}
+ return {"error": msg}
return jid
def _send_pub(self, load):
- '''
+ """
Take a load and send it across the network to connected minions
- '''
+ """
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
- if not hasattr(self, '_ssh_client'):
+ if not hasattr(self, "_ssh_client"):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
- '''
+ """
Take a load and send it across the network to ssh minions
- '''
- if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
- log.debug('Send payload to ssh minions')
+ """
+ if self.opts["enable_ssh_minions"] is True and ssh_minions is True:
+ log.debug("Send payload to ssh minions")
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
- '''
+ """
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
- '''
- clear_load['jid'] = jid
- delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
+ """
+ clear_load["jid"] = jid
+ delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
- self.event.fire_event({'minions': minions}, clear_load['jid'])
+ self.event.fire_event({"minions": minions}, clear_load["jid"])
new_job_load = {
- 'jid': clear_load['jid'],
- 'tgt_type': clear_load['tgt_type'],
- 'tgt': clear_load['tgt'],
- 'user': clear_load['user'],
- 'fun': clear_load['fun'],
- 'arg': clear_load['arg'],
- 'minions': minions,
- 'missing': missing,
- }
+ "jid": clear_load["jid"],
+ "tgt_type": clear_load["tgt_type"],
+ "tgt": clear_load["tgt"],
+ "user": clear_load["user"],
+ "fun": clear_load["fun"],
+ "arg": clear_load["arg"],
+ "minions": minions,
+ "missing": missing,
+ }
# Announce the job on the event bus
- self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
+ self.event.fire_event(new_job_load, tagify([clear_load["jid"], "new"], "job"))
- if self.opts['ext_job_cache']:
- fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
+ if self.opts["ext_job_cache"]:
+ fstr = "{0}.save_load".format(self.opts["ext_job_cache"])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
- arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
+ arg_spec = salt.utils.args.get_function_argspec(
+ self.mminion.returners[fstr]
+ )
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
- if 'minions' not in arg_spec.args:
+ if "minions" not in arg_spec.args:
log.critical(
- 'The specified returner used for the external job cache '
- '\'%s\' does not have a \'minions\' kwarg in the returner\'s '
- 'save_load function.', self.opts['ext_job_cache']
+ "The specified returner used for the external job cache "
+ "'%s' does not have a 'minions' kwarg in the returner's "
+ "save_load function.",
+ self.opts["ext_job_cache"],
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
- 'The specified returner used for the external job cache '
+ "The specified returner used for the external job cache "
'"%s" does not have a save_load function!',
- self.opts['ext_job_cache']
+ self.opts["ext_job_cache"],
)
if save_load_func:
try:
- self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
+ self.mminion.returners[fstr](
+ clear_load["jid"], clear_load, minions=minions
+ )
except Exception: # pylint: disable=broad-except
log.critical(
- 'The specified returner threw a stack trace:\n',
- exc_info=True
+ "The specified returner threw a stack trace:\n", exc_info=True
)
# always write out to the master job caches
try:
- fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
- self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
+ fstr = "{0}.save_load".format(self.opts["master_job_cache"])
+ self.mminion.returners[fstr](clear_load["jid"], clear_load, minions)
except KeyError:
log.critical(
- 'The specified returner used for the master job cache '
+ "The specified returner used for the master job cache "
'"%s" does not have a save_load function!',
- self.opts['master_job_cache']
+ self.opts["master_job_cache"],
)
except Exception: # pylint: disable=broad-except
- log.critical(
- 'The specified returner threw a stack trace:\n',
- exc_info=True
- )
+ log.critical("The specified returner threw a stack trace:\n", exc_info=True)
# Set up the payload
- payload = {'enc': 'aes'}
+ payload = {"enc": "aes"}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
@@ -2301,61 +2406,62 @@ class ClearFuncs(object):
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
- 'fun': clear_load['fun'],
- 'arg': clear_load['arg'],
- 'tgt': clear_load['tgt'],
- 'jid': clear_load['jid'],
- 'ret': clear_load['ret'],
+ "fun": clear_load["fun"],
+ "arg": clear_load["arg"],
+ "tgt": clear_load["tgt"],
+ "jid": clear_load["jid"],
+ "ret": clear_load["ret"],
}
# if you specified a master id, lets put that in the load
- if 'master_id' in self.opts:
- load['master_id'] = self.opts['master_id']
+ if "master_id" in self.opts:
+ load["master_id"] = self.opts["master_id"]
# if someone passed us one, use that
- if 'master_id' in extra:
- load['master_id'] = extra['master_id']
+ if "master_id" in extra:
+ load["master_id"] = extra["master_id"]
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
- load['delimiter'] = delimiter
+ load["delimiter"] = delimiter
- if 'id' in extra:
- load['id'] = extra['id']
- if 'tgt_type' in clear_load:
- load['tgt_type'] = clear_load['tgt_type']
- if 'to' in clear_load:
- load['to'] = clear_load['to']
+ if "id" in extra:
+ load["id"] = extra["id"]
+ if "tgt_type" in clear_load:
+ load["tgt_type"] = clear_load["tgt_type"]
+ if "to" in clear_load:
+ load["to"] = clear_load["to"]
- if 'kwargs' in clear_load:
- if 'ret_config' in clear_load['kwargs']:
- load['ret_config'] = clear_load['kwargs'].get('ret_config')
+ if "kwargs" in clear_load:
+ if "ret_config" in clear_load["kwargs"]:
+ load["ret_config"] = clear_load["kwargs"].get("ret_config")
- if 'metadata' in clear_load['kwargs']:
- load['metadata'] = clear_load['kwargs'].get('metadata')
+ if "metadata" in clear_load["kwargs"]:
+ load["metadata"] = clear_load["kwargs"].get("metadata")
- if 'module_executors' in clear_load['kwargs']:
- load['module_executors'] = clear_load['kwargs'].get('module_executors')
+ if "module_executors" in clear_load["kwargs"]:
+ load["module_executors"] = clear_load["kwargs"].get("module_executors")
- if 'executor_opts' in clear_load['kwargs']:
- load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
+ if "executor_opts" in clear_load["kwargs"]:
+ load["executor_opts"] = clear_load["kwargs"].get("executor_opts")
- if 'ret_kwargs' in clear_load['kwargs']:
- load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
+ if "ret_kwargs" in clear_load["kwargs"]:
+ load["ret_kwargs"] = clear_load["kwargs"].get("ret_kwargs")
- if 'user' in clear_load:
+ if "user" in clear_load:
log.info(
- 'User %s Published command %s with jid %s',
- clear_load['user'], clear_load['fun'], clear_load['jid']
+ "User %s Published command %s with jid %s",
+ clear_load["user"],
+ clear_load["fun"],
+ clear_load["jid"],
)
- load['user'] = clear_load['user']
+ load["user"] = clear_load["user"]
else:
log.info(
- 'Published command %s with jid %s',
- clear_load['fun'], clear_load['jid']
+ "Published command %s with jid %s", clear_load["fun"], clear_load["jid"]
)
- log.debug('Published command details %s', load)
+ log.debug("Published command details %s", load)
return load
def ping(self, clear_load):
- '''
+ """
Send the load back to the sender.
- '''
+ """
return clear_load
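
For orientation amid the reformatting churn: the auth-type triage in `_prep_auth_info` above decides which authentication path and error name every clear-load request gets. A minimal standalone sketch of that branching (illustrative only, not the Salt API; the sample loads are invented):

def classify_load(clear_load):
    # Mirrors _prep_auth_info: a "token" key wins, then "eauth",
    # otherwise the request is treated as a local "user" call.
    if "token" in clear_load:
        return "token", "TokenAuthenticationError", ["token"]
    if "eauth" in clear_load:
        return "eauth", "EauthAuthenticationError", ["username", "password"]
    return "user", "UserAuthenticationError", []

print(classify_load({"token": "abc123"}))
print(classify_load({"eauth": "pam", "username": "fred", "password": "hunter2"}))
print(classify_load({"user": "root"}))
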
diff --git a/salt/matchers/__init__.py b/salt/matchers/__init__.py
index 22f81456180..01bff648670 100644
--- a/salt/matchers/__init__.py
+++ b/salt/matchers/__init__.py
@@ -1,43 +1,45 @@
# -*- coding: utf-8 -*-
-'''
+"""
Salt package
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import warnings
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
- 'once', # Show once
- '', # No deprecation message match
+ "once", # Show once
+ "", # No deprecation message match
DeprecationWarning, # This filter is for DeprecationWarnings
- r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.'
+ r"^(salt|salt\.(.*))$", # Match module(s) 'salt' and 'salt.'
)
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
- 'ignore',
- 'With-statements now directly support multiple context managers',
- DeprecationWarning
+ "ignore",
+ "With-statements now directly support multiple context managers",
+ DeprecationWarning,
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
- 'ignore',
- '^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
- UserWarning
+ "ignore",
+ "^Module backports was already imported from (.*), but (.*) is being added to sys.path$",
+ UserWarning,
)
def __define_global_system_encoding_variable__():
import sys
+
    # This is the most trustworthy source of the system encoding; however, if
# salt is being imported after being daemonized, this information is lost
# and reset to None
encoding = None
- if not sys.platform.startswith('win') and sys.stdin is not None:
+ if not sys.platform.startswith("win") and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@@ -48,6 +50,7 @@ def __define_global_system_encoding_variable__():
# encoding. MS Windows has problems with this and reports the wrong
# encoding
import locale
+
try:
encoding = locale.getdefaultlocale()[-1]
except ValueError:
@@ -63,16 +66,16 @@ def __define_global_system_encoding_variable__():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
- if sys.platform.startswith('darwin'):
+ if sys.platform.startswith("darwin"):
# Mac OS X uses UTF-8
- encoding = 'utf-8'
- elif sys.platform.startswith('win'):
+ encoding = "utf-8"
+ elif sys.platform.startswith("win"):
            # Windows uses a configurable encoding; on Windows, Python uses the name "mbcs"
# to refer to whatever the currently configured encoding is.
- encoding = 'mbcs'
+ encoding = "mbcs"
else:
# On linux default to ascii as a last resort
- encoding = 'ascii'
+ encoding = "ascii"
# We can't use six.moves.builtins because these builtins get deleted sooner
# than expected. See:
@@ -83,7 +86,7 @@ def __define_global_system_encoding_variable__():
import builtins # pylint: disable=import-error
# Define the detected encoding as a built-in variable for ease of use
- setattr(builtins, '__salt_system_encoding__', encoding)
+ setattr(builtins, "__salt_system_encoding__", encoding)
# This is now garbage collectable
del sys
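
For context on the hunk above: the detection order is stdin encoding, then the locale, then `sys.getdefaultencoding()`, then a per-platform fallback. A simplified, runnable restatement of that chain (a sketch, not the Salt function itself):

import locale
import sys

def detect_system_encoding():
    encoding = None
    # 1. stdin usually reflects the filesystem encoding on POSIX
    if not sys.platform.startswith("win") and sys.stdin is not None:
        encoding = sys.stdin.encoding
    # 2. fall back to the configured locale
    if not encoding:
        try:
            encoding = locale.getdefaultlocale()[-1]
        except ValueError:
            pass
    # 3. then Python's default, 4. then a hard-coded per-platform guess
    if not encoding:
        encoding = sys.getdefaultencoding()
    if not encoding:
        if sys.platform.startswith("darwin"):
            encoding = "utf-8"
        elif sys.platform.startswith("win"):
            encoding = "mbcs"
        else:
            encoding = "ascii"
    return encoding

print(detect_system_encoding())
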
diff --git a/salt/matchers/cache_match.py b/salt/matchers/cache_match.py
index d34c2a2670a..3ad0bff4007 100644
--- a/salt/matchers/cache_match.py
+++ b/salt/matchers/cache_match.py
@@ -1,34 +1,42 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default cache matcher function. It only exists for the master,
which is why there is only an ``mmatch()`` and no ``match()``.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-import salt.utils.data # pylint: disable=3rd-party-module-not-gated
+import salt.utils.data # pylint: disable=3rd-party-module-not-gated
import salt.utils.minions # pylint: disable=3rd-party-module-not-gated
log = logging.getLogger(__name__)
-def mmatch(expr,
- delimiter,
- greedy,
- search_type,
- regex_match=False,
- exact_match=False,
- opts=None):
- '''
+def mmatch(
+ expr,
+ delimiter,
+ greedy,
+ search_type,
+ regex_match=False,
+ exact_match=False,
+ opts=None,
+):
+ """
Helper function to search for minions in master caches
    If 'greedy', return accepted minions that match the condition or are absent from the cache.
    If not 'greedy', return only the minions that have cache data and match the condition.
- '''
+ """
if not opts:
opts = __opts__
ckminions = salt.utils.minions.CkMinions(opts)
- return ckminions._check_cache_minions(expr, delimiter, greedy,
- search_type, regex_match=regex_match,
- exact_match=exact_match)
+ return ckminions._check_cache_minions(
+ expr,
+ delimiter,
+ greedy,
+ search_type,
+ regex_match=regex_match,
+ exact_match=exact_match,
+ )
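
To make the greedy/non-greedy docstring concrete, here is the semantics on a plain dict standing in for the master's minion data cache (hypothetical data; the real matcher goes through `CkMinions._check_cache_minions`):

def cache_search(cache, key, value, greedy):
    matched = []
    for minion, data in cache.items():
        if not data:
            # no cache data for this minion: greedy mode keeps it anyway
            if greedy:
                matched.append(minion)
        elif data.get(key) == value:
            matched.append(minion)
    return matched

cache = {"web1": {"os": "Ubuntu"}, "db1": {"os": "CentOS"}, "new1": None}
print(cache_search(cache, "os", "Ubuntu", greedy=True))   # ['web1', 'new1']
print(cache_search(cache, "os", "Ubuntu", greedy=False))  # ['web1']
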
diff --git a/salt/matchers/compound_match.py b/salt/matchers/compound_match.py
index 678844f194a..fabc7ec4cdd 100644
--- a/salt/matchers/compound_match.py
+++ b/salt/matchers/compound_match.py
@@ -1,17 +1,19 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default compound matcher function.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.ext import six # pylint: disable=3rd-party-module-not-gated
+
import salt.loader
import salt.utils.minions # pylint: disable=3rd-party-module-not-gated
+from salt.ext import six # pylint: disable=3rd-party-module-not-gated
HAS_RANGE = False
try:
import seco.range # pylint: disable=unused-import
+
HAS_RANGE = True
except ImportError:
pass
@@ -20,32 +22,34 @@ log = logging.getLogger(__name__)
def match(tgt, opts=None):
- '''
+ """
Runs the compound target check
- '''
+ """
if not opts:
opts = __opts__
- nodegroups = opts.get('nodegroups', {})
+ nodegroups = opts.get("nodegroups", {})
matchers = salt.loader.matchers(opts)
- minion_id = opts.get('minion_id', opts['id'])
+ minion_id = opts.get("minion_id", opts["id"])
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
- log.error('Compound target received that is neither string, list nor tuple')
+ log.error("Compound target received that is neither string, list nor tuple")
return False
- log.debug('compound_match: %s ? %s', minion_id, tgt)
- ref = {'G': 'grain',
- 'P': 'grain_pcre',
- 'I': 'pillar',
- 'J': 'pillar_pcre',
- 'L': 'list',
- 'N': None, # Nodegroups should already be expanded
- 'S': 'ipcidr',
- 'E': 'pcre'}
+ log.debug("compound_match: %s ? %s", minion_id, tgt)
+ ref = {
+ "G": "grain",
+ "P": "grain_pcre",
+ "I": "pillar",
+ "J": "pillar_pcre",
+ "L": "list",
+ "N": None, # Nodegroups should already be expanded
+ "S": "ipcidr",
+ "E": "pcre",
+ }
if HAS_RANGE:
- ref['R'] = 'range'
+ ref["R"] = "range"
results = []
- opers = ['and', 'or', 'not', '(', ')']
+ opers = ["and", "or", "not", "(", ")"]
if isinstance(tgt, six.string_types):
words = tgt.split()
@@ -60,56 +64,62 @@ def match(tgt, opts=None):
# Easy check first
if word in opers:
if results:
- if results[-1] == '(' and word in ('and', 'or'):
+ if results[-1] == "(" and word in ("and", "or"):
log.error('Invalid beginning operator after "(": %s', word)
return False
- if word == 'not':
- if not results[-1] in ('and', 'or', '('):
- results.append('and')
+ if word == "not":
+ if not results[-1] in ("and", "or", "("):
+ results.append("and")
results.append(word)
else:
# seq start with binary oper, fail
- if word not in ['(', 'not']:
- log.error('Invalid beginning operator: %s', word)
+ if word not in ["(", "not"]:
+ log.error("Invalid beginning operator: %s", word)
return False
results.append(word)
- elif target_info and target_info['engine']:
- if 'N' == target_info['engine']:
+ elif target_info and target_info["engine"]:
+ if "N" == target_info["engine"]:
# if we encounter a node group, just evaluate it in-place
- decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
+ decomposed = salt.utils.minions.nodegroup_comp(
+ target_info["pattern"], nodegroups
+ )
if decomposed:
words = decomposed + words
continue
- engine = ref.get(target_info['engine'])
+ engine = ref.get(target_info["engine"])
if not engine:
# If an unknown engine is called at any time, fail out
log.error(
- 'Unrecognized target engine "%s" for target '
- 'expression "%s"', target_info['engine'], word
+ 'Unrecognized target engine "%s" for target ' 'expression "%s"',
+ target_info["engine"],
+ word,
)
return False
- engine_args = [target_info['pattern']]
- engine_kwargs = {'opts': opts}
- if target_info['delimiter']:
- engine_kwargs['delimiter'] = target_info['delimiter']
+ engine_args = [target_info["pattern"]]
+ engine_kwargs = {"opts": opts}
+ if target_info["delimiter"]:
+ engine_kwargs["delimiter"] = target_info["delimiter"]
results.append(
- six.text_type(matchers['{0}_match.match'.format(engine)](*engine_args, **engine_kwargs))
+ six.text_type(
+ matchers["{0}_match.match".format(engine)](
+ *engine_args, **engine_kwargs
+ )
+ )
)
else:
# The match is not explicitly defined, evaluate it as a glob
- results.append(six.text_type(matchers['glob_match.match'](word, opts)))
+ results.append(six.text_type(matchers["glob_match.match"](word, opts)))
- results = ' '.join(results)
+ results = " ".join(results)
log.debug('compound_match %s ? "%s" => "%s"', minion_id, tgt, results)
try:
return eval(results) # pylint: disable=W0123
except Exception: # pylint: disable=broad-except
- log.error(
- 'Invalid compound target: %s for results: %s', tgt, results)
+ log.error("Invalid compound target: %s for results: %s", tgt, results)
return False
return False
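
The loop above reduces a compound target to a string of `True`/`False` tokens and boolean operators, which is then handed to `eval()`. A toy version with a glob-only engine makes the mechanism visible (minion ids here are invented; the real matcher dispatches on the G@/I@/L@/E@... prefixes in the `ref` table):

import fnmatch

def toy_compound(tgt, minion_id):
    opers = ["and", "or", "not", "(", ")"]
    results = []
    for word in tgt.split():
        if word in opers:
            results.append(word)
        else:
            # every bare word is evaluated as a glob in this sketch
            results.append(str(fnmatch.fnmatch(minion_id, word)))
    return eval(" ".join(results))  # pylint: disable=W0123

print(toy_compound("web* and not web3", "web1"))  # True
print(toy_compound("web* and not web3", "web3"))  # False
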
diff --git a/salt/matchers/compound_pillar_exact_match.py b/salt/matchers/compound_pillar_exact_match.py
index 21346e804cd..1d9112671ab 100644
--- a/salt/matchers/compound_pillar_exact_match.py
+++ b/salt/matchers/compound_pillar_exact_match.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default pillar exact matcher for compound matches.
There is no minion-side equivalent for this, so there is no ``match()``
function below, only an ``mmatch()``
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
@@ -15,12 +15,11 @@ log = logging.getLogger(__name__)
def mmatch(expr, delimiter, greedy, opts=None):
- '''
+ """
Return the minions found by looking via pillar
- '''
+ """
if not opts:
opts = __opts__
ckminions = salt.utils.minions.CkMinions(opts)
- return ckminions._check_compound_minions(expr, delimiter, greedy,
- pillar_exact=True)
+ return ckminions._check_compound_minions(expr, delimiter, greedy, pillar_exact=True)
diff --git a/salt/matchers/confirm_top.py b/salt/matchers/confirm_top.py
index dfca546a11b..05581d8df98 100644
--- a/salt/matchers/confirm_top.py
+++ b/salt/matchers/confirm_top.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
-'''
+"""
The matcher subsystem needs a function called 'confirm_top', which
takes the data passed to a top file environment and determines if that
data matches this minion.
-'''
+"""
from __future__ import absolute_import
+
import logging
import salt.loader
@@ -13,23 +14,22 @@ log = logging.getLogger(__file__)
def confirm_top(match, data, nodegroups=None):
- '''
+ """
Takes the data passed to a top file environment and determines if the
data matches this minion
- '''
- matcher = 'compound'
+ """
+ matcher = "compound"
if not data:
- log.error('Received bad data when setting the match from the top '
- 'file')
+ log.error("Received bad data when setting the match from the top " "file")
return False
for item in data:
if isinstance(item, dict):
- if 'match' in item:
- matcher = item['match']
+ if "match" in item:
+ matcher = item["match"]
matchers = salt.loader.matchers(__opts__)
- funcname = matcher + '_match.match'
- if matcher == 'nodegroup':
+ funcname = matcher + "_match.match"
+ if matcher == "nodegroup":
return matchers[funcname](match, nodegroups)
else:
m = matchers[funcname]
diff --git a/salt/matchers/data_match.py b/salt/matchers/data_match.py
index 3109627a6dd..20b10b9aa25 100644
--- a/salt/matchers/data_match.py
+++ b/salt/matchers/data_match.py
@@ -1,34 +1,34 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default data matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
import logging
-from salt.ext import six # pylint: disable=3rd-party-module-not-gated
+import salt.loader # pylint: disable=3rd-party-module-not-gated
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
import salt.utils.minions # pylint: disable=3rd-party-module-not-gated
import salt.utils.network # pylint: disable=3rd-party-module-not-gated
-import salt.loader # pylint: disable=3rd-party-module-not-gated
+from salt.ext import six # pylint: disable=3rd-party-module-not-gated
log = logging.getLogger(__name__)
def match(tgt, functions=None, opts=None):
- '''
+ """
Match based on the local data store on the minion
- '''
+ """
if not opts:
opts = __opts__
if functions is None:
utils = salt.loader.utils(opts)
functions = salt.loader.minion_mods(opts, utils=utils)
- comps = tgt.split(':')
+ comps = tgt.split(":")
if len(comps) < 2:
return False
- val = functions['data.getval'](comps[0])
+ val = functions["data.getval"](comps[0])
if val is None:
# The value is not defined
return False
@@ -42,7 +42,4 @@ def match(tgt, functions=None, opts=None):
if comps[1] in val:
return True
return False
- return bool(fnmatch.fnmatch(
- val,
- comps[1],
- ))
+ return bool(fnmatch.fnmatch(val, comps[1],))
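
The data matcher's target format is ``<key>:<pattern>``: container values are compared by membership, and strings fall through to fnmatch, as the final return above shows. A self-contained approximation over a plain dict standing in for the minion data store (store contents are made up):

import fnmatch

def toy_data_match(tgt, store):
    comps = tgt.split(":")
    if len(comps) < 2:
        return False
    val = store.get(comps[0])
    if val is None:
        return False
    if isinstance(val, (list, dict)):
        return comps[1] in val
    return bool(fnmatch.fnmatch(val, comps[1]))

print(toy_data_match("role:web*", {"role": "webserver"}))     # True
print(toy_data_match("roles:web", {"roles": ["web", "db"]}))  # True
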
diff --git a/salt/matchers/glob_match.py b/salt/matchers/glob_match.py
index 1934585d68c..88740289693 100644
--- a/salt/matchers/glob_match.py
+++ b/salt/matchers/glob_match.py
@@ -1,20 +1,21 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default glob matcher function.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
+
from salt.ext import six # pylint: disable=3rd-party-module-not-gated
def match(tgt, opts=None):
- '''
+ """
Returns true if the passed glob matches the id
- '''
+ """
if not opts:
opts = __opts__
- minion_id = opts.get('minion_id', opts['id'])
+ minion_id = opts.get("minion_id", opts["id"])
if not isinstance(tgt, six.string_types):
return False
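
Past the type check shown above, the glob matcher reduces to a plain fnmatch of the target against the minion id (the ids below are examples):

import fnmatch

print(fnmatch.fnmatch("web1.example.com", "web*"))  # True
print(fnmatch.fnmatch("db1.example.com", "web*"))   # False
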
diff --git a/salt/matchers/grain_match.py b/salt/matchers/grain_match.py
index 5318023bc84..99a7c3ec44d 100644
--- a/salt/matchers/grain_match.py
+++ b/salt/matchers/grain_match.py
@@ -1,30 +1,31 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default grains matcher function.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.defaults import DEFAULT_TARGET_DELIM # pylint: disable=3rd-party-module-not-gated
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
+from salt.defaults import ( # pylint: disable=3rd-party-module-not-gated
+ DEFAULT_TARGET_DELIM,
+)
log = logging.getLogger(__name__)
def match(tgt, delimiter=DEFAULT_TARGET_DELIM, opts=None):
- '''
+ """
Reads in the grains glob match
- '''
+ """
if not opts:
opts = __opts__
- log.debug('grains target: %s', tgt)
+ log.debug("grains target: %s", tgt)
if delimiter not in tgt:
- log.error('Got insufficient arguments for grains match '
- 'statement from master')
+ log.error(
+ "Got insufficient arguments for grains match " "statement from master"
+ )
return False
- return salt.utils.data.subdict_match(
- opts['grains'], tgt, delimiter=delimiter
- )
+ return salt.utils.data.subdict_match(opts["grains"], tgt, delimiter=delimiter)
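
`subdict_match` splits the target on the delimiter and compares the addressed grain with glob semantics, so a target like `os_family:RedHat` reads as "the grain `os_family` fnmatches `RedHat`". A quick check (requires a Salt checkout or install on the path; the grains are invented):

import salt.utils.data

grains = {"os_family": "RedHat", "num_cpus": "4"}
print(salt.utils.data.subdict_match(grains, "os_family:RedHat", delimiter=":"))  # True
print(salt.utils.data.subdict_match(grains, "os_family:Debian", delimiter=":"))  # False

The grain_pcre matcher in the next file is the same traversal with `regex_match=True`, swapping the glob comparison for a regex one.
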
diff --git a/salt/matchers/grain_pcre_match.py b/salt/matchers/grain_pcre_match.py
index b429f4e29b5..de8921859ad 100644
--- a/salt/matchers/grain_pcre_match.py
+++ b/salt/matchers/grain_pcre_match.py
@@ -1,28 +1,32 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default grains PCRE matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.defaults import DEFAULT_TARGET_DELIM # pylint: disable=3rd-party-module-not-gated
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
+from salt.defaults import ( # pylint: disable=3rd-party-module-not-gated
+ DEFAULT_TARGET_DELIM,
+)
log = logging.getLogger(__name__)
def match(tgt, delimiter=DEFAULT_TARGET_DELIM, opts=None):
- '''
+ """
Matches a grain based on regex
- '''
+ """
if not opts:
opts = __opts__
- log.debug('grains pcre target: %s', tgt)
+ log.debug("grains pcre target: %s", tgt)
if delimiter not in tgt:
- log.error('Got insufficient arguments for grains pcre match '
- 'statement from master')
+ log.error(
+ "Got insufficient arguments for grains pcre match " "statement from master"
+ )
return False
return salt.utils.data.subdict_match(
- opts['grains'], tgt, delimiter=delimiter, regex_match=True)
+ opts["grains"], tgt, delimiter=delimiter, regex_match=True
+ )
diff --git a/salt/matchers/ipcidr_match.py b/salt/matchers/ipcidr_match.py
index 0c5d71e802d..ca0ac5a4077 100644
--- a/salt/matchers/ipcidr_match.py
+++ b/salt/matchers/ipcidr_match.py
@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default ipcidr matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.ext import six # pylint: disable=3rd-party-module-not-gated
import salt.utils.network # pylint: disable=3rd-party-module-not-gated
+from salt.ext import six # pylint: disable=3rd-party-module-not-gated
if six.PY3:
import ipaddress
@@ -18,9 +18,9 @@ log = logging.getLogger(__name__)
def match(tgt, opts=None):
- '''
+ """
Matches based on IP address or CIDR notation
- '''
+ """
if not opts:
opts = __opts__
@@ -32,11 +32,11 @@ def match(tgt, opts=None):
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
- log.error('Invalid IP/CIDR target: %s', tgt)
+ log.error("Invalid IP/CIDR target: %s", tgt)
return []
- proto = 'ipv{0}'.format(tgt.version)
+ proto = "ipv{0}".format(tgt.version)
- grains = opts['grains']
+ grains = opts["grains"]
if proto not in grains:
match = False
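
Once the target parses as an `ipaddress` object above, checking the minion's `ipv4`/`ipv6` grains against it is a stdlib containment test (the addresses below are documentation examples):

import ipaddress

tgt = ipaddress.ip_network("192.0.2.0/24")
minion_ips = ["192.0.2.10", "198.51.100.7"]
print([ip for ip in minion_ips if ipaddress.ip_address(ip) in tgt])
# ['192.0.2.10']
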
diff --git a/salt/matchers/list_match.py b/salt/matchers/list_match.py
index 01d9ada71b2..b15f125ca5c 100644
--- a/salt/matchers/list_match.py
+++ b/salt/matchers/list_match.py
@@ -1,33 +1,36 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default list matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
log = logging.getLogger(__name__)
def match(tgt, opts=None):
- '''
+ """
Determines if this host is on the list
- '''
+ """
if not opts:
opts = __opts__
try:
- if ',' + opts['id'] + ',' in tgt \
- or tgt.startswith(opts['id'] + ',') \
- or tgt.endswith(',' + opts['id']):
+ if (
+ "," + opts["id"] + "," in tgt
+ or tgt.startswith(opts["id"] + ",")
+ or tgt.endswith("," + opts["id"])
+ ):
return True
# tgt is a string, which we know because the if statement above did not
# cause one of the exceptions being caught. Therefore, look for an
# exact match. (e.g. salt -L foo test.ping)
- return opts['id'] == tgt
+ return opts["id"] == tgt
except (AttributeError, TypeError):
# tgt is not a string, maybe it's a sequence type?
try:
- return opts['id'] in tgt
+ return opts["id"] in tgt
except Exception: # pylint: disable=broad-except
# tgt was likely some invalid type
return False
@@ -36,7 +39,8 @@ def match(tgt, opts=None):
    # above. If we do, it is because something above changed, and should be
    # considered a bug. Log a warning to help us catch this.
log.warning(
- 'List matcher unexpectedly did not return, for target %s, '
- 'this is probably a bug.', tgt
+ "List matcher unexpectedly did not return, for target %s, "
+ "this is probably a bug.",
+ tgt,
)
return False
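
The three substring checks above cover an id in the middle, at the start, or at the end of a comma-separated target, with a final exact-match fallback. In isolation (ids invented):

def toy_list_match(tgt, minion_id):
    if isinstance(tgt, str):
        return (
            "," + minion_id + "," in tgt
            or tgt.startswith(minion_id + ",")
            or tgt.endswith("," + minion_id)
            or minion_id == tgt
        )
    return minion_id in tgt

print(toy_list_match("web1,web2,web3", "web2"))  # True
print(toy_list_match("web1,web2,web3", "web"))   # False: 'web' is not an entry
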
diff --git a/salt/matchers/nodegroup_match.py b/salt/matchers/nodegroup_match.py
index 1e5fb04c220..146b1de23b9 100644
--- a/salt/matchers/nodegroup_match.py
+++ b/salt/matchers/nodegroup_match.py
@@ -1,30 +1,31 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default nodegroup matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-import salt.utils.minions # pylint: disable=3rd-party-module-not-gated
-import salt.loader
import logging
+import salt.loader
+import salt.utils.minions # pylint: disable=3rd-party-module-not-gated
+
log = logging.getLogger(__name__)
def match(tgt, nodegroups=None, opts=None):
- '''
+ """
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
- '''
+ """
if not opts:
opts = __opts__
if not nodegroups:
- log.debug('Nodegroup matcher called with no nodegroups.')
+ log.debug("Nodegroup matcher called with no nodegroups.")
return False
if tgt in nodegroups:
matchers = salt.loader.matchers(opts)
- return matchers['compound_match.match'](
+ return matchers["compound_match.match"](
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
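
As the call above shows, a nodegroup is not matched directly: `nodegroup_comp` first expands it into compound-matcher tokens, which `compound_match.match` then evaluates. With Salt importable and a made-up nodegroups mapping:

import salt.utils.minions

nodegroups = {"web": "L@web1,web2 or G@role:web"}
print(salt.utils.minions.nodegroup_comp("web", nodegroups))
# roughly: ['(', 'L@web1,web2', 'or', 'G@role:web', ')']
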
diff --git a/salt/matchers/pcre_match.py b/salt/matchers/pcre_match.py
index 4133128d632..c94bcc607d8 100644
--- a/salt/matchers/pcre_match.py
+++ b/salt/matchers/pcre_match.py
@@ -1,17 +1,17 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default pcre matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import re
def match(tgt, opts=None):
- '''
+ """
Returns true if the passed pcre regex matches
- '''
+ """
if not opts:
- return bool(re.match(tgt, __opts__['id']))
+ return bool(re.match(tgt, __opts__["id"]))
else:
- return bool(re.match(tgt, opts['id']))
+ return bool(re.match(tgt, opts["id"]))
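
Note that `re.match` anchors only at the beginning of the minion id, so a pcre target behaves as a prefix match unless it is anchored at the end as well:

import re

print(bool(re.match(r"web\d+", "web12.example.com")))   # True: the prefix matches
print(bool(re.match(r"web\d+$", "web12.example.com")))  # False: '$' must land at the end of the id
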
diff --git a/salt/matchers/pillar_exact_match.py b/salt/matchers/pillar_exact_match.py
index 564f5c04ffe..ffb4c55f464 100644
--- a/salt/matchers/pillar_exact_match.py
+++ b/salt/matchers/pillar_exact_match.py
@@ -1,32 +1,34 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default pillar exact matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
+
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
log = logging.getLogger(__name__)
-def match(tgt, delimiter=':', opts=None):
- '''
+def match(tgt, delimiter=":", opts=None):
+ """
Reads in the pillar match, no globbing, no PCRE
- '''
+ """
if not opts:
opts = __opts__
- log.debug('pillar target: %s', tgt)
+ log.debug("pillar target: %s", tgt)
if delimiter not in tgt:
- log.error('Got insufficient arguments for pillar match '
- 'statement from master')
+ log.error(
+ "Got insufficient arguments for pillar match " "statement from master"
+ )
return False
- if 'pillar' in opts:
- pillar = opts['pillar']
- elif 'ext_pillar' in opts:
- log.info('No pillar found, fallback to ext_pillar')
- pillar = opts['ext_pillar']
+ if "pillar" in opts:
+ pillar = opts["pillar"]
+ elif "ext_pillar" in opts:
+ log.info("No pillar found, fallback to ext_pillar")
+ pillar = opts["ext_pillar"]
return salt.utils.data.subdict_match(
pillar, tgt, delimiter=delimiter, exact_match=True
diff --git a/salt/matchers/pillar_match.py b/salt/matchers/pillar_match.py
index 32d681130fe..c69f49ce6a0 100644
--- a/salt/matchers/pillar_match.py
+++ b/salt/matchers/pillar_match.py
@@ -1,34 +1,36 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default pillar matcher function.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.defaults import DEFAULT_TARGET_DELIM # pylint: disable=3rd-party-module-not-gated
+
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
+from salt.defaults import ( # pylint: disable=3rd-party-module-not-gated
+ DEFAULT_TARGET_DELIM,
+)
log = logging.getLogger(__name__)
def match(tgt, delimiter=DEFAULT_TARGET_DELIM, opts=None):
- '''
+ """
Reads in the pillar glob match
- '''
+ """
if not opts:
opts = __opts__
- log.debug('pillar target: %s', tgt)
+ log.debug("pillar target: %s", tgt)
if delimiter not in tgt:
- log.error('Got insufficient arguments for pillar match '
- 'statement from master')
+ log.error(
+ "Got insufficient arguments for pillar match " "statement from master"
+ )
return False
- if 'pillar' in opts:
- pillar = opts['pillar']
- elif 'ext_pillar' in opts:
- log.info('No pillar found, fallback to ext_pillar')
- pillar = opts['ext_pillar']
+ if "pillar" in opts:
+ pillar = opts["pillar"]
+ elif "ext_pillar" in opts:
+ log.info("No pillar found, fallback to ext_pillar")
+ pillar = opts["ext_pillar"]
- return salt.utils.data.subdict_match(
- pillar, tgt, delimiter=delimiter
- )
+ return salt.utils.data.subdict_match(pillar, tgt, delimiter=delimiter)
diff --git a/salt/matchers/pillar_pcre_match.py b/salt/matchers/pillar_pcre_match.py
index 3fbf0e6f941..27d9b25aad9 100644
--- a/salt/matchers/pillar_pcre_match.py
+++ b/salt/matchers/pillar_pcre_match.py
@@ -1,33 +1,37 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default pillar PCRE matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
-from salt.defaults import DEFAULT_TARGET_DELIM # pylint: disable=3rd-party-module-not-gated
+
import salt.utils.data # pylint: disable=3rd-party-module-not-gated
+from salt.defaults import ( # pylint: disable=3rd-party-module-not-gated
+ DEFAULT_TARGET_DELIM,
+)
log = logging.getLogger(__name__)
def match(tgt, delimiter=DEFAULT_TARGET_DELIM, opts=None):
- '''
+ """
Reads in the pillar pcre match
- '''
+ """
if not opts:
opts = __opts__
- log.debug('pillar PCRE target: %s', tgt)
+ log.debug("pillar PCRE target: %s", tgt)
if delimiter not in tgt:
- log.error('Got insufficient arguments for pillar PCRE match '
- 'statement from master')
+ log.error(
+ "Got insufficient arguments for pillar PCRE match " "statement from master"
+ )
return False
- if 'pillar' in opts:
- pillar = opts['pillar']
- elif 'ext_pillar' in opts:
- log.info('No pillar found, fallback to ext_pillar')
- pillar = opts['ext_pillar']
+ if "pillar" in opts:
+ pillar = opts["pillar"]
+ elif "ext_pillar" in opts:
+ log.info("No pillar found, fallback to ext_pillar")
+ pillar = opts["ext_pillar"]
return salt.utils.data.subdict_match(
pillar, tgt, delimiter=delimiter, regex_match=True
diff --git a/salt/matchers/range_match.py b/salt/matchers/range_match.py
index d37d9b790a1..8ec1f443607 100644
--- a/salt/matchers/range_match.py
+++ b/salt/matchers/range_match.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
This is the default range matcher.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
@@ -9,6 +9,7 @@ import logging
HAS_RANGE = False
try:
import seco.range
+
HAS_RANGE = True
except ImportError:
pass
@@ -17,16 +18,16 @@ log = logging.getLogger(__name__)
def match(tgt, opts=None):
- '''
+ """
Matches based on range cluster
- '''
+ """
if not opts:
opts = __opts__
if HAS_RANGE:
- range_ = seco.range.Range(opts['range_server'])
+ range_ = seco.range.Range(opts["range_server"])
try:
- return opts['grains']['fqdn'] in range_.expand(tgt)
+ return opts["grains"]["fqdn"] in range_.expand(tgt)
except seco.range.RangeException as exc:
- log.debug('Range exception in compound match: %s', exc)
+ log.debug("Range exception in compound match: %s", exc)
return False
return False
diff --git a/salt/metaproxy/__init__.py b/salt/metaproxy/__init__.py
index 2ada1ab598b..70e6568dd29 100644
--- a/salt/metaproxy/__init__.py
+++ b/salt/metaproxy/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-'''
+"""
Metaproxy Directory
-'''
+"""
diff --git a/salt/metaproxy/proxy.py b/salt/metaproxy/proxy.py
index cc59d477c9f..07e0ff033ba 100644
--- a/salt/metaproxy/proxy.py
+++ b/salt/metaproxy/proxy.py
@@ -2,29 +2,38 @@
#
# Proxy minion metaproxy modules
#
-from __future__ import absolute_import, print_function, with_statement, unicode_literals
+from __future__ import absolute_import, print_function, unicode_literals, with_statement
+
+import logging
import os
import signal
import sys
-import types
-import logging
import threading
import traceback
+import types
# Import Salt Libs
# pylint: disable=3rd-party-module-not-gated
import salt
+import salt.beacons
+import salt.cli.daemons
import salt.client
import salt.crypt
-import salt.loader
-import salt.beacons
+import salt.defaults.exitcodes
import salt.engines
+import salt.ext.tornado.gen # pylint: disable=F0401
+import salt.ext.tornado.ioloop # pylint: disable=F0401
+import salt.loader
+import salt.log.setup
+import salt.minion
import salt.payload
import salt.pillar
+import salt.serializers.msgpack
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
+import salt.utils.dictupdate
import salt.utils.error
import salt.utils.event
import salt.utils.files
@@ -38,15 +47,7 @@ import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
-import salt.defaults.exitcodes
-import salt.cli.daemons
-import salt.log.setup
-import salt.serializers.msgpack
-import salt.minion
-import salt.defaults.exitcodes
-
-import salt.utils.dictupdate
-from salt.utils.event import tagify
+from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
@@ -56,14 +57,8 @@ from salt.exceptions import (
from salt.ext import six
from salt.ext.six.moves import range
from salt.minion import ProxyMinion
-
-from salt.defaults import DEFAULT_TARGET_DELIM
-from salt.utils.process import (default_signals,
- SignalHandlingProcess)
-
-
-import salt.ext.tornado.gen # pylint: disable=F0401
-import salt.ext.tornado.ioloop # pylint: disable=F0401
+from salt.utils.event import tagify
+from salt.utils.process import SignalHandlingProcess, default_signals
log = logging.getLogger(__name__)
@@ -71,55 +66,69 @@ log = logging.getLogger(__name__)
def post_master_init(self, master):
log.debug("subclassed LazyLoaded _post_master_init")
if self.connected:
- self.opts['master'] = master
+ self.opts["master"] = master
- self.opts['pillar'] = yield salt.pillar.get_async_pillar(
+ self.opts["pillar"] = yield salt.pillar.get_async_pillar(
self.opts,
- self.opts['grains'],
- self.opts['id'],
- saltenv=self.opts['saltenv'],
- pillarenv=self.opts.get('pillarenv'),
+ self.opts["grains"],
+ self.opts["id"],
+ saltenv=self.opts["saltenv"],
+ pillarenv=self.opts.get("pillarenv"),
).compile_pillar()
- if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
- errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
- 'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
+ if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
+ errmsg = (
+ "No proxy key found in pillar or opts for id "
+ + self.opts["id"]
+ + ". "
+ + "Check your pillar/opts configuration and contents. Salt-proxy aborted."
+ )
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
- if 'proxy' not in self.opts:
- self.opts['proxy'] = self.opts['pillar']['proxy']
+ if "proxy" not in self.opts:
+ self.opts["proxy"] = self.opts["pillar"]["proxy"]
- if self.opts.get('proxy_merge_pillar_in_opts'):
+ if self.opts.get("proxy_merge_pillar_in_opts"):
# Override proxy opts with pillar data when the user required.
- self.opts = salt.utils.dictupdate.merge(self.opts,
- self.opts['pillar'],
- strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
- merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
- elif self.opts.get('proxy_mines_pillar'):
+ self.opts = salt.utils.dictupdate.merge(
+ self.opts,
+ self.opts["pillar"],
+ strategy=self.opts.get("proxy_merge_pillar_in_opts_strategy"),
+ merge_lists=self.opts.get("proxy_deep_merge_pillar_in_opts", False),
+ )
+ elif self.opts.get("proxy_mines_pillar"):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
- if 'mine_interval' in self.opts['pillar']:
- self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
- if 'mine_functions' in self.opts['pillar']:
- general_proxy_mines = self.opts.get('mine_functions', [])
- specific_proxy_mines = self.opts['pillar']['mine_functions']
+ if "mine_interval" in self.opts["pillar"]:
+ self.opts["mine_interval"] = self.opts["pillar"]["mine_interval"]
+ if "mine_functions" in self.opts["pillar"]:
+ general_proxy_mines = self.opts.get("mine_functions", [])
+ specific_proxy_mines = self.opts["pillar"]["mine_functions"]
try:
- self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
+ self.opts["mine_functions"] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
- log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
- self.opts['id']))
+ log.error(
+ "Unable to merge mine functions from the pillar in the opts, for proxy {}".format(
+ self.opts["id"]
+ )
+ )
- fq_proxyname = self.opts['proxy']['proxytype']
+ fq_proxyname = self.opts["proxy"]["proxytype"]
# Need to load the modules so they get all the dunder variables
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
- self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
+ self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
@@ -128,15 +137,20 @@ def post_master_init(self, master):
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
- self.functions.pack['__proxy__'] = self.proxy
- self.proxy.pack['__salt__'] = self.functions
- self.proxy.pack['__ret__'] = self.returners
- self.proxy.pack['__pillar__'] = self.opts['pillar']
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
+ self.functions.pack["__proxy__"] = self.proxy
+ self.proxy.pack["__salt__"] = self.functions
+ self.proxy.pack["__ret__"] = self.returners
+ self.proxy.pack["__pillar__"] = self.opts["pillar"]
    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
- self.proxy.pack['__utils__'] = self.utils
+ self.proxy.pack["__utils__"] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
@@ -144,124 +158,156 @@ def post_master_init(self, master):
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
- self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
- self.process_manager, proxy=self.proxy)
+ self.io_loop.spawn_callback(
+ salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy
+ )
- if ('{0}.init'.format(fq_proxyname) not in self.proxy
- or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
- errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
- 'Check your proxymodule. Salt-proxy aborted.'
+ if (
+ "{0}.init".format(fq_proxyname) not in self.proxy
+ or "{0}.shutdown".format(fq_proxyname) not in self.proxy
+ ):
+ errmsg = (
+ "Proxymodule {0} is missing an init() or a shutdown() or both. ".format(
+ fq_proxyname
+ )
+ + "Check your proxymodule. Salt-proxy aborted."
+ )
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
- self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
- proxy_init_fn = self.proxy[fq_proxyname + '.init']
+ self.module_executors = self.proxy.get(
+ "{0}.module_executors".format(fq_proxyname), lambda: []
+ )()
+ proxy_init_fn = self.proxy[fq_proxyname + ".init"]
proxy_init_fn(self.opts)
- self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
+ self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
- uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
- self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'], uid=uid)
+ uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
+ self.proc_dir = salt.minion.get_proc_dir(self.opts["cachedir"], uid=uid)
- if self.connected and self.opts['pillar']:
+ if self.connected and self.opts["pillar"]:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
- if hasattr(self, 'schedule'):
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
+ if hasattr(self, "schedule"):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
- if not hasattr(self, 'schedule'):
+ if not hasattr(self, "schedule"):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
- cleanup=[salt.minion.master_event(type='alive')],
- proxy=self.proxy)
+ cleanup=[salt.minion.master_event(type="alive")],
+ proxy=self.proxy,
+ )
# add default scheduling jobs to the minions scheduler
- if self.opts['mine_enabled'] and 'mine.update' in self.functions:
- self.schedule.add_job({
- '__mine_interval':
- {
- 'function': 'mine.update',
- 'minutes': self.opts['mine_interval'],
- 'jid_include': True,
- 'maxrunning': 2,
- 'run_on_start': True,
- 'return_job': self.opts.get('mine_return_job', False)
+ if self.opts["mine_enabled"] and "mine.update" in self.functions:
+ self.schedule.add_job(
+ {
+ "__mine_interval": {
+ "function": "mine.update",
+ "minutes": self.opts["mine_interval"],
+ "jid_include": True,
+ "maxrunning": 2,
+ "run_on_start": True,
+ "return_job": self.opts.get("mine_return_job", False),
}
- }, persist=True)
- log.info('Added mine.update to scheduler')
+ },
+ persist=True,
+ )
+ log.info("Added mine.update to scheduler")
else:
- self.schedule.delete_job('__mine_interval', persist=True)
+ self.schedule.delete_job("__mine_interval", persist=True)
# add master_alive job if enabled
- if (self.opts['transport'] != 'tcp' and
- self.opts['master_alive_interval'] > 0):
- self.schedule.add_job({
- salt.minion.master_event(type='alive', master=self.opts['master']):
- {
- 'function': 'status.master',
- 'seconds': self.opts['master_alive_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master'],
- 'connected': True}
+ if self.opts["transport"] != "tcp" and self.opts["master_alive_interval"] > 0:
+ self.schedule.add_job(
+ {
+ salt.minion.master_event(type="alive", master=self.opts["master"]): {
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"master": self.opts["master"], "connected": True},
}
- }, persist=True)
- if self.opts['master_failback'] and \
- 'master_list' in self.opts and \
- self.opts['master'] != self.opts['master_list'][0]:
- self.schedule.add_job({
- salt.minion.master_event(type='failback'):
- {
- 'function': 'status.ping_master',
- 'seconds': self.opts['master_failback_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master_list'][0]}
+ },
+ persist=True,
+ )
+ if (
+ self.opts["master_failback"]
+ and "master_list" in self.opts
+ and self.opts["master"] != self.opts["master_list"][0]
+ ):
+ self.schedule.add_job(
+ {
+ salt.minion.master_event(type="failback"): {
+ "function": "status.ping_master",
+ "seconds": self.opts["master_failback_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"master": self.opts["master_list"][0]},
}
- }, persist=True)
+ },
+ persist=True,
+ )
else:
- self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
+ self.schedule.delete_job(
+ salt.minion.master_event(type="failback"), persist=True
+ )
else:
- self.schedule.delete_job(salt.minion.master_event(type='alive', master=self.opts['master']), persist=True)
- self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
+ self.schedule.delete_job(
+ salt.minion.master_event(type="alive", master=self.opts["master"]),
+ persist=True,
+ )
+ self.schedule.delete_job(
+ salt.minion.master_event(type="failback"), persist=True
+ )
# proxy keepalive
- proxy_alive_fn = fq_proxyname+'.alive'
- if (proxy_alive_fn in self.proxy
- and 'status.proxy_reconnect' in self.functions
- and self.opts.get('proxy_keep_alive', True)):
+ proxy_alive_fn = fq_proxyname + ".alive"
+ if (
+ proxy_alive_fn in self.proxy
+ and "status.proxy_reconnect" in self.functions
+ and self.opts.get("proxy_keep_alive", True)
+ ):
# if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
- self.schedule.add_job({
- '__proxy_keepalive':
- {
- 'function': 'status.proxy_reconnect',
- 'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {
- 'proxy_name': fq_proxyname
- }
+ self.schedule.add_job(
+ {
+ "__proxy_keepalive": {
+ "function": "status.proxy_reconnect",
+ "minutes": self.opts.get(
+ "proxy_keep_alive_interval", 1
+ ), # by default, check once per minute
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"proxy_name": fq_proxyname},
}
- }, persist=True)
+ },
+ persist=True,
+ )
self.schedule.enable_schedule()
else:
- self.schedule.delete_job('__proxy_keepalive', persist=True)
+ self.schedule.delete_job("__proxy_keepalive", persist=True)
# Sync the grains here so the proxy can communicate them to the master
- self.functions['saltutil.sync_grains'](saltenv='base')
- self.grains_cache = self.opts['grains']
+ self.functions["saltutil.sync_grains"](saltenv="base")
+ self.grains_cache = self.opts["grains"]
self.ready = True
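All of the reformatted add_job calls above share one shape: a dict keyed by the job name, with the schedule options nested beneath it, passed alongside persist=True. A minimal sketch of that pattern, assuming `schedule` is the salt.utils.schedule.Schedule instance built earlier in this method (the job name and values here are illustrative only):

    schedule.add_job(
        {
            "__example_interval": {
                "function": "test.ping",  # any loaded execution function
                "minutes": 5,             # run every five minutes
                "jid_include": True,
                "maxrunning": 1,
                "return_job": False,
            }
        },
        persist=True,  # write the job to the persistent schedule
    )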
@@ -270,11 +316,14 @@ def target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
- if not hasattr(minion_instance, 'functions'):
+ if not hasattr(minion_instance, "functions"):
# Need to load the modules so they get all the dunder variables
- functions, returners, function_errors, executors = (
- minion_instance._load_modules(grains=opts['grains'])
- )
+ (
+ functions,
+ returners,
+ function_errors,
+ executors,
+ ) = minion_instance._load_modules(grains=opts["grains"])
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
@@ -284,117 +333,152 @@ def target(cls, minion_instance, opts, data, connected):
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
- minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
+ minion_instance.proxy = salt.loader.proxy(
+ minion_instance.opts, utils=minion_instance.utils
+ )
# And re-load the modules so the __proxy__ variable gets injected
- functions, returners, function_errors, executors = (
- minion_instance._load_modules(grains=opts['grains'])
- )
+ (
+ functions,
+ returners,
+ function_errors,
+ executors,
+ ) = minion_instance._load_modules(grains=opts["grains"])
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
- minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
- minion_instance.proxy.pack['__salt__'] = minion_instance.functions
- minion_instance.proxy.pack['__ret__'] = minion_instance.returners
- minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
+ minion_instance.functions.pack["__proxy__"] = minion_instance.proxy
+ minion_instance.proxy.pack["__salt__"] = minion_instance.functions
+ minion_instance.proxy.pack["__ret__"] = minion_instance.returners
+ minion_instance.proxy.pack["__pillar__"] = minion_instance.opts["pillar"]
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
- minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
- minion_instance.proxy.pack['__utils__'] = minion_instance.utils
+ minion_instance.utils = salt.loader.utils(
+ minion_instance.opts, proxy=minion_instance.proxy
+ )
+ minion_instance.proxy.pack["__utils__"] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
- fq_proxyname = opts['proxy']['proxytype']
+ fq_proxyname = opts["proxy"]["proxytype"]
- minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
+ minion_instance.module_executors = minion_instance.proxy.get(
+ "{0}.module_executors".format(fq_proxyname), lambda: []
+ )()
- proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
+ proxy_init_fn = minion_instance.proxy[fq_proxyname + ".init"]
proxy_init_fn(opts)
- if not hasattr(minion_instance, 'serial'):
+ if not hasattr(minion_instance, "serial"):
minion_instance.serial = salt.payload.Serial(opts)
- if not hasattr(minion_instance, 'proc_dir'):
- uid = salt.utils.user.get_uid(user=opts.get('user', None))
- minion_instance.proc_dir = (
- salt.minion.get_proc_dir(opts['cachedir'], uid=uid)
+ if not hasattr(minion_instance, "proc_dir"):
+ uid = salt.utils.user.get_uid(user=opts.get("user", None))
+ minion_instance.proc_dir = salt.minion.get_proc_dir(
+ opts["cachedir"], uid=uid
)
with salt.ext.tornado.stack_context.StackContext(minion_instance.ctx):
- if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
+ if isinstance(data["fun"], tuple) or isinstance(data["fun"], list):
ProxyMinion._thread_multi_return(minion_instance, opts, data)
else:
ProxyMinion._thread_return(minion_instance, opts, data)
def thread_return(cls, minion_instance, opts, data):
- '''
+ """
This method should be used as a threading target, start the actual
minion side execution.
- '''
- fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
+ """
+ fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
- salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
+ salt.utils.process.appendproctitle(
+ "{0}._thread_return {1}".format(cls.__name__, data["jid"])
+ )
- sdata = {'pid': os.getpid()}
+ sdata = {"pid": os.getpid()}
sdata.update(data)
- log.info('Starting a new job with PID %s', sdata['pid'])
- with salt.utils.files.fopen(fn_, 'w+b') as fp_:
+ log.info("Starting a new job with PID %s", sdata["pid"])
+ with salt.utils.files.fopen(fn_, "w+b") as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
- ret = {'success': False}
- function_name = data['fun']
- executors = data.get('module_executors') or \
- getattr(minion_instance, 'module_executors', []) or \
- opts.get('module_executors', ['direct_call'])
- allow_missing_funcs = any([
- minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
- for executor in executors
- if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
- ])
+ ret = {"success": False}
+ function_name = data["fun"]
+ executors = (
+ data.get("module_executors")
+ or getattr(minion_instance, "module_executors", [])
+ or opts.get("module_executors", ["direct_call"])
+ )
+ allow_missing_funcs = any(
+ [
+ minion_instance.executors["{0}.allow_missing_func".format(executor)](
+ function_name
+ )
+ for executor in executors
+ if "{0}.allow_missing_func".format(executor) in minion_instance.executors
+ ]
+ )
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
- if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
- whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
+ if minion_instance.connected and minion_instance.opts["pillar"].get(
+ "minion_blackout", False
+ ):
+ whitelist = minion_instance.opts["pillar"].get(
+ "minion_blackout_whitelist", []
+ )
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
- if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
+ if (
+ function_name != "saltutil.refresh_pillar"
+ and function_name not in whitelist
+ ):
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
- if minion_instance.opts['grains'].get('minion_blackout', False):
- whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
- if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
+ if minion_instance.opts["grains"].get("minion_blackout", False):
+ whitelist = minion_instance.opts["grains"].get(
+ "minion_blackout_whitelist", []
+ )
+ if (
+ function_name != "saltutil.refresh_pillar"
+ and function_name not in whitelist
+ ):
minion_blackout_violation = True
if minion_blackout_violation:
- raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
- 'to False in pillar or grains to resume operations. Only '
- 'saltutil.refresh_pillar allowed in blackout mode.')
+ raise SaltInvocationError(
+ "Minion in blackout mode. Set 'minion_blackout' "
+ "to False in pillar or grains to resume operations. Only "
+ "saltutil.refresh_pillar allowed in blackout mode."
+ )
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
- args, kwargs = salt.minion.load_args_and_kwargs(
- func,
- data['arg'],
- data)
+ args, kwargs = salt.minion.load_args_and_kwargs(func, data["arg"], data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
- args, kwargs = data['arg'], data
- minion_instance.functions.pack['__context__']['retcode'] = 0
+ args, kwargs = data["arg"], data
+ minion_instance.functions.pack["__context__"]["retcode"] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
- raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
- format(executors))
- if opts.get('sudo_user', '') and executors[-1] != 'sudo':
- executors[-1] = 'sudo' # replace the last one with sudo
- log.trace('Executors list %s', executors) # pylint: disable=no-member
+ raise SaltInvocationError(
+ "Wrong executors specification: {0}. String or non-empty list expected".format(
+ executors
+ )
+ )
+ if opts.get("sudo_user", "") and executors[-1] != "sudo":
+ executors[-1] = "sudo" # replace the last one with sudo
+ log.trace("Executors list %s", executors) # pylint: disable=no-member
for name in executors:
- fname = '{0}.execute'.format(name)
+ fname = "{0}.execute".format(name)
if fname not in minion_instance.executors:
- raise SaltInvocationError("Executor '{0}' is not available".format(name))
- return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
+ raise SaltInvocationError(
+ "Executor '{0}' is not available".format(name)
+ )
+ return_data = minion_instance.executors[fname](
+ opts, data, func, args, kwargs
+ )
if return_data is not None:
break
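The executor lookup above resolves in a fixed precedence order: the job payload first, then the executors advertised by the proxymodule, then the minion config, falling back to direct_call. A hedged restatement of that chain as a standalone helper (the logic is copied from the expression above):

    def resolve_executors(data, minion_instance, opts):
        # Precedence: per-job data, then the instance attribute set from
        # the proxymodule, then the config option, then the default.
        return (
            data.get("module_executors")
            or getattr(minion_instance, "module_executors", [])
            or opts.get("module_executors", ["direct_call"])
        )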
@@ -408,261 +492,264 @@ def thread_return(cls, minion_instance, opts, data):
if not iret:
iret = []
iret.append(single)
- tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
- event_data = {'return': single}
+ tag = tagify(
+ [data["jid"], "prog", opts["id"], six.text_type(ind)], "job"
+ )
+ event_data = {"return": single}
minion_instance._fire_master(event_data, tag)
ind += 1
- ret['return'] = iret
+ ret["return"] = iret
else:
- ret['return'] = return_data
+ ret["return"] = return_data
- retcode = minion_instance.functions.pack['__context__'].get(
- 'retcode',
- salt.defaults.exitcodes.EX_OK
+ retcode = minion_instance.functions.pack["__context__"].get(
+ "retcode", salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
- func_result = all(return_data.get(x, True)
- for x in ('result', 'success'))
+ func_result = all(
+ return_data.get(x, True) for x in ("result", "success")
+ )
except Exception: # pylint: disable=broad-except
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
- ret['retcode'] = retcode
- ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
+ ret["retcode"] = retcode
+ ret["success"] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
- msg = 'Command required for \'{0}\' not found'.format(
- function_name
- )
+ msg = "Command required for '{0}' not found".format(function_name)
log.debug(msg, exc_info=True)
- ret['return'] = '{0}: {1}'.format(msg, exc)
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "{0}: {1}".format(msg, exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
- 'A command in \'%s\' had a problem: %s',
- function_name, exc,
- exc_info_on_loglevel=logging.DEBUG
+ "A command in '%s' had a problem: %s",
+ function_name,
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
)
- ret['return'] = 'ERROR: {0}'.format(exc)
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "ERROR: {0}".format(exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
- 'Problem executing \'%s\': %s',
- function_name, exc,
- exc_info_on_loglevel=logging.DEBUG
+ "Problem executing '%s': %s",
+ function_name,
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
)
- ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
- function_name, exc
- )
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "ERROR executing '{0}': {1}".format(function_name, exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
- msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
- function_name, exc, func.__doc__ or ''
+ msg = "Passed invalid arguments to {0}: {1}\n{2}".format(
+ function_name, exc, func.__doc__ or ""
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
- ret['return'] = msg
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = msg
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except Exception: # pylint: disable=broad-except
- msg = 'The minion function caused an exception'
+ msg = "The minion function caused an exception"
log.warning(msg, exc_info_on_loglevel=True)
- salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
- ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ salt.utils.error.fire_exception(
+ salt.exceptions.MinionError(msg), opts, job=data
+ )
+ ret["return"] = "{0}: {1}".format(msg, traceback.format_exc())
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
else:
- docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
+ docs = minion_instance.functions["sys.doc"]("{0}*".format(function_name))
if docs:
- docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
- ret['return'] = docs
+ docs[function_name] = minion_instance.functions.missing_fun_string(
+ function_name
+ )
+ ret["return"] = docs
else:
- ret['return'] = minion_instance.functions.missing_fun_string(function_name)
- mod_name = function_name.split('.')[0]
+ ret["return"] = minion_instance.functions.missing_fun_string(function_name)
+ mod_name = function_name.split(".")[0]
if mod_name in minion_instance.function_errors:
- ret['return'] += ' Possible reasons: \'{0}\''.format(
+ ret["return"] += " Possible reasons: '{0}'".format(
minion_instance.function_errors[mod_name]
)
- ret['success'] = False
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
- ret['out'] = 'nested'
+ ret["success"] = False
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
+ ret["out"] = "nested"
- ret['jid'] = data['jid']
- ret['fun'] = data['fun']
- ret['fun_args'] = data['arg']
- if 'master_id' in data:
- ret['master_id'] = data['master_id']
- if 'metadata' in data:
- if isinstance(data['metadata'], dict):
- ret['metadata'] = data['metadata']
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
+ if "master_id" in data:
+ ret["master_id"] = data["master_id"]
+ if "metadata" in data:
+ if isinstance(data["metadata"], dict):
+ ret["metadata"] = data["metadata"]
else:
- log.warning('The metadata parameter must be a dictionary. Ignoring.')
+ log.warning("The metadata parameter must be a dictionary. Ignoring.")
if minion_instance.connected:
- minion_instance._return_pub(
- ret,
- timeout=minion_instance._return_retry_timer()
- )
+ minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
# Add default returners from minion config
    # Should have been converted to a comma-delimited string already
- if isinstance(opts.get('return'), six.string_types):
- if data['ret']:
- data['ret'] = ','.join((data['ret'], opts['return']))
+ if isinstance(opts.get("return"), six.string_types):
+ if data["ret"]:
+ data["ret"] = ",".join((data["ret"], opts["return"]))
else:
- data['ret'] = opts['return']
+ data["ret"] = opts["return"]
- log.debug('minion return: %s', ret)
+ log.debug("minion return: %s", ret)
# TODO: make a list? Seems odd to split it this late :/
- if data['ret'] and isinstance(data['ret'], six.string_types):
- if 'ret_config' in data:
- ret['ret_config'] = data['ret_config']
- if 'ret_kwargs' in data:
- ret['ret_kwargs'] = data['ret_kwargs']
- ret['id'] = opts['id']
- for returner in set(data['ret'].split(',')):
+ if data["ret"] and isinstance(data["ret"], six.string_types):
+ if "ret_config" in data:
+ ret["ret_config"] = data["ret_config"]
+ if "ret_kwargs" in data:
+ ret["ret_kwargs"] = data["ret_kwargs"]
+ ret["id"] = opts["id"]
+ for returner in set(data["ret"].split(",")):
try:
- returner_str = '{0}.returner'.format(returner)
+ returner_str = "{0}.returner".format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
- returner_err = minion_instance.returners.missing_fun_string(returner_str)
+ returner_err = minion_instance.returners.missing_fun_string(
+ returner_str
+ )
log.error(
- 'Returner %s could not be loaded: %s',
- returner_str, returner_err
+ "Returner %s could not be loaded: %s",
+ returner_str,
+ returner_err,
)
except Exception as exc: # pylint: disable=broad-except
- log.exception(
- 'The return failed for job %s: %s', data['jid'], exc
- )
+ log.exception("The return failed for job %s: %s", data["jid"], exc)
def thread_multi_return(cls, minion_instance, opts, data):
- '''
+ """
This method should be used as a threading target, start the actual
minion side execution.
- '''
- fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
+ """
+ fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
- salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
+ salt.utils.process.appendproctitle(
+ "{0}._thread_multi_return {1}".format(cls.__name__, data["jid"])
+ )
- sdata = {'pid': os.getpid()}
+ sdata = {"pid": os.getpid()}
sdata.update(data)
- log.info('Starting a new job with PID %s', sdata['pid'])
- with salt.utils.files.fopen(fn_, 'w+b') as fp_:
+ log.info("Starting a new job with PID %s", sdata["pid"])
+ with salt.utils.files.fopen(fn_, "w+b") as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
- multifunc_ordered = opts.get('multifunc_ordered', False)
- num_funcs = len(data['fun'])
+ multifunc_ordered = opts.get("multifunc_ordered", False)
+ num_funcs = len(data["fun"])
if multifunc_ordered:
ret = {
- 'return': [None] * num_funcs,
- 'retcode': [None] * num_funcs,
- 'success': [False] * num_funcs
+ "return": [None] * num_funcs,
+ "retcode": [None] * num_funcs,
+ "success": [False] * num_funcs,
}
else:
- ret = {
- 'return': {},
- 'retcode': {},
- 'success': {}
- }
+ ret = {"return": {}, "retcode": {}, "success": {}}
for ind in range(0, num_funcs):
if not multifunc_ordered:
- ret['success'][data['fun'][ind]] = False
+ ret["success"][data["fun"][ind]] = False
try:
minion_blackout_violation = False
- if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
- whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
+ if minion_instance.connected and minion_instance.opts["pillar"].get(
+ "minion_blackout", False
+ ):
+ whitelist = minion_instance.opts["pillar"].get(
+ "minion_blackout_whitelist", []
+ )
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
- if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
+ if (
+ data["fun"][ind] != "saltutil.refresh_pillar"
+ and data["fun"][ind] not in whitelist
+ ):
minion_blackout_violation = True
- elif minion_instance.opts['grains'].get('minion_blackout', False):
- whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
- if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
+ elif minion_instance.opts["grains"].get("minion_blackout", False):
+ whitelist = minion_instance.opts["grains"].get(
+ "minion_blackout_whitelist", []
+ )
+ if (
+ data["fun"][ind] != "saltutil.refresh_pillar"
+ and data["fun"][ind] not in whitelist
+ ):
minion_blackout_violation = True
if minion_blackout_violation:
- raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
- 'to False in pillar or grains to resume operations. Only '
- 'saltutil.refresh_pillar allowed in blackout mode.')
+ raise SaltInvocationError(
+ "Minion in blackout mode. Set 'minion_blackout' "
+ "to False in pillar or grains to resume operations. Only "
+ "saltutil.refresh_pillar allowed in blackout mode."
+ )
- func = minion_instance.functions[data['fun'][ind]]
+ func = minion_instance.functions[data["fun"][ind]]
args, kwargs = salt.minion.load_args_and_kwargs(
- func,
- data['arg'][ind],
- data)
- minion_instance.functions.pack['__context__']['retcode'] = 0
- key = ind if multifunc_ordered else data['fun'][ind]
- ret['return'][key] = func(*args, **kwargs)
- retcode = minion_instance.functions.pack['__context__'].get(
- 'retcode',
- 0
+ func, data["arg"][ind], data
)
+ minion_instance.functions.pack["__context__"]["retcode"] = 0
+ key = ind if multifunc_ordered else data["fun"][ind]
+ ret["return"][key] = func(*args, **kwargs)
+ retcode = minion_instance.functions.pack["__context__"].get("retcode", 0)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
- func_result = all(ret['return'][key].get(x, True)
- for x in ('result', 'success'))
+ func_result = all(
+ ret["return"][key].get(x, True) for x in ("result", "success")
+ )
except Exception: # pylint: disable=broad-except
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
- ret['retcode'][key] = retcode
- ret['success'][key] = retcode == 0
+ ret["retcode"][key] = retcode
+ ret["success"][key] = retcode == 0
except Exception as exc: # pylint: disable=broad-except
trb = traceback.format_exc()
- log.warning('The minion function caused an exception: %s', exc)
+ log.warning("The minion function caused an exception: %s", exc)
if multifunc_ordered:
- ret['return'][ind] = trb
+ ret["return"][ind] = trb
else:
- ret['return'][data['fun'][ind]] = trb
- ret['jid'] = data['jid']
- ret['fun'] = data['fun']
- ret['fun_args'] = data['arg']
- if 'metadata' in data:
- ret['metadata'] = data['metadata']
+ ret["return"][data["fun"][ind]] = trb
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
+ if "metadata" in data:
+ ret["metadata"] = data["metadata"]
if minion_instance.connected:
- minion_instance._return_pub(
- ret,
- timeout=minion_instance._return_retry_timer()
- )
- if data['ret']:
- if 'ret_config' in data:
- ret['ret_config'] = data['ret_config']
- if 'ret_kwargs' in data:
- ret['ret_kwargs'] = data['ret_kwargs']
- for returner in set(data['ret'].split(',')):
- ret['id'] = opts['id']
+ minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
+ if data["ret"]:
+ if "ret_config" in data:
+ ret["ret_config"] = data["ret_config"]
+ if "ret_kwargs" in data:
+ ret["ret_kwargs"] = data["ret_kwargs"]
+ for returner in set(data["ret"].split(",")):
+ ret["id"] = opts["id"]
try:
- minion_instance.returners['{0}.returner'.format(
- returner
- )](ret)
+ minion_instance.returners["{0}.returner".format(returner)](ret)
except Exception as exc: # pylint: disable=broad-except
- log.error(
- 'The return failed for job %s: %s',
- data['jid'], exc
- )
+ log.error("The return failed for job %s: %s", data["jid"], exc)
def handle_payload(self, payload):
- if payload is not None and payload['enc'] == 'aes':
- if self._target_load(payload['load']):
+ if payload is not None and payload["enc"] == "aes":
+ if self._target_load(payload["load"]):
- self._handle_decoded_payload(payload['load'])
- elif self.opts['zmq_filtering']:
+ self._handle_decoded_payload(payload["load"])
+ elif self.opts["zmq_filtering"]:
            # In the filtering-enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
- 'Broadcast message received not for this minion, Load: %s',
- payload['load']
+ "Broadcast message received not for this minion, Load: %s",
+ payload["load"],
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
@@ -670,46 +757,54 @@ def handle_payload(self, payload):
def handle_decoded_payload(self, data):
- '''
+ """
Override this method if you wish to handle the decoded data
differently.
- '''
+ """
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
- if 'user' in data:
+ if "user" in data:
log.info(
- 'User %s Executing command %s with jid %s',
- data['user'], data['fun'], data['jid']
+ "User %s Executing command %s with jid %s",
+ data["user"],
+ data["fun"],
+ data["jid"],
)
else:
- log.info(
- 'Executing command %s with jid %s',
- data['fun'], data['jid']
- )
- log.debug('Command details %s', data)
+ log.info("Executing command %s with jid %s", data["fun"], data["jid"])
+ log.debug("Command details %s", data)
# Don't duplicate jobs
- log.trace('Started JIDs: %s', self.jid_queue)
+ log.trace("Started JIDs: %s", self.jid_queue)
if self.jid_queue is not None:
- if data['jid'] in self.jid_queue:
+ if data["jid"] in self.jid_queue:
return
else:
- self.jid_queue.append(data['jid'])
- if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
+ self.jid_queue.append(data["jid"])
+ if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
self.jid_queue.pop(0)
- if isinstance(data['fun'], six.string_types):
- if data['fun'] == 'sys.reload_modules':
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+ if isinstance(data["fun"], six.string_types):
+ if data["fun"] == "sys.reload_modules":
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
- process_count_max = self.opts.get('process_count_max')
+ process_count_max = self.opts.get("process_count_max")
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
- log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
+ log.warning(
+ "Maximum number of processes reached while executing jid {0}, waiting...".format(
+ data["jid"]
+ )
+ )
yield salt.ext.tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
@@ -718,23 +813,23 @@ def handle_decoded_payload(self, data):
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
- multiprocessing_enabled = self.opts.get('multiprocessing', True)
+ multiprocessing_enabled = self.opts.get("multiprocessing", True)
if multiprocessing_enabled:
- if sys.platform.startswith('win'):
+ if sys.platform.startswith("win"):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingProcess(
target=self._target,
- name='ProcessPayload',
- args=(instance, self.opts, data, self.connected)
+ name="ProcessPayload",
+ args=(instance, self.opts, data, self.connected),
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
- name=data['jid']
+ name=data["jid"],
)
if multiprocessing_enabled:
@@ -744,14 +839,13 @@ def handle_decoded_payload(self, data):
process.start()
else:
process.start()
- process.name = '{}-Job-{}'.format(process.name, data['jid'])
+ process.name = "{}-Job-{}".format(process.name, data["jid"])
self.subprocess_list.add(process)
def target_load(self, load):
# Verify that the publication is valid
- if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
- or 'arg' not in load:
+ if "tgt" not in load or "jid" not in load or "fun" not in load or "arg" not in load:
return False
# Verify that the publication applies to this minion
@@ -760,18 +854,18 @@ def target_load(self, load):
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
- if 'tgt_type' in load:
- match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
+ if "tgt_type" in load:
+ match_func = self.matchers.get("{0}_match.match".format(load["tgt_type"]), None)
if match_func is None:
return False
- if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
- delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
- if not match_func(load['tgt'], delimiter=delimiter):
+ if load["tgt_type"] in ("grain", "grain_pcre", "pillar"):
+ delimiter = load.get("delimiter", DEFAULT_TARGET_DELIM)
+ if not match_func(load["tgt"], delimiter=delimiter):
return False
- elif not match_func(load['tgt']):
+ elif not match_func(load["tgt"]):
return False
else:
- if not self.matchers['glob_match.match'](load['tgt']):
+ if not self.matchers["glob_match.match"](load["tgt"]):
return False
return True
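For context on the matcher dispatch in _target_load above: the publication's tgt_type selects a loader entry named "<tgt_type>_match.match", and grain, grain_pcre, and pillar targets additionally pass a delimiter. A hedged illustration with a hypothetical publication:

    load = {"tgt": "web*", "tgt_type": "glob", "jid": "1", "fun": "test.ping", "arg": []}
    match_func_name = "{0}_match.match".format(load["tgt_type"])
    # -> "glob_match.match", looked up in self.matchers; grain/pillar targets
    # would also receive delimiter=load.get("delimiter", DEFAULT_TARGET_DELIM)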
diff --git a/salt/minion.py b/salt/minion.py
index 4e4ac5e2e1b..b0808cbbd2c 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -1,77 +1,52 @@
# -*- coding: utf-8 -*-
-'''
+"""
Routines to set up a minion
-'''
+"""
# Import python libs
-from __future__ import absolute_import, print_function, with_statement, unicode_literals
-import functools
-import os
-import sys
-import copy
-import time
-import types
-import signal
-import random
-import logging
-import threading
-import traceback
+from __future__ import absolute_import, print_function, unicode_literals, with_statement
+
import contextlib
+import copy
+import functools
+import logging
import multiprocessing
+import os
+import random
+import signal
+import sys
+import threading
+import time
+import traceback
+import types
+from binascii import crc32
from random import randint, shuffle
from stat import S_IMODE
-import salt.serializers.msgpack
-from binascii import crc32
-# Import Salt Libs
-# pylint: disable=import-error,no-name-in-module,redefined-builtin
-from salt.ext import six
-from salt._compat import ipaddress
-from salt.utils.network import parse_host_port
-from salt.ext.six.moves import range
-from salt.template import SLS_ENCODING
-from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
-import salt.transport.client
-import salt.defaults.exitcodes
-import salt.utils.crypt
-
-from salt.utils.ctx import RequestContext
-
-# pylint: enable=no-name-in-module,redefined-builtin
-import salt.ext.tornado
-
-HAS_PSUTIL = False
-try:
- import salt.utils.psutil_compat as psutil
- HAS_PSUTIL = True
-except ImportError:
- pass
-
-HAS_RESOURCE = False
-try:
- import resource
- HAS_RESOURCE = True
-except ImportError:
- pass
-
-try:
- import salt.utils.win_functions
- HAS_WIN_FUNCTIONS = True
-except ImportError:
- HAS_WIN_FUNCTIONS = False
-# pylint: enable=import-error
# Import salt libs
import salt
+import salt.beacons
+import salt.cli.daemons
import salt.client
import salt.crypt
-import salt.loader
-import salt.beacons
+import salt.defaults.exitcodes
import salt.engines
+
+# pylint: enable=no-name-in-module,redefined-builtin
+import salt.ext.tornado
+import salt.ext.tornado.gen # pylint: disable=F0401
+import salt.ext.tornado.ioloop # pylint: disable=F0401
+import salt.loader
+import salt.log.setup
import salt.payload
import salt.pillar
+import salt.serializers.msgpack
import salt.syspaths
+import salt.transport.client
import salt.utils.args
import salt.utils.context
+import salt.utils.crypt
import salt.utils.data
+import salt.utils.dictupdate
import salt.utils.error
import salt.utils.event
import salt.utils.files
@@ -85,34 +60,58 @@ import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
-import salt.defaults.exitcodes
-import salt.cli.daemons
-import salt.log.setup
-
-import salt.utils.dictupdate
+from salt._compat import ipaddress
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
-from salt.utils.debug import enable_sigusr1_handler
-from salt.utils.event import tagify
-from salt.utils.odict import OrderedDict
-from salt.utils.process import (default_signals,
- SignalHandlingProcess,
- ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
- SaltInvocationError,
- SaltReqTimeoutError,
SaltClientError,
- SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
- SaltMasterUnresolvableError
+ SaltInvocationError,
+ SaltMasterUnresolvableError,
+ SaltReqTimeoutError,
+ SaltSystemExit,
)
+# Import Salt Libs
+# pylint: disable=import-error,no-name-in-module,redefined-builtin
+from salt.ext import six
+from salt.ext.six.moves import range
+from salt.template import SLS_ENCODING
+from salt.utils.ctx import RequestContext
+from salt.utils.debug import enable_sigusr1_handler
+from salt.utils.event import tagify
+from salt.utils.network import parse_host_port
+from salt.utils.odict import OrderedDict
+from salt.utils.process import ProcessManager, SignalHandlingProcess, default_signals
+from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
+
+HAS_PSUTIL = False
+try:
+ import salt.utils.psutil_compat as psutil
+
+ HAS_PSUTIL = True
+except ImportError:
+ pass
+
+HAS_RESOURCE = False
+try:
+ import resource
+
+ HAS_RESOURCE = True
+except ImportError:
+ pass
+
+try:
+ import salt.utils.win_functions
+
+ HAS_WIN_FUNCTIONS = True
+except ImportError:
+ HAS_WIN_FUNCTIONS = False
+# pylint: enable=import-error
-import salt.ext.tornado.gen # pylint: disable=F0401
-import salt.ext.tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
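The optional-import blocks moved above all use the same feature-flag pattern: attempt the import, record a HAS_* boolean, and continue silently so callers can feature-test later. A generic sketch of the pattern:

    try:
        import resource  # POSIX-only stdlib module

        HAS_RESOURCE = True
    except ImportError:
        HAS_RESOURCE = False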
@@ -126,116 +125,129 @@ log = logging.getLogger(__name__)
def resolve_dns(opts, fallback=True):
- '''
+ """
Resolves the master_ip and master_uri options
- '''
+ """
ret = {}
check_dns = True
- if (opts.get('file_client', 'remote') == 'local' and
- not opts.get('use_master_when_local', False)):
+ if opts.get("file_client", "remote") == "local" and not opts.get(
+ "use_master_when_local", False
+ ):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
- if opts['master'] == '':
+ if opts["master"] == "":
raise SaltSystemExit
- ret['master_ip'] = salt.utils.network.dns_check(
- opts['master'],
- int(opts['master_port']),
- True,
- opts['ipv6'])
+ ret["master_ip"] = salt.utils.network.dns_check(
+ opts["master"], int(opts["master_port"]), True, opts["ipv6"]
+ )
except SaltClientError:
- retry_dns_count = opts.get('retry_dns_count', None)
- if opts['retry_dns']:
+ retry_dns_count = opts.get("retry_dns_count", None)
+ if opts["retry_dns"]:
while True:
if retry_dns_count is not None:
if retry_dns_count == 0:
raise SaltMasterUnresolvableError
retry_dns_count -= 1
import salt.log
- msg = ('Master hostname: \'{0}\' not found or not responsive. '
- 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
+
+ msg = (
+ "Master hostname: '{0}' not found or not responsive. "
+ "Retrying in {1} seconds"
+ ).format(opts["master"], opts["retry_dns"])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
- print('WARNING: {0}'.format(msg))
- time.sleep(opts['retry_dns'])
+ print("WARNING: {0}".format(msg))
+ time.sleep(opts["retry_dns"])
try:
- ret['master_ip'] = salt.utils.network.dns_check(
- opts['master'],
- int(opts['master_port']),
- True,
- opts['ipv6'])
+ ret["master_ip"] = salt.utils.network.dns_check(
+ opts["master"], int(opts["master_port"]), True, opts["ipv6"]
+ )
break
except SaltClientError:
pass
else:
if fallback:
- ret['master_ip'] = '127.0.0.1'
+ ret["master_ip"] = "127.0.0.1"
else:
raise
except SaltSystemExit:
- unknown_str = 'unknown address'
- master = opts.get('master', unknown_str)
- if master == '':
+ unknown_str = "unknown address"
+ master = opts.get("master", unknown_str)
+ if master == "":
master = unknown_str
- if opts.get('__role') == 'syndic':
- err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
- 'Set \'syndic_master\' value in minion config.'.format(master)
+ if opts.get("__role") == "syndic":
+ err = (
+                "Master address: '{0}' could not be resolved. Invalid or unresolvable address. "
+ "Set 'syndic_master' value in minion config.".format(master)
+ )
else:
- err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
- 'Set \'master\' value in minion config.'.format(master)
+ err = (
+                "Master address: '{0}' could not be resolved. Invalid or unresolvable address. "
+ "Set 'master' value in minion config.".format(master)
+ )
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
- ret['master_ip'] = '127.0.0.1'
+ ret["master_ip"] = "127.0.0.1"
- if 'master_ip' in ret and 'master_ip' in opts:
- if ret['master_ip'] != opts['master_ip']:
+ if "master_ip" in ret and "master_ip" in opts:
+ if ret["master_ip"] != opts["master_ip"]:
log.warning(
- 'Master ip address changed from %s to %s',
- opts['master_ip'], ret['master_ip']
+ "Master ip address changed from %s to %s",
+ opts["master_ip"],
+ ret["master_ip"],
)
- if opts['source_interface_name']:
- log.trace('Custom source interface required: %s', opts['source_interface_name'])
+ if opts["source_interface_name"]:
+ log.trace("Custom source interface required: %s", opts["source_interface_name"])
interfaces = salt.utils.network.interfaces()
- log.trace('The following interfaces are available on this Minion:')
+ log.trace("The following interfaces are available on this Minion:")
log.trace(interfaces)
- if opts['source_interface_name'] in interfaces:
- if interfaces[opts['source_interface_name']]['up']:
- addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
- interfaces[opts['source_interface_name']]['inet6']
- ret['source_ip'] = addrs[0]['address']
- log.debug('Using %s as source IP address', ret['source_ip'])
+ if opts["source_interface_name"] in interfaces:
+ if interfaces[opts["source_interface_name"]]["up"]:
+ addrs = (
+ interfaces[opts["source_interface_name"]]["inet"]
+ if not opts["ipv6"]
+ else interfaces[opts["source_interface_name"]]["inet6"]
+ )
+ ret["source_ip"] = addrs[0]["address"]
+ log.debug("Using %s as source IP address", ret["source_ip"])
else:
- log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
- opts['source_interface_name'])
+ log.warning(
+ "The interface %s is down so it cannot be used as source to connect to the Master",
+ opts["source_interface_name"],
+ )
else:
- log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
- elif opts['source_address']:
- ret['source_ip'] = salt.utils.network.dns_check(
- opts['source_address'],
- int(opts['source_ret_port']),
- True,
- opts['ipv6'])
- log.debug('Using %s as source IP address', ret['source_ip'])
- if opts['source_ret_port']:
- ret['source_ret_port'] = int(opts['source_ret_port'])
- log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
- if opts['source_publish_port']:
- ret['source_publish_port'] = int(opts['source_publish_port'])
- log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
- ret['master_uri'] = 'tcp://{ip}:{port}'.format(
- ip=ret['master_ip'], port=opts['master_port'])
- log.debug('Master URI: %s', ret['master_uri'])
+ log.warning(
+ "%s is not a valid interface. Ignoring.", opts["source_interface_name"]
+ )
+ elif opts["source_address"]:
+ ret["source_ip"] = salt.utils.network.dns_check(
+ opts["source_address"], int(opts["source_ret_port"]), True, opts["ipv6"]
+ )
+ log.debug("Using %s as source IP address", ret["source_ip"])
+ if opts["source_ret_port"]:
+ ret["source_ret_port"] = int(opts["source_ret_port"])
+ log.debug("Using %d as source port for the ret server", ret["source_ret_port"])
+ if opts["source_publish_port"]:
+ ret["source_publish_port"] = int(opts["source_publish_port"])
+ log.debug(
+ "Using %d as source port for the master pub", ret["source_publish_port"]
+ )
+ ret["master_uri"] = "tcp://{ip}:{port}".format(
+ ip=ret["master_ip"], port=opts["master_port"]
+ )
+ log.debug("Master URI: %s", ret["master_uri"])
return ret
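The resolved URI always takes the tcp:// form built above; for a hypothetical master at 10.0.0.5 with the default ret port of 4506:

    "tcp://{ip}:{port}".format(ip="10.0.0.5", port=4506)
    # -> 'tcp://10.0.0.5:4506'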
def prep_ip_port(opts):
- '''
+ """
parse host:port values from opts['master'] and return valid:
master: ip address or hostname as a string
master_port: (optional) master returner port as integer
@@ -245,24 +257,24 @@ def prep_ip_port(opts):
- master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
- master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
- master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
- '''
+ """
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
- if opts['master_uri_format'] == 'ip_only':
- ret['master'] = ipaddress.ip_address(opts['master'])
+ if opts["master_uri_format"] == "ip_only":
+ ret["master"] = ipaddress.ip_address(opts["master"])
else:
- host, port = parse_host_port(opts['master'])
- ret = {'master': host}
+ host, port = parse_host_port(opts["master"])
+ ret = {"master": host}
if port:
- ret.update({'master_port': port})
+ ret.update({"master_port": port})
return ret
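A hedged sketch of the docstring cases, assuming the default 'master_uri_format' so that parse_host_port handles the value (expected results read off the code above):

    prep_ip_port({"master_uri_format": "default", "master": "127.0.0.1:1234"})
    # -> {'master': '127.0.0.1', 'master_port': 1234}
    prep_ip_port({"master_uri_format": "default", "master": "[::1]:1234"})
    # -> {'master': '::1', 'master_port': 1234}
    prep_ip_port({"master_uri_format": "default", "master": "fe80::a00:27ff:fedc:ba98"})
    # -> {'master': 'fe80::a00:27ff:fedc:ba98'}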
def get_proc_dir(cachedir, **kwargs):
- '''
+ """
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
@@ -276,14 +288,14 @@ def get_proc_dir(cachedir, **kwargs):
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
- '''
- fn_ = os.path.join(cachedir, 'proc')
- mode = kwargs.pop('mode', None)
+ """
+ fn_ = os.path.join(cachedir, "proc")
+ mode = kwargs.pop("mode", None)
if mode is None:
mode = {}
else:
- mode = {'mode': mode}
+ mode = {"mode": mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
@@ -295,35 +307,36 @@ def get_proc_dir(cachedir, **kwargs):
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
- if mode_part != mode['mode']:
- os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
+ if mode_part != mode["mode"]:
+ os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode["mode"])
- if hasattr(os, 'chown'):
+ if hasattr(os, "chown"):
# only on unix/unix like systems
- uid = kwargs.pop('uid', -1)
- gid = kwargs.pop('gid', -1)
+ uid = kwargs.pop("uid", -1)
+ gid = kwargs.pop("gid", -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
- if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
- [i for i in (uid, gid) if i != -1]:
+ if (d_stat.st_uid != uid or d_stat.st_gid != gid) and [
+ i for i in (uid, gid) if i != -1
+ ]:
os.chown(fn_, uid, gid)
return fn_
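A hedged usage sketch for get_proc_dir, exercising the optional mode, uid, and gid keywords documented above (the path and ids are hypothetical; the chown step applies only on unix-like systems):

    proc_dir = get_proc_dir("/var/cache/salt/minion", mode=0o700, uid=0, gid=0)
    # -> '/var/cache/salt/minion/proc', created or re-permissioned as needed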
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
- '''
+ """
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
- '''
+ """
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
- if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
+ if isinstance(arg, dict) and arg.pop("__kwarg__", False) is True:
# if the arg is a dict with __kwarg__ == True, then its a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
@@ -334,11 +347,13 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
- invalid_kwargs.append('{0}={1}'.format(key, val))
+ invalid_kwargs.append("{0}={1}".format(key, val))
continue
else:
- string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
+ string_kwarg = salt.utils.args.parse_input([arg], condition=False)[
+ 1
+ ] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
@@ -349,7 +364,7 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
- invalid_kwargs.append('{0}={1}'.format(key, val))
+ invalid_kwargs.append("{0}={1}".format(key, val))
else:
_args.append(arg)
@@ -359,68 +374,70 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
- _kwargs['__pub_{0}'.format(key)] = val
+ _kwargs["__pub_{0}".format(key)] = val
return _args, _kwargs
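The __kwarg__ convention handled above deserves a restatement: keyword arguments arrive from the publisher as dicts flagged with __kwarg__=True and are accepted only when the function's argspec allows them. A hedged example, assuming func takes a timeout parameter or **kwargs:

    args = ["positional", {"__kwarg__": True, "timeout": 30}]
    # load_args_and_kwargs(func, args)
    # -> (['positional'], {'timeout': 30})
    # When `data` is given and func accepts **kwargs, __pub_<key> entries
    # (e.g. __pub_jid, __pub_fun) are packed into the kwargs as well.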
def eval_master_func(opts):
- '''
+ """
Evaluate master function if master type is 'func'
and save it result in opts['master']
- '''
- if '__master_func_evaluated' not in opts:
+ """
+ if "__master_func_evaluated" not in opts:
# split module and function and try loading the module
- mod_fun = opts['master']
- mod, fun = mod_fun.split('.')
+ mod_fun = opts["master"]
+ mod, fun = mod_fun.split(".")
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
- opts['master'] = master_mod[mod_fun]()
+ opts["master"] = master_mod[mod_fun]()
# Check for valid types
- if not isinstance(opts['master'], (six.string_types, list)):
+ if not isinstance(opts["master"], (six.string_types, list)):
raise TypeError
- opts['__master_func_evaluated'] = True
+ opts["__master_func_evaluated"] = True
except KeyError:
- log.error('Failed to load module %s', mod_fun)
+ log.error("Failed to load module %s", mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
- log.error('%s returned from %s is not a string', opts['master'], mod_fun)
+ log.error("%s returned from %s is not a string", opts["master"], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
- log.info('Evaluated master from module: %s', mod_fun)
+ log.info("Evaluated master from module: %s", mod_fun)
def master_event(type, master=None):
- '''
+ """
Centralized master event function which will return event type based on event_map
- '''
- event_map = {'connected': '__master_connected',
- 'disconnected': '__master_disconnected',
- 'failback': '__master_failback',
- 'alive': '__master_alive'}
+ """
+ event_map = {
+ "connected": "__master_connected",
+ "disconnected": "__master_disconnected",
+ "failback": "__master_failback",
+ "alive": "__master_alive",
+ }
- if type == 'alive' and master is not None:
- return '{0}_{1}'.format(event_map.get(type), master)
+ if type == "alive" and master is not None:
+ return "{0}_{1}".format(event_map.get(type), master)
return event_map.get(type, None)
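Sample return values for master_event, read directly off the event_map above (the master name is a hypothetical example):

    master_event("connected")                 # '__master_connected'
    master_event("alive", master="master01")  # '__master_alive_master01'
    master_event("failback")                  # '__master_failback'
    master_event("bogus")                     # None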
def service_name():
- '''
+ """
Return the proper service name based on platform
- '''
- return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'
+ """
+ return "salt_minion" if "bsd" in sys.platform else "salt-minion"
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
- self.beacons_leader = opts.get('beacons_leader', True)
+ self.beacons_leader = opts.get("beacons_leader", True)
def gen_modules(self, initial_load=False, context=None):
- '''
+ """
Tell the minion to reload the execution modules
CLI Example:
@@ -428,75 +445,83 @@ class MinionBase(object):
.. code-block:: bash
salt '*' sys.reload_modules
- '''
+ """
if initial_load:
- self.opts['pillar'] = salt.pillar.get_pillar(
+ self.opts["pillar"] = salt.pillar.get_pillar(
self.opts,
- self.opts['grains'],
- self.opts['id'],
- self.opts['saltenv'],
- pillarenv=self.opts.get('pillarenv'),
+ self.opts["grains"],
+ self.opts["id"],
+ self.opts["saltenv"],
+ pillarenv=self.opts.get("pillarenv"),
).compile_pillar()
self.utils = salt.loader.utils(self.opts, context=context)
- self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, context=context)
+ self.functions = salt.loader.minion_mods(
+ self.opts, utils=self.utils, context=context
+ )
self.serializers = salt.loader.serializers(self.opts)
- self.returners = salt.loader.returners(self.opts, functions=self.functions, context=context)
- self.proxy = salt.loader.proxy(self.opts, functions=self.functions, returners=self.returners)
+ self.returners = salt.loader.returners(
+ self.opts, functions=self.functions, context=context
+ )
+ self.proxy = salt.loader.proxy(
+ self.opts, functions=self.functions, returners=self.returners
+ )
# TODO: remove
self.function_errors = {} # Keep the funcs clean
- self.states = salt.loader.states(self.opts,
- functions=self.functions,
- utils=self.utils,
- serializers=self.serializers,
- context=context)
- self.rend = salt.loader.render(self.opts, functions=self.functions, context=context)
-# self.matcher = Matcher(self.opts, self.functions)
+ self.states = salt.loader.states(
+ self.opts,
+ functions=self.functions,
+ utils=self.utils,
+ serializers=self.serializers,
+ context=context,
+ )
+ self.rend = salt.loader.render(
+ self.opts, functions=self.functions, context=context
+ )
+ # self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
- self.functions['sys.reload_modules'] = self.gen_modules
- self.executors = salt.loader.executors(self.opts,
- functions=self.functions,
- proxy=self.proxy,
- context=context)
+ self.functions["sys.reload_modules"] = self.gen_modules
+ self.executors = salt.loader.executors(
+ self.opts, functions=self.functions, proxy=self.proxy, context=context
+ )
@staticmethod
def process_schedule(minion, loop_interval):
try:
- if hasattr(minion, 'schedule'):
+ if hasattr(minion, "schedule"):
minion.schedule.eval()
else:
- log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
+ log.error(
+ "Minion scheduler not initialized. Scheduled jobs will not be run."
+ )
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
- log.debug(
- 'Overriding loop_interval because of scheduled jobs.'
- )
+ log.debug("Overriding loop_interval because of scheduled jobs.")
except Exception as exc: # pylint: disable=broad-except
- log.error('Exception %s occurred in scheduled job', exc)
+ log.error("Exception %s occurred in scheduled job", exc)
return loop_interval
def process_beacons(self, functions):
- '''
+ """
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
- '''
- if 'config.merge' in functions:
- b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
+ """
+ if "config.merge" in functions:
+ b_conf = functions["config.merge"](
+ "beacons", self.opts["beacons"], omit_opts=True
+ )
if b_conf:
- return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
+ return self.beacons.process(
+ b_conf, self.opts["grains"]
+ ) # pylint: disable=no-member
return []
@salt.ext.tornado.gen.coroutine
- def eval_master(self,
- opts,
- timeout=60,
- safe=True,
- failed=False,
- failback=False):
- '''
+ def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False):
+ """
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
@@ -510,10 +535,10 @@ class MinionBase(object):
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
- '''
+ """
# return early if we are not connecting to a master
- if opts['master_type'] == 'disable':
- log.warning('Master is set to disable, skipping connection')
+ if opts["master_type"] == "disable":
+ log.warning("Master is set to disable, skipping connection")
self.connected = False
raise salt.ext.tornado.gen.Return((None, None))
@@ -522,52 +547,67 @@ class MinionBase(object):
self._discover_masters()
# check if master_type was altered from its default
- if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
+ if opts["master_type"] != "str" and opts["__role"] != "syndic":
# check for a valid keyword
- if opts['master_type'] == 'func':
+ if opts["master_type"] == "func":
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
- elif opts['master_type'] in ('failover', 'distributed'):
- if isinstance(opts['master'], list):
+ elif opts["master_type"] in ("failover", "distributed"):
+ if isinstance(opts["master"], list):
log.info(
- 'Got list of available master addresses: %s',
- opts['master']
+ "Got list of available master addresses: %s", opts["master"]
)
- if opts['master_type'] == 'distributed':
- master_len = len(opts['master'])
+ if opts["master_type"] == "distributed":
+ master_len = len(opts["master"])
if master_len > 1:
- secondary_masters = opts['master'][1:]
- master_idx = crc32(opts['id']) % master_len
+ secondary_masters = opts["master"][1:]
+ master_idx = crc32(opts["id"]) % master_len
try:
- preferred_masters = opts['master']
- preferred_masters[0] = opts['master'][master_idx]
- preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
- opts['master'] = preferred_masters
- log.info('Distributed to the master at \'%s\'.', opts['master'][0])
+ preferred_masters = opts["master"]
+ preferred_masters[0] = opts["master"][master_idx]
+ preferred_masters[1:] = [
+ m
+ for m in opts["master"]
+ if m != preferred_masters[0]
+ ]
+ opts["master"] = preferred_masters
+ log.info(
+ "Distributed to the master at '%s'.",
+ opts["master"][0],
+ )
except (KeyError, AttributeError, TypeError):
- log.warning('Failed to distribute to a specific master.')
+ log.warning(
+ "Failed to distribute to a specific master."
+ )
else:
- log.warning('master_type = distributed needs more than 1 master.')
+ log.warning(
+ "master_type = distributed needs more than 1 master."
+ )
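The `distributed` branch above pins each minion to one preferred master by hashing the minion id, so a fleet spreads itself across the master list without any coordination. A minimal standalone sketch of the same selection (assuming the CRC-32 from zlib/binascii; `pick_masters` is an illustrative helper, not Salt API):

    from zlib import crc32

    def pick_masters(minion_id, masters):
        # A stable hash of the id selects the preferred master, so the
        # same minion keeps the same master as long as the list is fixed.
        idx = crc32(minion_id.encode("utf-8")) % len(masters)
        preferred = masters[idx]
        # Preferred master first; the others remain as fallbacks.
        return [preferred] + [m for m in masters if m != preferred]

    print(pick_masters("web-01", ["m1", "m2", "m3"]))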
- if opts['master_shuffle']:
+ if opts["master_shuffle"]:
log.warning(
- 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
- 'of \'random_master\'. Please update your minion config file.'
+ "Use of 'master_shuffle' detected. 'master_shuffle' is deprecated in favor "
+ "of 'random_master'. Please update your minion config file."
)
- opts['random_master'] = opts['master_shuffle']
+ opts["random_master"] = opts["master_shuffle"]
- opts['auth_tries'] = 0
- if opts['master_failback'] and opts['master_failback_interval'] == 0:
- opts['master_failback_interval'] = opts['master_alive_interval']
+ opts["auth_tries"] = 0
+ if (
+ opts["master_failback"]
+ and opts["master_failback_interval"] == 0
+ ):
+ opts["master_failback_interval"] = opts["master_alive_interval"]
# if opts['master'] is a str and we have never created opts['master_list']
- elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
+ elif isinstance(opts["master"], six.string_types) and (
+ "master_list" not in opts
+ ):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
- opts['master'] = [opts['master']]
- elif opts['__role'] == 'syndic':
- log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
+ opts["master"] = [opts["master"]]
+ elif opts["__role"] == "syndic":
+ log.info("Syndic setting master_syndic to '%s'", opts["master"])
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event-loop
@@ -576,121 +616,132 @@ class MinionBase(object):
elif failed:
if failback:
# failback list of masters to original config
- opts['master'] = opts['master_list']
+ opts["master"] = opts["master_list"]
else:
log.info(
- 'Moving possibly failed master %s to the end of '
- 'the list of masters', opts['master']
+ "Moving possibly failed master %s to the end of "
+ "the list of masters",
+ opts["master"],
)
- if opts['master'] in opts['local_masters']:
+ if opts["master"] in opts["local_masters"]:
# create new list of master with the possibly failed
# one moved to the end
- failed_master = opts['master']
- opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
- opts['master'].append(failed_master)
+ failed_master = opts["master"]
+ opts["master"] = [
+ x for x in opts["local_masters"] if opts["master"] != x
+ ]
+ opts["master"].append(failed_master)
else:
- opts['master'] = opts['master_list']
+ opts["master"] = opts["master_list"]
else:
- msg = ('master_type set to \'failover\' but \'master\' '
- 'is not of type list but of type '
- '{0}'.format(type(opts['master'])))
+ msg = (
+ "master_type set to 'failover' but 'master' "
+ "is not of type list but of type "
+ "{0}".format(type(opts["master"]))
+ )
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
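In the failover path above, a master that dropped the connection is simply rotated to the back of the list before the next sign-in pass. Isolated, the reordering is just (hypothetical helper name):

    def demote_failed_master(masters, failed):
        # Healthy masters keep their relative order; the failed one goes last.
        return [m for m in masters if m != failed] + [failed]

    assert demote_failed_master(["m1", "m2", "m3"], "m2") == ["m1", "m3", "m2"]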
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
- if opts['retry_dns'] and opts['master_type'] == 'failover':
- msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
- 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
+ if opts["retry_dns"] and opts["master_type"] == "failover":
+ msg = (
+ "'master_type' set to 'failover' but 'retry_dns' is not 0. "
+ "Setting 'retry_dns' to 0 to failover to the next master on DNS errors."
+ )
log.critical(msg)
- opts['retry_dns'] = 0
+ opts["retry_dns"] = 0
else:
- msg = ('Invalid keyword \'{0}\' for variable '
- '\'master_type\''.format(opts['master_type']))
+ msg = "Invalid keyword '{0}' for variable " "'master_type'".format(
+ opts["master_type"]
+ )
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion doesn't define io_loop, it can't switch masters; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
- factory_kwargs = {'timeout': timeout, 'safe': safe}
- if getattr(self, 'io_loop', None):
- factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
+ factory_kwargs = {"timeout": timeout, "safe": safe}
+ if getattr(self, "io_loop", None):
+ factory_kwargs["io_loop"] = self.io_loop # pylint: disable=no-member
- tries = opts.get('master_tries', 1)
+ tries = opts.get("master_tries", 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
- if isinstance(opts['master'], list):
+ if isinstance(opts["master"], list):
conn = False
last_exc = None
- opts['master_uri_list'] = []
- opts['local_masters'] = copy.copy(opts['master'])
+ opts["master_uri_list"] = []
+ opts["local_masters"] = copy.copy(opts["master"])
# shuffle the masters and then loop through them
- if opts['random_master']:
+ if opts["random_master"]:
# master_failback is only used when master_type is set to failover
- if opts['master_type'] == 'failover' and opts['master_failback']:
- secondary_masters = opts['local_masters'][1:]
+ if opts["master_type"] == "failover" and opts["master_failback"]:
+ secondary_masters = opts["local_masters"][1:]
shuffle(secondary_masters)
- opts['local_masters'][1:] = secondary_masters
+ opts["local_masters"][1:] = secondary_masters
else:
- shuffle(opts['local_masters'])
+ shuffle(opts["local_masters"])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
- for master in opts['local_masters']:
- opts['master'] = master
+ for master in opts["local_masters"]:
+ opts["master"] = master
opts.update(prep_ip_port(opts))
- opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
+ opts["master_uri_list"].append(resolve_dns(opts)["master_uri"])
pub_channel = None
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
- yield salt.ext.tornado.gen.sleep(opts['acceptance_wait_time'])
+ yield salt.ext.tornado.gen.sleep(opts["acceptance_wait_time"])
attempts += 1
if tries > 0:
- log.debug(
- 'Connecting to master. Attempt %s of %s',
- attempts, tries
- )
+ log.debug("Connecting to master. Attempt %s of %s", attempts, tries)
else:
log.debug(
- 'Connecting to master. Attempt %s (infinite attempts)',
- attempts
+ "Connecting to master. Attempt %s (infinite attempts)", attempts
)
- for master in opts['local_masters']:
- opts['master'] = master
+ for master in opts["local_masters"]:
+ opts["master"] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
- if 'master_list' not in opts:
- opts['master_list'] = copy.copy(opts['local_masters'])
+ if "master_list" not in opts:
+ opts["master_list"] = copy.copy(opts["local_masters"])
self.opts = opts
- pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
+ pub_channel = salt.transport.client.AsyncPubChannel.factory(
+ opts, **factory_kwargs
+ )
try:
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
- if exc.strerror.startswith('Could not access'):
+ if exc.strerror.startswith("Could not access"):
msg = (
- 'Failed to initiate connection with Master '
- '%s: check ownership/permissions. Error '
- 'message: %s', opts['master'], exc
+ "Failed to initiate connection with Master "
+ "%s: check ownership/permissions. Error "
+ "message: %s",
+ opts["master"],
+ exc,
)
else:
- msg = ('Master %s could not be reached, trying next '
- 'next master (if any)', opts['master'])
+ msg = (
+ "Master %s could not be reached, trying next "
+ "next master (if any)",
+ opts["master"],
+ )
log.info(*msg)
pub_channel.close()
pub_channel = None
@@ -700,10 +751,10 @@ class MinionBase(object):
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
- self.opts['master'] = copy.copy(self.opts['local_masters'])
+ self.opts["master"] = copy.copy(self.opts["local_masters"])
log.error(
- 'No master could be reached or all masters '
- 'denied the minion\'s connection attempt.'
+ "No master could be reached or all masters "
+ "denied the minion's connection attempt."
)
if pub_channel:
pub_channel.close()
@@ -711,52 +762,54 @@ class MinionBase(object):
# should already be set.
raise last_exc # pylint: disable=E0702
else:
- self.tok = pub_channel.auth.gen_token(b'salt')
+ self.tok = pub_channel.auth.gen_token(b"salt")
self.connected = True
- raise salt.ext.tornado.gen.Return((opts['master'], pub_channel))
+ raise salt.ext.tornado.gen.Return((opts["master"], pub_channel))
# single master sign in
else:
- if opts['random_master']:
- log.warning('random_master is True but there is only one master specified. Ignoring.')
+ if opts["random_master"]:
+ log.warning(
+ "random_master is True but there is only one master specified. Ignoring."
+ )
pub_channel = None
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
- yield salt.ext.tornado.gen.sleep(opts['acceptance_wait_time'])
+ yield salt.ext.tornado.gen.sleep(opts["acceptance_wait_time"])
attempts += 1
if tries > 0:
- log.debug(
- 'Connecting to master. Attempt %s of %s',
- attempts, tries
- )
+ log.debug("Connecting to master. Attempt %s of %s", attempts, tries)
else:
log.debug(
- 'Connecting to master. Attempt %s (infinite attempts)',
- attempts
+ "Connecting to master. Attempt %s (infinite attempts)", attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
- if self.opts['transport'] == 'detect':
- self.opts['detect_mode'] = True
- for trans in ('zeromq', 'tcp'):
- if trans == 'zeromq' and not zmq:
+ if self.opts["transport"] == "detect":
+ self.opts["detect_mode"] = True
+ for trans in ("zeromq", "tcp"):
+ if trans == "zeromq" and not zmq:
continue
- self.opts['transport'] = trans
- pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
+ self.opts["transport"] = trans
+ pub_channel = salt.transport.client.AsyncPubChannel.factory(
+ self.opts, **factory_kwargs
+ )
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
- del self.opts['detect_mode']
+ del self.opts["detect_mode"]
break
else:
- pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
+ pub_channel = salt.transport.client.AsyncPubChannel.factory(
+ self.opts, **factory_kwargs
+ )
yield pub_channel.connect()
- self.tok = pub_channel.auth.gen_token(b'salt')
+ self.tok = pub_channel.auth.gen_token(b"salt")
self.connected = True
- raise salt.ext.tornado.gen.Return((opts['master'], pub_channel))
+ raise salt.ext.tornado.gen.Return((opts["master"], pub_channel))
except SaltClientError:
if attempts == tries:
# Exhausted all attempts. Return exception.
@@ -766,138 +819,157 @@ class MinionBase(object):
six.reraise(*sys.exc_info())
def _discover_masters(self):
- '''
+ """
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
- '''
- if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
+ """
+ if (
+ self.opts["master"] == DEFAULT_MINION_OPTS["master"]
+ and self.opts["discovery"] is not False
+ ):
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
- for att in range(self.opts['discovery'].get('attempts', 3)):
+ for att in range(self.opts["discovery"].get("attempts", 3)):
try:
att += 1
- log.info('Attempting %s time(s) to discover masters', att)
+ log.info("Attempting %s time(s) to discover masters", att)
masters.update(master_discovery_client.discover())
if not masters:
- time.sleep(self.opts['discovery'].get('pause', 5))
+ time.sleep(self.opts["discovery"].get("pause", 5))
else:
break
except Exception as err: # pylint: disable=broad-except
- log.error('SSDP discovery failure: %s', err)
+ log.error("SSDP discovery failure: %s", err)
break
if masters:
- policy = self.opts.get('discovery', {}).get('match', 'any')
- if policy not in ['any', 'all']:
- log.error('SSDP configuration matcher failure: unknown value "%s". '
- 'Should be "any" or "all"', policy)
+ policy = self.opts.get("discovery", {}).get("match", "any")
+ if policy not in ["any", "all"]:
+ log.error(
+ 'SSDP configuration matcher failure: unknown value "%s". '
+ 'Should be "any" or "all"',
+ policy,
+ )
else:
- mapping = self.opts['discovery'].get('mapping', {})
+ mapping = self.opts["discovery"].get("mapping", {})
for addr, mappings in masters.items():
for proto_data in mappings:
- cnt = len([key for key, value in mapping.items()
- if proto_data.get('mapping', {}).get(key) == value])
- if policy == 'any' and bool(cnt) or cnt == len(mapping):
- self.opts['master'] = proto_data['master']
+ cnt = len(
+ [
+ key
+ for key, value in mapping.items()
+ if proto_data.get("mapping", {}).get(key) == value
+ ]
+ )
+ if policy == "any" and bool(cnt) or cnt == len(mapping):
+ self.opts["master"] = proto_data["master"]
return
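The discovery matcher above counts how many keys of the configured `mapping` a discovered master reproduces: `any` accepts on the first hit, `all` requires every key. A standalone sketch of that condition (helper name hypothetical):

    def ssdp_match(policy, wanted, advertised):
        # Count configured keys that the advertised mapping reproduces.
        hits = sum(1 for k, v in wanted.items() if advertised.get(k) == v)
        # Mirrors the condition above; note that an empty `wanted`
        # mapping matches under either policy.
        return (policy == "any" and bool(hits)) or hits == len(wanted)

    assert ssdp_match("any", {"role": "web", "dc": "eu"}, {"role": "web"})
    assert not ssdp_match("all", {"role": "web", "dc": "eu"}, {"role": "web"})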
def _return_retry_timer(self):
- '''
+ """
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
- '''
- msg = 'Minion return retry timer set to %s seconds'
- if self.opts.get('return_retry_timer_max'):
+ """
+ msg = "Minion return retry timer set to %s seconds"
+ if self.opts.get("return_retry_timer_max"):
try:
- random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
+ random_retry = randint(
+ self.opts["return_retry_timer"], self.opts["return_retry_timer_max"]
+ )
retry_msg = msg % random_retry
- log.debug('%s (randomized)', msg % random_retry)
+ log.debug("%s (randomized)", msg % random_retry)
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
- 'Invalid value (return_retry_timer: %s or '
- 'return_retry_timer_max: %s). Both must be positive '
- 'integers.',
- self.opts['return_retry_timer'],
- self.opts['return_retry_timer_max'],
+ "Invalid value (return_retry_timer: %s or "
+ "return_retry_timer_max: %s). Both must be positive "
+ "integers.",
+ self.opts["return_retry_timer"],
+ self.opts["return_retry_timer_max"],
)
- log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
- return DEFAULT_MINION_OPTS['return_retry_timer']
+ log.debug(msg, DEFAULT_MINION_OPTS["return_retry_timer"])
+ return DEFAULT_MINION_OPTS["return_retry_timer"]
else:
- log.debug(msg, self.opts.get('return_retry_timer'))
- return self.opts.get('return_retry_timer')
+ log.debug(msg, self.opts.get("return_retry_timer"))
+ return self.opts.get("return_retry_timer")
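With `return_retry_timer_max` set, the delay is drawn uniformly between the two options; otherwise the fixed value is used, and invalid bounds fall back to the default. The behavior in isolation (the default value here is illustrative, standing in for DEFAULT_MINION_OPTS):

    from random import randint

    def return_retry_timer(opts, default=5):
        lo = opts.get("return_retry_timer", default)
        hi = opts.get("return_retry_timer_max")
        if hi:
            try:
                return randint(lo, hi)  # randomized delay in seconds
            except ValueError:  # e.g. lo > hi
                return default
        return lo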
class SMinion(MinionBase):
- '''
+ """
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and make them available
for general use.
- '''
+ """
+
def __init__(self, opts, context=None):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
- opts['grains'] = salt.loader.grains(opts)
+
+ opts["grains"] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
- if (self.opts.get('file_client', 'remote') == 'remote'
- or self.opts.get('use_master_when_local', False)):
+ if self.opts.get("file_client", "remote") == "remote" or self.opts.get(
+ "use_master_when_local", False
+ ):
install_zmq()
io_loop = ZMQDefaultLoop.current()
- io_loop.run_sync(
- lambda: self.eval_master(self.opts, failed=True)
- )
+ io_loop.run_sync(lambda: self.eval_master(self.opts, failed=True))
self.gen_modules(initial_load=True, context=context or {})
# If configured, cache pillar data on the minion
- if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
+ if self.opts["file_client"] == "remote" and self.opts.get(
+ "minion_pillar_cache", False
+ ):
import salt.utils.yaml
- pdir = os.path.join(self.opts['cachedir'], 'pillar')
+
+ pdir = os.path.join(self.opts["cachedir"], "pillar")
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
- ptop = os.path.join(pdir, 'top.sls')
- if self.opts['saltenv'] is not None:
- penv = self.opts['saltenv']
+ ptop = os.path.join(pdir, "top.sls")
+ if self.opts["saltenv"] is not None:
+ penv = self.opts["saltenv"]
else:
- penv = 'base'
- cache_top = {penv: {self.opts['id']: ['cache']}}
- with salt.utils.files.fopen(ptop, 'wb') as fp_:
+ penv = "base"
+ cache_top = {penv: {self.opts["id"]: ["cache"]}}
+ with salt.utils.files.fopen(ptop, "wb") as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_, encoding=SLS_ENCODING)
os.chmod(ptop, 0o600)
- cache_sls = os.path.join(pdir, 'cache.sls')
- with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
- salt.utils.yaml.safe_dump(self.opts['pillar'], fp_, encoding=SLS_ENCODING)
+ cache_sls = os.path.join(pdir, "cache.sls")
+ with salt.utils.files.fopen(cache_sls, "wb") as fp_:
+ salt.utils.yaml.safe_dump(
+ self.opts["pillar"], fp_, encoding=SLS_ENCODING
+ )
os.chmod(cache_sls, 0o600)
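The pillar cache written above is an ordinary pillar tree under `<cachedir>/pillar`: a `top.sls` that targets only this minion with a single `cache` state, plus `cache.sls` holding the compiled pillar data. For instance (minion id and saltenv illustrative; plain PyYAML standing in for the salt.utils.yaml wrapper used above):

    import yaml

    # Shape of the generated top file for minion "web-01" in saltenv "base".
    cache_top = {"base": {"web-01": ["cache"]}}
    print(yaml.safe_dump(cache_top, default_flow_style=False))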
class MasterMinion(object):
- '''
+ """
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted; otherwise everything else is loaded cleanly.
- '''
+ """
+
def __init__(
- self,
- opts,
- returners=True,
- states=True,
- rend=True,
- matcher=True,
- whitelist=None,
- ignore_config_errors=True):
+ self,
+ opts,
+ returners=True,
+ states=True,
+ rend=True,
+ matcher=True,
+ whitelist=None,
+ ignore_config_errors=True,
+ ):
self.opts = salt.config.minion_config(
- opts['conf_file'],
- ignore_config_errors=ignore_config_errors,
- role='master'
+ opts["conf_file"], ignore_config_errors=ignore_config_errors, role="master"
)
self.opts.update(opts)
self.whitelist = whitelist
- self.opts['grains'] = salt.loader.grains(opts)
- self.opts['pillar'] = {}
+ self.opts["grains"] = salt.loader.grains(opts)
+ self.opts["pillar"] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
@@ -905,7 +977,7 @@ class MasterMinion(object):
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
- '''
+ """
Tell the minion to reload the execution modules
CLI Example:
@@ -913,59 +985,64 @@ class MasterMinion(object):
.. code-block:: bash
salt '*' sys.reload_modules
- '''
+ """
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
- initial_load=initial_load)
+ initial_load=initial_load,
+ )
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
- self.states = salt.loader.states(self.opts,
- self.functions,
- self.utils,
- self.serializers)
+ self.states = salt.loader.states(
+ self.opts, self.functions, self.utils, self.serializers
+ )
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
- self.functions['sys.reload_modules'] = self.gen_modules
+ self.functions["sys.reload_modules"] = self.gen_modules
class MinionManager(MinionBase):
- '''
+ """
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
- '''
+ """
+
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
- self.auth_wait = self.opts['acceptance_wait_time']
- self.max_auth_wait = self.opts['acceptance_wait_time_max']
+ self.auth_wait = self.opts["acceptance_wait_time"]
+ self.max_auth_wait = self.opts["acceptance_wait_time_max"]
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
- self.process_manager = ProcessManager(name='MultiMinionProcessManager')
- self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
+ self.process_manager = ProcessManager(name="MultiMinionProcessManager")
+ self.io_loop.spawn_callback(
+ self.process_manager.run, **{"asynchronous": True}
+ ) # Tornado backward compat
# pylint: disable=W1701
def __del__(self):
self.destroy()
+
# pylint: enable=W1701
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
- self.opts,
- io_loop=self.io_loop,
+ self.opts, io_loop=self.io_loop,
)
- self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
- self.event.subscribe('')
+ self.event = salt.utils.event.get_event(
+ "minion", opts=self.opts, io_loop=self.io_loop
+ )
+ self.event.subscribe("")
self.event.set_event_handler(self.handle_event)
@salt.ext.tornado.gen.coroutine
@@ -973,66 +1050,71 @@ class MinionManager(MinionBase):
for minion in self.minions:
minion.handle_event(package)
- def _create_minion_object(self, opts, timeout, safe,
- io_loop=None, loaded_base_name=None,
- jid_queue=None):
- '''
+ def _create_minion_object(
+ self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None
+ ):
+ """
Helper function to return the correct type of object
- '''
- return Minion(opts,
- timeout,
- safe,
- io_loop=io_loop,
- loaded_base_name=loaded_base_name,
- jid_queue=jid_queue)
+ """
+ return Minion(
+ opts,
+ timeout,
+ safe,
+ io_loop=io_loop,
+ loaded_base_name=loaded_base_name,
+ jid_queue=jid_queue,
+ )
def _check_minions(self):
- '''
+ """
Check the size of self.minions and log an error if it's empty
- '''
+ """
if not self.minions:
- err = ('Minion unable to successfully connect to '
- 'a Salt Master.')
+ err = "Minion unable to successfully connect to " "a Salt Master."
log.error(err)
def _spawn_minions(self, timeout=60):
- '''
+ """
Spawn all the coroutines which will sign in to masters
- '''
- masters = self.opts['master']
- if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
+ """
+ masters = self.opts["master"]
+ if (self.opts["master_type"] in ("failover", "distributed")) or not isinstance(
+ self.opts["master"], list
+ ):
masters = [masters]
beacons_leader = True
for master in masters:
s_opts = copy.deepcopy(self.opts)
- s_opts['master'] = master
- s_opts['multimaster'] = True
- s_opts['beacons_leader'] = beacons_leader
+ s_opts["master"] = master
+ s_opts["multimaster"] = True
+ s_opts["beacons_leader"] = beacons_leader
if beacons_leader:
beacons_leader = False
- minion = self._create_minion_object(s_opts,
- s_opts['auth_timeout'],
- False,
- io_loop=self.io_loop,
- loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
- jid_queue=self.jid_queue)
+ minion = self._create_minion_object(
+ s_opts,
+ s_opts["auth_timeout"],
+ False,
+ io_loop=self.io_loop,
+ loaded_base_name="salt.loader.{0}".format(s_opts["master"]),
+ jid_queue=self.jid_queue,
+ )
self.io_loop.spawn_callback(self._connect_minion, minion)
self.io_loop.call_later(timeout, self._check_minions)
@salt.ext.tornado.gen.coroutine
def _connect_minion(self, minion):
- '''
+ """
Create a minion, and asynchronously connect it to a master
- '''
+ """
last = 0 # never have we signed in
- auth_wait = minion.opts['acceptance_wait_time']
+ auth_wait = minion.opts["acceptance_wait_time"]
failed = False
while True:
try:
- if minion.opts.get('beacons_before_connect', False):
+ if minion.opts.get("beacons_before_connect", False):
minion.setup_beacons(before_connect=True)
- if minion.opts.get('scheduler_before_connect', False):
+ if minion.opts.get("scheduler_before_connect", False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
@@ -1041,34 +1123,38 @@ class MinionManager(MinionBase):
except SaltClientError as exc:
failed = True
log.error(
- 'Error while bringing up minion for multi-master. Is '
- 'master at %s responding?', minion.opts['master']
+ "Error while bringing up minion for multi-master. Is "
+ "master at %s responding?",
+ minion.opts["master"],
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield salt.ext.tornado.gen.sleep(auth_wait) # TODO: log?
except SaltMasterUnresolvableError:
- err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
- 'Set \'master\' value in minion config.'.format(minion.opts['master'])
+ err = (
+ "Master address: '{0}' could not be resolved. Invalid or unresolveable address. "
+ "Set 'master' value in minion config.".format(minion.opts["master"])
+ )
log.error(err)
break
except Exception as e: # pylint: disable=broad-except
failed = True
log.critical(
- 'Unexpected error while connecting to %s',
- minion.opts['master'], exc_info=True
+ "Unexpected error while connecting to %s",
+ minion.opts["master"],
+ exc_info=True,
)
# Multi Master Tune In
def tune_in(self):
- '''
+ """
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
- '''
+ """
self._bind()
# Fire off all the minion coroutines
@@ -1098,14 +1184,23 @@ class MinionManager(MinionBase):
class Minion(MinionBase):
- '''
+ """
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
- '''
- def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
- '''
+ """
+
+ def __init__(
+ self,
+ opts,
+ timeout=60,
+ safe=True,
+ loaded_base_name=None,
+ io_loop=None,
+ jid_queue=None,
+ ): # pylint: disable=W0231
+ """
Pass in the options dict
- '''
+ """
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
@@ -1133,49 +1228,51 @@ class Minion(MinionBase):
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
- 'You have a version of ZMQ less than ZMQ 3.2! There are '
- 'known connection keep-alive issues with ZMQ < 3.2 which '
- 'may result in loss of contact with minions. Please '
- 'upgrade your ZMQ!'
+ "You have a version of ZMQ less than ZMQ 3.2! There are "
+ "known connection keep-alive issues with ZMQ < 3.2 which "
+ "may result in loss of contact with minions. Please "
+ "upgrade your ZMQ!"
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
- self.opts['grains'] = salt.loader.grains(opts)
+ self.opts["grains"] = salt.loader.grains(opts)
else:
- if self.opts.get('beacons_before_connect', False):
+ if self.opts.get("beacons_before_connect", False):
log.warning(
- '\'beacons_before_connect\' is not supported '
- 'for proxy minions. Setting to False'
+ "'beacons_before_connect' is not supported "
+ "for proxy minions. Setting to False"
)
- self.opts['beacons_before_connect'] = False
- if self.opts.get('scheduler_before_connect', False):
+ self.opts["beacons_before_connect"] = False
+ if self.opts.get("scheduler_before_connect", False):
log.warning(
- '\'scheduler_before_connect\' is not supported '
- 'for proxy minions. Setting to False'
+ "'scheduler_before_connect' is not supported "
+ "for proxy minions. Setting to False"
)
- self.opts['scheduler_before_connect'] = False
+ self.opts["scheduler_before_connect"] = False
- log.info('Creating minion process manager')
+ log.info("Creating minion process manager")
- if self.opts['random_startup_delay']:
- sleep_time = random.randint(0, self.opts['random_startup_delay'])
+ if self.opts["random_startup_delay"]:
+ sleep_time = random.randint(0, self.opts["random_startup_delay"])
log.info(
- 'Minion sleeping for %s seconds due to configured '
- 'startup_delay between 0 and %s seconds',
- sleep_time, self.opts['random_startup_delay']
+ "Minion sleeping for %s seconds due to configured "
+ "startup_delay between 0 and %s seconds",
+ sleep_time,
+ self.opts["random_startup_delay"],
)
time.sleep(sleep_time)
- self.process_manager = ProcessManager(name='MinionProcessManager')
- self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
+ self.process_manager = ProcessManager(name="MinionProcessManager")
+ self.io_loop.spawn_callback(self.process_manager.run, **{"asynchronous": True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
- self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
- self.process_manager)
+ self.io_loop.spawn_callback(
+ salt.engines.start_engines, self.opts, self.process_manager
+ )
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
@@ -1197,9 +1294,9 @@ class Minion(MinionBase):
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
- '''
+ """
Block until we are connected to a master
- '''
+ """
self._sync_connect_master_success = False
log.debug("sync_connect_master")
@@ -1225,20 +1322,22 @@ class Minion(MinionBase):
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
- raise SaltDaemonNotRunning('Failed to connect to the salt-master')
+ raise SaltDaemonNotRunning("Failed to connect to the salt-master")
@salt.ext.tornado.gen.coroutine
def connect_master(self, failed=False):
- '''
+ """
Return a future which will complete when you are connected to a master
- '''
- master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
+ """
+ master, self.pub_channel = yield self.eval_master(
+ self.opts, self.timeout, self.safe, failed
+ )
yield self._post_master_init(master)
# TODO: better name...
@salt.ext.tornado.gen.coroutine
def _post_master_init(self, master):
- '''
+ """
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
@@ -1250,107 +1349,124 @@ class Minion(MinionBase):
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
- '''
+ """
if self.connected:
- self.opts['master'] = master
+ self.opts["master"] = master
# Initialize pillar before loader to make pillar accessible in modules
async_pillar = salt.pillar.get_async_pillar(
self.opts,
- self.opts['grains'],
- self.opts['id'],
- self.opts['saltenv'],
- pillarenv=self.opts.get('pillarenv')
+ self.opts["grains"],
+ self.opts["id"],
+ self.opts["saltenv"],
+ pillarenv=self.opts.get("pillarenv"),
)
- self.opts['pillar'] = yield async_pillar.compile_pillar()
+ self.opts["pillar"] = yield async_pillar.compile_pillar()
async_pillar.destroy()
if not self.ready:
self._setup_core()
- elif self.connected and self.opts['pillar']:
+ elif self.connected and self.opts["pillar"]:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
- if hasattr(self, 'schedule'):
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
+ if hasattr(self, "schedule"):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
- if not hasattr(self, 'schedule'):
+ if not hasattr(self, "schedule"):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
- cleanup=[master_event(type='alive')])
+ cleanup=[master_event(type="alive")],
+ )
# add default scheduling jobs to the minion's scheduler
- if self.opts['mine_enabled'] and 'mine.update' in self.functions:
- self.schedule.add_job({
- '__mine_interval':
+ if self.opts["mine_enabled"] and "mine.update" in self.functions:
+ self.schedule.add_job(
{
- 'function': 'mine.update',
- 'minutes': self.opts['mine_interval'],
- 'jid_include': True,
- 'maxrunning': 2,
- 'run_on_start': True,
- 'return_job': self.opts.get('mine_return_job', False)
- }
- }, persist=True)
- log.info('Added mine.update to scheduler')
+ "__mine_interval": {
+ "function": "mine.update",
+ "minutes": self.opts["mine_interval"],
+ "jid_include": True,
+ "maxrunning": 2,
+ "run_on_start": True,
+ "return_job": self.opts.get("mine_return_job", False),
+ }
+ },
+ persist=True,
+ )
+ log.info("Added mine.update to scheduler")
else:
- self.schedule.delete_job('__mine_interval', persist=True)
+ self.schedule.delete_job("__mine_interval", persist=True)
# add master_alive job if enabled
- if (self.opts['transport'] != 'tcp' and
- self.opts['master_alive_interval'] > 0 and
- self.connected):
- self.schedule.add_job({
- master_event(type='alive', master=self.opts['master']):
+ if (
+ self.opts["transport"] != "tcp"
+ and self.opts["master_alive_interval"] > 0
+ and self.connected
+ ):
+ self.schedule.add_job(
{
- 'function': 'status.master',
- 'seconds': self.opts['master_alive_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master'],
- 'connected': True}
- }
- }, persist=True)
- if self.opts['master_failback'] and \
- 'master_list' in self.opts and \
- self.opts['master'] != self.opts['master_list'][0]:
- self.schedule.add_job({
- master_event(type='failback'):
- {
- 'function': 'status.ping_master',
- 'seconds': self.opts['master_failback_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master_list'][0]}
+ master_event(type="alive", master=self.opts["master"]): {
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"master": self.opts["master"], "connected": True},
}
- }, persist=True)
+ },
+ persist=True,
+ )
+ if (
+ self.opts["master_failback"]
+ and "master_list" in self.opts
+ and self.opts["master"] != self.opts["master_list"][0]
+ ):
+ self.schedule.add_job(
+ {
+ master_event(type="failback"): {
+ "function": "status.ping_master",
+ "seconds": self.opts["master_failback_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"master": self.opts["master_list"][0]},
+ }
+ },
+ persist=True,
+ )
else:
- self.schedule.delete_job(master_event(type='failback'), persist=True)
+ self.schedule.delete_job(master_event(type="failback"), persist=True)
else:
- self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
- self.schedule.delete_job(master_event(type='failback'), persist=True)
+ self.schedule.delete_job(
+ master_event(type="alive", master=self.opts["master"]), persist=True
+ )
+ self.schedule.delete_job(master_event(type="failback"), persist=True)
def _prep_mod_opts(self):
- '''
+ """
Returns a copy of the opts with key bits stripped out
- '''
+ """
mod_opts = {}
for key, val in six.iteritems(self.opts):
- if key == 'logger':
+ if key == "logger":
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
- '''
+ """
Return the functions and the returners loaded up from the loader
module
- '''
+ """
opt_in = True
if not opts:
opts = self.opts
@@ -1359,43 +1475,54 @@ class Minion(MinionBase):
# a memory limit on module imports
# this feature ONLY works on *nix-like OSes (the resource module doesn't work on Windows)
modules_max_memory = False
- if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
+ if opts.get("modules_max_memory", -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
- 'modules_max_memory set, enforcing a maximum of %s',
- opts['modules_max_memory']
+ "modules_max_memory set, enforcing a maximum of %s",
+ opts["modules_max_memory"],
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
- mem_limit = rss + vms + opts['modules_max_memory']
+ mem_limit = rss + vms + opts["modules_max_memory"]
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
- elif opts.get('modules_max_memory', -1) > 0:
+ elif opts.get("modules_max_memory", -1) > 0:
if not HAS_PSUTIL:
- log.error('Unable to enforce modules_max_memory because psutil is missing')
+ log.error(
+ "Unable to enforce modules_max_memory because psutil is missing"
+ )
if not HAS_RESOURCE:
- log.error('Unable to enforce modules_max_memory because resource is missing')
+ log.error(
+ "Unable to enforce modules_max_memory because resource is missing"
+ )
# This might be a proxy minion
- if hasattr(self, 'proxy'):
+ if hasattr(self, "proxy"):
proxy = self.proxy
else:
proxy = None
if grains is None:
- opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
+ opts["grains"] = salt.loader.grains(opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(opts, proxy=proxy)
- if opts.get('multimaster', False):
+ if opts.get("multimaster", False):
s_opts = copy.deepcopy(opts)
- functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
- loaded_base_name=self.loaded_base_name, notify=notify)
+ functions = salt.loader.minion_mods(
+ s_opts,
+ utils=self.utils,
+ proxy=proxy,
+ loaded_base_name=self.loaded_base_name,
+ notify=notify,
+ )
else:
- functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
+ functions = salt.loader.minion_mods(
+ opts, utils=self.utils, notify=notify, proxy=proxy
+ )
returners = salt.loader.returners(opts, functions, proxy=proxy)
errors = {}
- if '_errors' in functions:
- errors = functions['_errors']
- functions.pop('_errors')
+ if "_errors" in functions:
+ errors = functions["_errors"]
+ functions.pop("_errors")
# we're done, reset the limits!
if modules_max_memory is True:
@@ -1410,11 +1537,13 @@ class Minion(MinionBase):
def _send_req_sync(self, load, timeout):
- if self.opts['minion_sign_messages']:
- log.trace('Signing event to be published onto the bus.')
- minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
- sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
- load['sig'] = sig
+ if self.opts["minion_sign_messages"]:
+ log.trace("Signing event to be published onto the bus.")
+ minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
+ sig = salt.crypt.sign_message(
+ minion_privkey_path, salt.serializers.msgpack.serialize(load)
+ )
+ load["sig"] = sig
with salt.transport.client.ReqChannel.factory(self.opts) as channel:
return channel.send(load, timeout=timeout)
@@ -1422,102 +1551,136 @@ class Minion(MinionBase):
@salt.ext.tornado.gen.coroutine
def _send_req_async(self, load, timeout):
- if self.opts['minion_sign_messages']:
- log.trace('Signing event to be published onto the bus.')
- minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
- sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
- load['sig'] = sig
+ if self.opts["minion_sign_messages"]:
+ log.trace("Signing event to be published onto the bus.")
+ minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
+ sig = salt.crypt.sign_message(
+ minion_privkey_path, salt.serializers.msgpack.serialize(load)
+ )
+ load["sig"] = sig
with salt.transport.client.AsyncReqChannel.factory(self.opts) as channel:
ret = yield channel.send(load, timeout=timeout)
raise salt.ext.tornado.gen.Return(ret)
- def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None, include_startup_grains=False):
- '''
+ def _fire_master(
+ self,
+ data=None,
+ tag=None,
+ events=None,
+ pretag=None,
+ timeout=60,
+ sync=True,
+ timeout_handler=None,
+ include_startup_grains=False,
+ ):
+ """
Fire an event on the master, or drop message if unable to send.
- '''
- load = {'id': self.opts['id'],
- 'cmd': '_minion_event',
- 'pretag': pretag,
- 'tok': self.tok}
+ """
+ load = {
+ "id": self.opts["id"],
+ "cmd": "_minion_event",
+ "pretag": pretag,
+ "tok": self.tok,
+ }
if events:
- load['events'] = events
+ load["events"] = events
elif data and tag:
- load['data'] = data
- load['tag'] = tag
+ load["data"] = data
+ load["tag"] = tag
elif not data and tag:
- load['data'] = {}
- load['tag'] = tag
+ load["data"] = {}
+ load["tag"] = tag
else:
return
if include_startup_grains:
grains_to_add = dict(
- [(k, v) for k, v in six.iteritems(self.opts.get('grains', {})) if k in self.opts['start_event_grains']])
- load['grains'] = grains_to_add
+ [
+ (k, v)
+ for k, v in six.iteritems(self.opts.get("grains", {}))
+ if k in self.opts["start_event_grains"]
+ ]
+ )
+ load["grains"] = grains_to_add
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
- log.info('fire_master failed: master could not be contacted. Request timed out.')
+ log.info(
+ "fire_master failed: master could not be contacted. Request timed out."
+ )
return False
except Exception: # pylint: disable=broad-except
- log.info('fire_master failed: %s', traceback.format_exc())
+ log.info("fire_master failed: %s", traceback.format_exc())
return False
else:
if timeout_handler is None:
+
def handle_timeout(*_):
- log.info('fire_master failed: master could not be contacted. Request timed out.')
+ log.info(
+ "fire_master failed: master could not be contacted. Request timed out."
+ )
return True
+
timeout_handler = handle_timeout
with salt.ext.tornado.stack_context.ExceptionStackContext(timeout_handler):
- self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
+ # pylint: disable=unexpected-keyword-arg
+ self._send_req_async(load, timeout, callback=lambda f: None)
+ # pylint: enable=unexpected-keyword-arg
return True
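The payload built by `_fire_master` above is a plain dict: batched calls carry only an `events` list, while single events carry `tag` and `data`. An illustrative single-event load (all values hypothetical):

    load = {
        "id": "web-01",          # this minion's id
        "cmd": "_minion_event",  # master-side handler for minion events
        "pretag": None,
        "tok": "<auth token>",   # token from the pub channel auth
        "tag": "custom/refresh",
        "data": {"changed": True},
    }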
@salt.ext.tornado.gen.coroutine
def _handle_decoded_payload(self, data):
- '''
+ """
Override this method if you wish to handle the decoded data
differently.
- '''
+ """
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
- if 'user' in data:
+ if "user" in data:
log.info(
- 'User %s Executing command %s with jid %s',
- data['user'], data['fun'], data['jid']
+ "User %s Executing command %s with jid %s",
+ data["user"],
+ data["fun"],
+ data["jid"],
)
else:
- log.info(
- 'Executing command %s with jid %s',
- data['fun'], data['jid']
- )
- log.debug('Command details %s', data)
+ log.info("Executing command %s with jid %s", data["fun"], data["jid"])
+ log.debug("Command details %s", data)
# Don't duplicate jobs
- log.trace('Started JIDs: %s', self.jid_queue)
+ log.trace("Started JIDs: %s", self.jid_queue)
if self.jid_queue is not None:
- if data['jid'] in self.jid_queue:
+ if data["jid"] in self.jid_queue:
return
else:
- self.jid_queue.append(data['jid'])
- if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
+ self.jid_queue.append(data["jid"])
+ if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
self.jid_queue.pop(0)
- if isinstance(data['fun'], six.string_types):
- if data['fun'] == 'sys.reload_modules':
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+ if isinstance(data["fun"], six.string_types):
+ if data["fun"] == "sys.reload_modules":
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
- process_count_max = self.opts.get('process_count_max')
+ process_count_max = self.opts.get("process_count_max")
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
- log.warning("Maximum number of processes reached while executing jid %s, waiting...", data['jid'])
+ log.warning(
+ "Maximum number of processes reached while executing jid %s, waiting...",
+ data["jid"],
+ )
yield salt.ext.tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
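The jid queue consulted above is a small bounded FIFO used purely for de-duplication: a jid seen twice is dropped, and once the queue passes `minion_jid_queue_hwm` entries the oldest one is evicted. In isolation (helper name hypothetical):

    def seen_before(jid, jid_queue, hwm):
        # True means the job was already queued and should be ignored.
        if jid in jid_queue:
            return True
        jid_queue.append(jid)
        if len(jid_queue) > hwm:
            jid_queue.pop(0)  # evict the oldest jid
        return False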
@@ -1526,24 +1689,26 @@ class Minion(MinionBase):
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
- multiprocessing_enabled = self.opts.get('multiprocessing', True)
+ multiprocessing_enabled = self.opts.get("multiprocessing", True)
if multiprocessing_enabled:
- if sys.platform.startswith('win'):
+ if sys.platform.startswith("win"):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingProcess(
target=self._target,
- name='ProcessPayload',
- args=(instance, self.opts, data, self.connected)
+ name="ProcessPayload",
+ args=(instance, self.opts, data, self.connected),
+ )
+ process._after_fork_methods.append(
+ (salt.utils.crypt.reinit_crypto, [], {})
)
- process._after_fork_methods.append((salt.utils.crypt.reinit_crypto, [], {}))
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
- name=data['jid']
+ name=data["jid"],
)
if multiprocessing_enabled:
@@ -1553,13 +1718,13 @@ class Minion(MinionBase):
process.start()
else:
process.start()
- process.name = '{}-Job-{}'.format(process.name, data['jid'])
+ process.name = "{}-Job-{}".format(process.name, data["jid"])
self.subprocess_list.add(process)
def ctx(self):
- '''
+ """
Return a single context manager for the minion's data
- '''
+ """
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
@@ -1578,102 +1743,131 @@ class Minion(MinionBase):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
- if not hasattr(minion_instance, 'functions'):
- functions, returners, function_errors, executors = (
- minion_instance._load_modules(grains=opts['grains'])
- )
+ if not hasattr(minion_instance, "functions"):
+ (
+ functions,
+ returners,
+ function_errors,
+ executors,
+ ) = minion_instance._load_modules(grains=opts["grains"])
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
- if not hasattr(minion_instance, 'serial'):
+ if not hasattr(minion_instance, "serial"):
minion_instance.serial = salt.payload.Serial(opts)
- if not hasattr(minion_instance, 'proc_dir'):
- uid = salt.utils.user.get_uid(user=opts.get('user', None))
- minion_instance.proc_dir = (
- get_proc_dir(opts['cachedir'], uid=uid)
- )
+ if not hasattr(minion_instance, "proc_dir"):
+ uid = salt.utils.user.get_uid(user=opts.get("user", None))
+ minion_instance.proc_dir = get_proc_dir(opts["cachedir"], uid=uid)
def run_func(minion_instance, opts, data):
- if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
+ if isinstance(data["fun"], tuple) or isinstance(data["fun"], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
- with salt.ext.tornado.stack_context.StackContext(functools.partial(RequestContext,
- {'data': data, 'opts': opts})):
+ with salt.ext.tornado.stack_context.StackContext(
+ functools.partial(RequestContext, {"data": data, "opts": opts})
+ ):
with salt.ext.tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
- '''
+ """
This method should be used as a threading target; it starts the actual
minion-side execution.
- '''
+ """
minion_instance.gen_modules()
- fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
+ fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
- salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
+ salt.utils.process.appendproctitle(
+ "{0}._thread_return {1}".format(cls.__name__, data["jid"])
+ )
- sdata = {'pid': os.getpid()}
+ sdata = {"pid": os.getpid()}
sdata.update(data)
- log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
- with salt.utils.files.fopen(fn_, 'w+b') as fp_:
+ log.info("Starting a new job %s with PID %s", data["jid"], sdata["pid"])
+ with salt.utils.files.fopen(fn_, "w+b") as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
- ret = {'success': False}
- function_name = data['fun']
- executors = data.get('module_executors') or \
- getattr(minion_instance, 'module_executors', []) or \
- opts.get('module_executors', ['direct_call'])
- allow_missing_funcs = any([
- minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
- for executor in executors
- if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
- ])
+ ret = {"success": False}
+ function_name = data["fun"]
+ executors = (
+ data.get("module_executors")
+ or getattr(minion_instance, "module_executors", [])
+ or opts.get("module_executors", ["direct_call"])
+ )
+ allow_missing_funcs = any(
+ [
+ minion_instance.executors["{0}.allow_missing_func".format(executor)](
+ function_name
+ )
+ for executor in executors
+ if "{0}.allow_missing_func".format(executor)
+ in minion_instance.executors
+ ]
+ )
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
- if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
- whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
+ if minion_instance.connected and minion_instance.opts["pillar"].get(
+ "minion_blackout", False
+ ):
+ whitelist = minion_instance.opts["pillar"].get(
+ "minion_blackout_whitelist", []
+ )
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
- if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
+ if (
+ function_name != "saltutil.refresh_pillar"
+ and function_name not in whitelist
+ ):
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
- if minion_instance.opts['grains'].get('minion_blackout', False):
- whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
- if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
+ if minion_instance.opts["grains"].get("minion_blackout", False):
+ whitelist = minion_instance.opts["grains"].get(
+ "minion_blackout_whitelist", []
+ )
+ if (
+ function_name != "saltutil.refresh_pillar"
+ and function_name not in whitelist
+ ):
minion_blackout_violation = True
if minion_blackout_violation:
- raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
- 'to False in pillar or grains to resume operations. Only '
- 'saltutil.refresh_pillar allowed in blackout mode.')
+ raise SaltInvocationError(
+ "Minion in blackout mode. Set 'minion_blackout' "
+ "to False in pillar or grains to resume operations. Only "
+ "saltutil.refresh_pillar allowed in blackout mode."
+ )
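Blackout mode, checked above, is driven entirely by pillar (or grains) data: with it enabled, everything except `saltutil.refresh_pillar` and an optional whitelist raises before the function runs. An illustrative pillar structure using the keys from the code above:

    pillar = {
        "minion_blackout": True,
        # Extra functions allowed while blacked out:
        "minion_blackout_whitelist": ["test.ping", "pillar.get"],
    }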
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
- args, kwargs = load_args_and_kwargs(
- func,
- data['arg'],
- data)
+ args, kwargs = load_args_and_kwargs(func, data["arg"], data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
- args, kwargs = data['arg'], data
- minion_instance.functions.pack['__context__']['retcode'] = 0
+ args, kwargs = data["arg"], data
+ minion_instance.functions.pack["__context__"]["retcode"] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
- raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
- format(executors))
- if opts.get('sudo_user', '') and executors[-1] != 'sudo':
- executors[-1] = 'sudo' # replace the last one with sudo
- log.trace('Executors list %s', executors) # pylint: disable=no-member
+ raise SaltInvocationError(
+ "Wrong executors specification: {0}. String or non-empty list expected".format(
+ executors
+ )
+ )
+ if opts.get("sudo_user", "") and executors[-1] != "sudo":
+ executors[-1] = "sudo" # replace the last one with sudo
+ log.trace("Executors list %s", executors) # pylint: disable=no-member
for name in executors:
- fname = '{0}.execute'.format(name)
+ fname = "{0}.execute".format(name)
if fname not in minion_instance.executors:
- raise SaltInvocationError("Executor '{0}' is not available".format(name))
- return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
+ raise SaltInvocationError(
+ "Executor '{0}' is not available".format(name)
+ )
+ return_data = minion_instance.executors[fname](
+ opts, data, func, args, kwargs
+ )
if return_data is not None:
break
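Executors form a chain: for each name the loop above calls `<name>.execute`, and the first non-None return value wins. A toy version of the contract (the real signature, per the call above, is `(opts, data, func, args, kwargs)`; it is trimmed here):

    def run_chain(executors, func, *args):
        # First executor returning a non-None value short-circuits the chain.
        for execute in executors:
            ret = execute(func, *args)
            if ret is not None:
                return ret

    def declining_executor(func, *args):
        return None  # defers to the next executor in the list

    def direct_call(func, *args):
        return func(*args)  # the default executor: just invoke the function

    print(run_chain([declining_executor, direct_call], lambda x: x * 2, 21))  # 42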
@@ -1687,258 +1881,268 @@ class Minion(MinionBase):
if not iret:
iret = []
iret.append(single)
- tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
- event_data = {'return': single}
+ tag = tagify(
+ [data["jid"], "prog", opts["id"], six.text_type(ind)], "job"
+ )
+ event_data = {"return": single}
minion_instance._fire_master(event_data, tag)
ind += 1
- ret['return'] = iret
+ ret["return"] = iret
else:
- ret['return'] = return_data
+ ret["return"] = return_data
- retcode = minion_instance.functions.pack['__context__'].get(
- 'retcode',
- salt.defaults.exitcodes.EX_OK
+ retcode = minion_instance.functions.pack["__context__"].get(
+ "retcode", salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
- func_result = all(return_data.get(x, True)
- for x in ('result', 'success'))
+ func_result = all(
+ return_data.get(x, True) for x in ("result", "success")
+ )
except Exception: # pylint: disable=broad-except
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
- ret['retcode'] = retcode
- ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
+ ret["retcode"] = retcode
+ ret["success"] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
- msg = 'Command required for \'{0}\' not found'.format(
- function_name
- )
+ msg = "Command required for '{0}' not found".format(function_name)
log.debug(msg, exc_info=True)
- ret['return'] = '{0}: {1}'.format(msg, exc)
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "{0}: {1}".format(msg, exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
- 'A command in \'%s\' had a problem: %s',
- function_name, exc,
- exc_info_on_loglevel=logging.DEBUG
+ "A command in '%s' had a problem: %s",
+ function_name,
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
)
- ret['return'] = 'ERROR: {0}'.format(exc)
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "ERROR: {0}".format(exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
- 'Problem executing \'%s\': %s',
- function_name, exc,
- exc_info_on_loglevel=logging.DEBUG
+ "Problem executing '%s': %s",
+ function_name,
+ exc,
+ exc_info_on_loglevel=logging.DEBUG,
)
- ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
- function_name, exc
- )
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = "ERROR executing '{0}': {1}".format(function_name, exc)
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
- msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
- function_name, exc, func.__doc__ or ''
+ msg = "Passed invalid arguments to {0}: {1}\n{2}".format(
+ function_name, exc, func.__doc__ or ""
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
- ret['return'] = msg
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ ret["return"] = msg
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except Exception: # pylint: disable=broad-except
- msg = 'The minion function caused an exception'
+ msg = "The minion function caused an exception"
log.warning(msg, exc_info_on_loglevel=True)
- salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
- ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
- ret['out'] = 'nested'
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
+ salt.utils.error.fire_exception(
+ salt.exceptions.MinionError(msg), opts, job=data
+ )
+ ret["return"] = "{0}: {1}".format(msg, traceback.format_exc())
+ ret["out"] = "nested"
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
else:
- docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
+ docs = minion_instance.functions["sys.doc"]("{0}*".format(function_name))
if docs:
- docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
- ret['return'] = docs
+ docs[function_name] = minion_instance.functions.missing_fun_string(
+ function_name
+ )
+ ret["return"] = docs
else:
- ret['return'] = minion_instance.functions.missing_fun_string(function_name)
- mod_name = function_name.split('.')[0]
+ ret["return"] = minion_instance.functions.missing_fun_string(
+ function_name
+ )
+ mod_name = function_name.split(".")[0]
if mod_name in minion_instance.function_errors:
- ret['return'] += ' Possible reasons: \'{0}\''.format(
+ ret["return"] += " Possible reasons: '{0}'".format(
minion_instance.function_errors[mod_name]
)
- ret['success'] = False
- ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
- ret['out'] = 'nested'
+ ret["success"] = False
+ ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
+ ret["out"] = "nested"
- ret['jid'] = data['jid']
- ret['fun'] = data['fun']
- ret['fun_args'] = data['arg']
- if 'master_id' in data:
- ret['master_id'] = data['master_id']
- if 'metadata' in data:
- if isinstance(data['metadata'], dict):
- ret['metadata'] = data['metadata']
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
+ if "master_id" in data:
+ ret["master_id"] = data["master_id"]
+ if "metadata" in data:
+ if isinstance(data["metadata"], dict):
+ ret["metadata"] = data["metadata"]
else:
- log.warning('The metadata parameter must be a dictionary. Ignoring.')
+ log.warning("The metadata parameter must be a dictionary. Ignoring.")
if minion_instance.connected:
minion_instance._return_pub(
- ret,
- timeout=minion_instance._return_retry_timer()
+ ret, timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
            # Should have been converted to a comma-delimited string already
- if isinstance(opts.get('return'), six.string_types):
- if data['ret']:
- data['ret'] = ','.join((data['ret'], opts['return']))
+ if isinstance(opts.get("return"), six.string_types):
+ if data["ret"]:
+ data["ret"] = ",".join((data["ret"], opts["return"]))
else:
- data['ret'] = opts['return']
+ data["ret"] = opts["return"]
- log.debug('minion return: %s', ret)
+ log.debug("minion return: %s", ret)
# TODO: make a list? Seems odd to split it this late :/
- if data['ret'] and isinstance(data['ret'], six.string_types):
- if 'ret_config' in data:
- ret['ret_config'] = data['ret_config']
- if 'ret_kwargs' in data:
- ret['ret_kwargs'] = data['ret_kwargs']
- ret['id'] = opts['id']
- for returner in set(data['ret'].split(',')):
+ if data["ret"] and isinstance(data["ret"], six.string_types):
+ if "ret_config" in data:
+ ret["ret_config"] = data["ret_config"]
+ if "ret_kwargs" in data:
+ ret["ret_kwargs"] = data["ret_kwargs"]
+ ret["id"] = opts["id"]
+ for returner in set(data["ret"].split(",")):
try:
- returner_str = '{0}.returner'.format(returner)
+ returner_str = "{0}.returner".format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
- returner_err = minion_instance.returners.missing_fun_string(returner_str)
+ returner_err = minion_instance.returners.missing_fun_string(
+ returner_str
+ )
log.error(
- 'Returner %s could not be loaded: %s',
- returner_str, returner_err
+ "Returner %s could not be loaded: %s",
+ returner_str,
+ returner_err,
)
except Exception as exc: # pylint: disable=broad-except
- log.exception(
- 'The return failed for job %s: %s', data['jid'], exc
- )
+ log.exception("The return failed for job %s: %s", data["jid"], exc)
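For readers skimming the hunk above: the tail of _thread_return merges the minion-config returners into the job's comma-delimited "ret" string, then fans out to "<name>.returner" callables. A minimal runnable sketch, with a hypothetical toy registry standing in for the real loader:

    def smtp_returner(ret):
        print("mailed jid " + ret["jid"])

    returners = {"smtp.returner": smtp_returner}  # stand-in for minion_instance.returners
    data = {"ret": "smtp", "jid": "20190101000000000000"}
    opts = {"return": "mysql"}  # default returners from the minion config

    # Merge config returners into the job's comma-delimited string.
    if isinstance(opts.get("return"), str):
        data["ret"] = ",".join((data["ret"], opts["return"])) if data["ret"] else opts["return"]

    for returner in set(data["ret"].split(",")):
        key = "{0}.returner".format(returner)
        if key in returners:
            returners[key]({"jid": data["jid"]})
        else:
            print("Returner {0} could not be loaded".format(key))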
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
- '''
+ """
This method should be used as a threading target, start the actual
minion side execution.
- '''
+ """
minion_instance.gen_modules()
- fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
+ fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
- salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
+ salt.utils.process.appendproctitle(
+ "{0}._thread_multi_return {1}".format(cls.__name__, data["jid"])
+ )
- sdata = {'pid': os.getpid()}
+ sdata = {"pid": os.getpid()}
sdata.update(data)
- log.info('Starting a new job with PID %s', sdata['pid'])
- with salt.utils.files.fopen(fn_, 'w+b') as fp_:
+ log.info("Starting a new job with PID %s", sdata["pid"])
+ with salt.utils.files.fopen(fn_, "w+b") as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
- multifunc_ordered = opts.get('multifunc_ordered', False)
- num_funcs = len(data['fun'])
+ multifunc_ordered = opts.get("multifunc_ordered", False)
+ num_funcs = len(data["fun"])
if multifunc_ordered:
ret = {
- 'return': [None] * num_funcs,
- 'retcode': [None] * num_funcs,
- 'success': [False] * num_funcs
+ "return": [None] * num_funcs,
+ "retcode": [None] * num_funcs,
+ "success": [False] * num_funcs,
}
else:
- ret = {
- 'return': {},
- 'retcode': {},
- 'success': {}
- }
+ ret = {"return": {}, "retcode": {}, "success": {}}
for ind in range(0, num_funcs):
if not multifunc_ordered:
- ret['success'][data['fun'][ind]] = False
+ ret["success"][data["fun"][ind]] = False
try:
minion_blackout_violation = False
- if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
- whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
+ if minion_instance.connected and minion_instance.opts["pillar"].get(
+ "minion_blackout", False
+ ):
+ whitelist = minion_instance.opts["pillar"].get(
+ "minion_blackout_whitelist", []
+ )
                # This minion is blacked out. Only allow saltutil.refresh_pillar and whitelisted functions
- if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
+ if (
+ data["fun"][ind] != "saltutil.refresh_pillar"
+ and data["fun"][ind] not in whitelist
+ ):
minion_blackout_violation = True
- elif minion_instance.opts['grains'].get('minion_blackout', False):
- whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
- if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
+ elif minion_instance.opts["grains"].get("minion_blackout", False):
+ whitelist = minion_instance.opts["grains"].get(
+ "minion_blackout_whitelist", []
+ )
+ if (
+ data["fun"][ind] != "saltutil.refresh_pillar"
+ and data["fun"][ind] not in whitelist
+ ):
minion_blackout_violation = True
if minion_blackout_violation:
- raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
- 'to False in pillar or grains to resume operations. Only '
- 'saltutil.refresh_pillar allowed in blackout mode.')
+ raise SaltInvocationError(
+ "Minion in blackout mode. Set 'minion_blackout' "
+ "to False in pillar or grains to resume operations. Only "
+ "saltutil.refresh_pillar allowed in blackout mode."
+ )
- func = minion_instance.functions[data['fun'][ind]]
+ func = minion_instance.functions[data["fun"][ind]]
- args, kwargs = load_args_and_kwargs(
- func,
- data['arg'][ind],
- data)
- minion_instance.functions.pack['__context__']['retcode'] = 0
- key = ind if multifunc_ordered else data['fun'][ind]
- ret['return'][key] = func(*args, **kwargs)
- retcode = minion_instance.functions.pack['__context__'].get(
- 'retcode',
- 0
+ args, kwargs = load_args_and_kwargs(func, data["arg"][ind], data)
+ minion_instance.functions.pack["__context__"]["retcode"] = 0
+ key = ind if multifunc_ordered else data["fun"][ind]
+ ret["return"][key] = func(*args, **kwargs)
+ retcode = minion_instance.functions.pack["__context__"].get(
+ "retcode", 0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
- func_result = all(ret['return'][key].get(x, True)
- for x in ('result', 'success'))
+ func_result = all(
+ ret["return"][key].get(x, True)
+ for x in ("result", "success")
+ )
except Exception: # pylint: disable=broad-except
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
- ret['retcode'][key] = retcode
- ret['success'][key] = retcode == 0
+ ret["retcode"][key] = retcode
+ ret["success"][key] = retcode == 0
except Exception as exc: # pylint: disable=broad-except
trb = traceback.format_exc()
- log.warning('The minion function caused an exception: %s', exc)
+ log.warning("The minion function caused an exception: %s", exc)
if multifunc_ordered:
- ret['return'][ind] = trb
+ ret["return"][ind] = trb
else:
- ret['return'][data['fun'][ind]] = trb
- ret['jid'] = data['jid']
- ret['fun'] = data['fun']
- ret['fun_args'] = data['arg']
- if 'metadata' in data:
- ret['metadata'] = data['metadata']
+ ret["return"][data["fun"][ind]] = trb
+ ret["jid"] = data["jid"]
+ ret["fun"] = data["fun"]
+ ret["fun_args"] = data["arg"]
+ if "metadata" in data:
+ ret["metadata"] = data["metadata"]
if minion_instance.connected:
minion_instance._return_pub(
- ret,
- timeout=minion_instance._return_retry_timer()
+ ret, timeout=minion_instance._return_retry_timer()
)
- if data['ret']:
- if 'ret_config' in data:
- ret['ret_config'] = data['ret_config']
- if 'ret_kwargs' in data:
- ret['ret_kwargs'] = data['ret_kwargs']
- for returner in set(data['ret'].split(',')):
- ret['id'] = opts['id']
+ if data["ret"]:
+ if "ret_config" in data:
+ ret["ret_config"] = data["ret_config"]
+ if "ret_kwargs" in data:
+ ret["ret_kwargs"] = data["ret_kwargs"]
+ for returner in set(data["ret"].split(",")):
+ ret["id"] = opts["id"]
try:
- minion_instance.returners['{0}.returner'.format(
- returner
- )](ret)
+ minion_instance.returners["{0}.returner".format(returner)](ret)
except Exception as exc: # pylint: disable=broad-except
- log.error(
- 'The return failed for job %s: %s',
- data['jid'], exc
- )
+ log.error("The return failed for job %s: %s", data["jid"], exc)
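A short sketch (plain dicts, no loader) of the two result shapes _thread_multi_return builds above, together with the "result"/"success" inference that both job paths share:

    num_funcs = 2
    # multifunc_ordered keys results by position; the default keys them by function name.
    ordered = {
        "return": [None] * num_funcs,
        "retcode": [None] * num_funcs,
        "success": [False] * num_funcs,
    }
    unordered = {"return": {}, "retcode": {}, "success": {}}
    assert len(ordered["return"]) == num_funcs and unordered["return"] == {}

    def infer_success(return_data):
        # A dict return fails only if a present "result"/"success" key is
        # falsey; non-dict returns count as success.
        try:
            return all(return_data.get(x, True) for x in ("result", "success"))
        except Exception:
            return True

    assert infer_success({"result": True, "changes": {}}) is True
    assert infer_success({"success": False}) is False
    assert infer_success("not a dict") is True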
- def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
- '''
+ def _return_pub(self, ret, ret_cmd="_return", timeout=60, sync=True):
+ """
Return the data from the executed command to the master server
- '''
- jid = ret.get('jid', ret.get('__jid__'))
- fun = ret.get('fun', ret.get('__fun__'))
- if self.opts['multiprocessing']:
+ """
+ jid = ret.get("jid", ret.get("__jid__"))
+ fun = ret.get("fun", ret.get("__fun__"))
+ if self.opts["multiprocessing"]:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
@@ -1946,38 +2150,36 @@ class Minion(MinionBase):
except (OSError, IOError):
# The file is gone already
pass
- log.info('Returning information for job: %s', jid)
- log.trace('Return data: %s', ret)
- if ret_cmd == '_syndic_return':
- load = {'cmd': ret_cmd,
- 'id': self.opts['uid'],
- 'jid': jid,
- 'fun': fun,
- 'arg': ret.get('arg'),
- 'tgt': ret.get('tgt'),
- 'tgt_type': ret.get('tgt_type'),
- 'load': ret.get('__load__')}
- if '__master_id__' in ret:
- load['master_id'] = ret['__master_id__']
- load['return'] = {}
+ log.info("Returning information for job: %s", jid)
+ log.trace("Return data: %s", ret)
+ if ret_cmd == "_syndic_return":
+ load = {
+ "cmd": ret_cmd,
+ "id": self.opts["uid"],
+ "jid": jid,
+ "fun": fun,
+ "arg": ret.get("arg"),
+ "tgt": ret.get("tgt"),
+ "tgt_type": ret.get("tgt_type"),
+ "load": ret.get("__load__"),
+ }
+ if "__master_id__" in ret:
+ load["master_id"] = ret["__master_id__"]
+ load["return"] = {}
for key, value in six.iteritems(ret):
- if key.startswith('__'):
+ if key.startswith("__"):
continue
- load['return'][key] = value
+ load["return"][key] = value
else:
- load = {'cmd': ret_cmd,
- 'id': self.opts['id']}
+ load = {"cmd": ret_cmd, "id": self.opts["id"]}
for key, value in six.iteritems(ret):
load[key] = value
- if 'out' in ret:
- if isinstance(ret['out'], six.string_types):
- load['out'] = ret['out']
+ if "out" in ret:
+ if isinstance(ret["out"], six.string_types):
+ load["out"] = ret["out"]
else:
- log.error(
- 'Invalid outputter %s. This is likely a bug.',
- ret['out']
- )
+ log.error("Invalid outputter %s. This is likely a bug.", ret["out"])
else:
try:
oput = self.functions[fun].__outputter__
@@ -1985,22 +2187,23 @@ class Minion(MinionBase):
pass
else:
if isinstance(oput, six.string_types):
- load['out'] = oput
- if self.opts['cache_jobs']:
+ load["out"] = oput
+ if self.opts["cache_jobs"]:
# Local job cache has been enabled
- if ret['jid'] == 'req':
- ret['jid'] = salt.utils.jid.gen_jid(self.opts)
- salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
+ if ret["jid"] == "req":
+ ret["jid"] = salt.utils.jid.gen_jid(self.opts)
+ salt.utils.minion.cache_jobs(self.opts, ret["jid"], ret)
- if not self.opts['pub_ret']:
- return ''
+ if not self.opts["pub_ret"]:
+ return ""
def timeout_handler(*_):
log.warning(
- 'The minion failed to return the job information for job %s. '
- 'This is often due to the master being shut down or '
- 'overloaded. If the master is running, consider increasing '
- 'the worker_threads value.', jid
+ "The minion failed to return the job information for job %s. "
+ "This is often due to the master being shut down or "
+ "overloaded. If the master is running, consider increasing "
+ "the worker_threads value.",
+ jid,
)
return True
@@ -2009,25 +2212,29 @@ class Minion(MinionBase):
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
- return ''
+ return ""
else:
with salt.ext.tornado.stack_context.ExceptionStackContext(timeout_handler):
- ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
+ # pylint: disable=unexpected-keyword-arg
+ ret_val = self._send_req_async(
+ load, timeout=timeout, callback=lambda f: None
+ )
+ # pylint: enable=unexpected-keyword-arg
- log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
+ log.trace("ret_val = %s", ret_val) # pylint: disable=no-member
return ret_val
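One detail of the _syndic_return branch above that is easy to miss: double-underscore bookkeeping keys are consumed for routing but never forwarded. A tiny sketch (values invented):

    ret = {
        "__jid__": "20190101000000000000",
        "__load__": {"fun": "test.ping"},
        "web1": {"retcode": 0, "return": True},
    }
    load = {"cmd": "_syndic_return", "return": {}}
    for key, value in ret.items():
        if key.startswith("__"):
            continue  # dunder keys are routing metadata, not results
        load["return"][key] = value
    assert load["return"] == {"web1": {"retcode": 0, "return": True}}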
- def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
- '''
+ def _return_pub_multi(self, rets, ret_cmd="_return", timeout=60, sync=True):
+ """
Return the data from the executed command to the master server
- '''
+ """
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
- jid = ret.get('jid', ret.get('__jid__'))
- fun = ret.get('fun', ret.get('__fun__'))
- if self.opts['multiprocessing']:
+ jid = ret.get("jid", ret.get("__jid__"))
+ fun = ret.get("fun", ret.get("__fun__"))
+ if self.opts["multiprocessing"]:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
@@ -2035,37 +2242,38 @@ class Minion(MinionBase):
except (OSError, IOError):
# The file is gone already
pass
- log.info('Returning information for job: %s', jid)
+ log.info("Returning information for job: %s", jid)
load = jids.setdefault(jid, {})
- if ret_cmd == '_syndic_return':
+ if ret_cmd == "_syndic_return":
if not load:
- load.update({'id': self.opts['id'],
- 'jid': jid,
- 'fun': fun,
- 'arg': ret.get('arg'),
- 'tgt': ret.get('tgt'),
- 'tgt_type': ret.get('tgt_type'),
- 'load': ret.get('__load__'),
- 'return': {}})
- if '__master_id__' in ret:
- load['master_id'] = ret['__master_id__']
+ load.update(
+ {
+ "id": self.opts["id"],
+ "jid": jid,
+ "fun": fun,
+ "arg": ret.get("arg"),
+ "tgt": ret.get("tgt"),
+ "tgt_type": ret.get("tgt_type"),
+ "load": ret.get("__load__"),
+ "return": {},
+ }
+ )
+ if "__master_id__" in ret:
+ load["master_id"] = ret["__master_id__"]
for key, value in six.iteritems(ret):
- if key.startswith('__'):
+ if key.startswith("__"):
continue
- load['return'][key] = value
+ load["return"][key] = value
else:
- load.update({'id': self.opts['id']})
+ load.update({"id": self.opts["id"]})
for key, value in six.iteritems(ret):
load[key] = value
- if 'out' in ret:
- if isinstance(ret['out'], six.string_types):
- load['out'] = ret['out']
+ if "out" in ret:
+ if isinstance(ret["out"], six.string_types):
+ load["out"] = ret["out"]
else:
- log.error(
- 'Invalid outputter %s. This is likely a bug.',
- ret['out']
- )
+ log.error("Invalid outputter %s. This is likely a bug.", ret["out"])
else:
try:
oput = self.functions[fun].__outputter__
@@ -2073,20 +2281,20 @@ class Minion(MinionBase):
pass
else:
if isinstance(oput, six.string_types):
- load['out'] = oput
- if self.opts['cache_jobs']:
+ load["out"] = oput
+ if self.opts["cache_jobs"]:
# Local job cache has been enabled
- salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
+ salt.utils.minion.cache_jobs(self.opts, load["jid"], ret)
- load = {'cmd': ret_cmd,
- 'load': list(six.itervalues(jids))}
+ load = {"cmd": ret_cmd, "load": list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
- 'The minion failed to return the job information for job %s. '
- 'This is often due to the master being shut down or '
- 'overloaded. If the master is running, consider increasing '
- 'the worker_threads value.', jid
+ "The minion failed to return the job information for job %s. "
+ "This is often due to the master being shut down or "
+ "overloaded. If the master is running, consider increasing "
+ "the worker_threads value.",
+ jid,
)
return True
@@ -2095,362 +2303,391 @@ class Minion(MinionBase):
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
- return ''
+ return ""
else:
with salt.ext.tornado.stack_context.ExceptionStackContext(timeout_handler):
- ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
+ # pylint: disable=unexpected-keyword-arg
+ ret_val = self._send_req_async(
+ load, timeout=timeout, callback=lambda f: None
+ )
+ # pylint: enable=unexpected-keyword-arg
- log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
+ log.trace("ret_val = %s", ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
- '''
+ """
Execute a state run based on information set in the minion config file
- '''
- if self.opts['startup_states']:
- if self.opts.get('master_type', 'str') == 'disable' and \
- self.opts.get('file_client', 'remote') == 'remote':
+ """
+ if self.opts["startup_states"]:
+ if (
+ self.opts.get("master_type", "str") == "disable"
+ and self.opts.get("file_client", "remote") == "remote"
+ ):
log.warning(
- 'Cannot run startup_states when \'master_type\' is set '
- 'to \'disable\' and \'file_client\' is set to '
- '\'remote\'. Skipping.'
+ "Cannot run startup_states when 'master_type' is set "
+ "to 'disable' and 'file_client' is set to "
+ "'remote'. Skipping."
)
else:
- data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
- if self.opts['startup_states'] == 'sls':
- data['fun'] = 'state.sls'
- data['arg'] = [self.opts['sls_list']]
- elif self.opts['startup_states'] == 'top':
- data['fun'] = 'state.top'
- data['arg'] = [self.opts['top_file']]
+ data = {"jid": "req", "ret": self.opts.get("ext_job_cache", "")}
+ if self.opts["startup_states"] == "sls":
+ data["fun"] = "state.sls"
+ data["arg"] = [self.opts["sls_list"]]
+ elif self.opts["startup_states"] == "top":
+ data["fun"] = "state.top"
+ data["arg"] = [self.opts["top_file"]]
else:
- data['fun'] = 'state.highstate'
- data['arg'] = []
+ data["fun"] = "state.highstate"
+ data["arg"] = []
self._handle_decoded_payload(data)
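To make _state_run concrete, here is the synthetic job it hands to _handle_decoded_payload for startup_states: sls; the sls_list value is invented for the sketch:

    opts = {
        "startup_states": "sls",
        "sls_list": ["core", "edit"],
        "top_file": "top.sls",
        "ext_job_cache": "",
    }
    data = {"jid": "req", "ret": opts.get("ext_job_cache", "")}
    if opts["startup_states"] == "sls":
        data["fun"], data["arg"] = "state.sls", [opts["sls_list"]]
    elif opts["startup_states"] == "top":
        data["fun"], data["arg"] = "state.top", [opts["top_file"]]
    else:
        data["fun"], data["arg"] = "state.highstate", []
    assert data == {"jid": "req", "ret": "", "fun": "state.sls", "arg": [["core", "edit"]]}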
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
- '''
+ """
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
- '''
- if '__update_grains' not in self.opts.get('schedule', {}):
- if 'schedule' not in self.opts:
- self.opts['schedule'] = {}
- self.opts['schedule'].update({
- '__update_grains':
- {
- 'function': 'event.fire',
- 'args': [{}, 'grains_refresh'],
- 'minutes': refresh_interval_in_minutes
+ """
+ if "__update_grains" not in self.opts.get("schedule", {}):
+ if "schedule" not in self.opts:
+ self.opts["schedule"] = {}
+ self.opts["schedule"].update(
+ {
+ "__update_grains": {
+ "function": "event.fire",
+ "args": [{}, "grains_refresh"],
+ "minutes": refresh_interval_in_minutes,
}
- })
+ }
+ )
def _fire_master_minion_start(self):
include_grains = False
- if self.opts['start_event_grains']:
+ if self.opts["start_event_grains"]:
include_grains = True
# Send an event to the master that the minion is live
- if self.opts['enable_legacy_startup_events']:
+ if self.opts["enable_legacy_startup_events"]:
# Old style event. Defaults to False in Sodium release.
self._fire_master(
- 'Minion {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- 'minion_start',
- include_startup_grains=include_grains
+ "Minion {0} started at {1}".format(self.opts["id"], time.asctime()),
+ "minion_start",
+ include_startup_grains=include_grains,
)
        # send namespaced event
self._fire_master(
- 'Minion {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- tagify([self.opts['id'], 'start'], 'minion'),
- include_startup_grains=include_grains
+ "Minion {0} started at {1}".format(self.opts["id"], time.asctime()),
+ tagify([self.opts["id"], "start"], "minion"),
+ include_startup_grains=include_grains,
)
def module_refresh(self, force_refresh=False, notify=False):
- '''
+ """
Refresh the functions and returners.
- '''
- log.debug('Refreshing modules. Notify=%s', notify)
- self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
+ """
+ log.debug("Refreshing modules. Notify=%s", notify)
+ self.functions, self.returners, _, self.executors = self._load_modules(
+ force_refresh, notify=notify
+ )
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
- '''
+ """
Refresh the functions and returners.
- '''
+ """
if not self.beacons_leader:
return
- log.debug('Refreshing beacons.')
+ log.debug("Refreshing beacons.")
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def matchers_refresh(self):
- '''
+ """
Refresh the matchers
- '''
- log.debug('Refreshing matchers.')
+ """
+ log.debug("Refreshing matchers.")
self.matchers = salt.loader.matchers(self.opts)
# TODO: only allow one future in flight at a time?
@salt.ext.tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
- '''
+ """
Refresh the pillar
- '''
+ """
self.module_refresh(force_refresh)
if self.connected:
- log.debug('Refreshing pillar')
+ log.debug("Refreshing pillar")
async_pillar = salt.pillar.get_async_pillar(
self.opts,
- self.opts['grains'],
- self.opts['id'],
- self.opts['saltenv'],
- pillarenv=self.opts.get('pillarenv'),
+ self.opts["grains"],
+ self.opts["id"],
+ self.opts["saltenv"],
+ pillarenv=self.opts.get("pillarenv"),
)
try:
- self.opts['pillar'] = yield async_pillar.compile_pillar()
+ self.opts["pillar"] = yield async_pillar.compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
- log.error('Pillar data could not be refreshed. '
- 'One or more masters may be down!')
+ log.error(
+ "Pillar data could not be refreshed. "
+ "One or more masters may be down!"
+ )
finally:
async_pillar.destroy()
self.matchers_refresh()
self.beacons_refresh()
- evt = salt.utils.event.get_event('minion', opts=self.opts)
- evt.fire_event({'complete': True}, tag='/salt/minion/minion_pillar_refresh_complete')
+ evt = salt.utils.event.get_event("minion", opts=self.opts)
+ evt.fire_event(
+ {"complete": True}, tag="/salt/minion/minion_pillar_refresh_complete"
+ )
def manage_schedule(self, tag, data):
- '''
+ """
Refresh the functions and returners.
- '''
- func = data.get('func', None)
- name = data.get('name', None)
- schedule = data.get('schedule', None)
- where = data.get('where', None)
- persist = data.get('persist', None)
+ """
+ func = data.get("func", None)
+ name = data.get("name", None)
+ schedule = data.get("schedule", None)
+ where = data.get("where", None)
+ persist = data.get("persist", None)
- if func == 'delete':
+ if func == "delete":
self.schedule.delete_job(name, persist)
- elif func == 'add':
+ elif func == "add":
self.schedule.add_job(schedule, persist)
- elif func == 'modify':
+ elif func == "modify":
self.schedule.modify_job(name, schedule, persist)
- elif func == 'enable':
+ elif func == "enable":
self.schedule.enable_schedule(persist)
- elif func == 'disable':
+ elif func == "disable":
self.schedule.disable_schedule(persist)
- elif func == 'enable_job':
+ elif func == "enable_job":
self.schedule.enable_job(name, persist)
- elif func == 'run_job':
+ elif func == "run_job":
self.schedule.run_job(name)
- elif func == 'disable_job':
+ elif func == "disable_job":
self.schedule.disable_job(name, persist)
- elif func == 'postpone_job':
+ elif func == "postpone_job":
self.schedule.postpone_job(name, data)
- elif func == 'skip_job':
+ elif func == "skip_job":
self.schedule.skip_job(name, data)
- elif func == 'reload':
+ elif func == "reload":
self.schedule.reload(schedule)
- elif func == 'list':
+ elif func == "list":
self.schedule.list(where)
- elif func == 'save_schedule':
+ elif func == "save_schedule":
self.schedule.save_schedule()
- elif func == 'get_next_fire_time':
+ elif func == "get_next_fire_time":
self.schedule.get_next_fire_time(name)
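manage_schedule above is a plain name-to-method dispatch; the same routing can be sketched as a table (stub schedule object, hypothetical job name — an illustration, not a suggested refactor):

    class StubSchedule(object):
        def delete_job(self, name, persist):
            print("deleted {0} (persist={1})".format(name, persist))

        def run_job(self, name):
            print("ran {0}".format(name))

    schedule = StubSchedule()
    name, persist = "__mine_interval", True
    dispatch = {
        "delete": lambda: schedule.delete_job(name, persist),
        "run_job": lambda: schedule.run_job(name),
    }
    dispatch["run_job"]()  # -> ran __mine_interval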
def manage_beacons(self, tag, data):
- '''
+ """
Manage Beacons
- '''
+ """
if not self.beacons_leader:
return
- func = data.get('func', None)
- name = data.get('name', None)
- beacon_data = data.get('beacon_data', None)
- include_pillar = data.get('include_pillar', None)
- include_opts = data.get('include_opts', None)
+ func = data.get("func", None)
+ name = data.get("name", None)
+ beacon_data = data.get("beacon_data", None)
+ include_pillar = data.get("include_pillar", None)
+ include_opts = data.get("include_opts", None)
- if func == 'add':
+ if func == "add":
self.beacons.add_beacon(name, beacon_data)
- elif func == 'modify':
+ elif func == "modify":
self.beacons.modify_beacon(name, beacon_data)
- elif func == 'delete':
+ elif func == "delete":
self.beacons.delete_beacon(name)
- elif func == 'enable':
+ elif func == "enable":
self.beacons.enable_beacons()
- elif func == 'disable':
+ elif func == "disable":
self.beacons.disable_beacons()
- elif func == 'enable_beacon':
+ elif func == "enable_beacon":
self.beacons.enable_beacon(name)
- elif func == 'disable_beacon':
+ elif func == "disable_beacon":
self.beacons.disable_beacon(name)
- elif func == 'list':
- self.beacons.list_beacons(include_opts=include_opts,
- include_pillar=include_pillar)
- elif func == 'list_available':
+ elif func == "list":
+ self.beacons.list_beacons(
+ include_opts=include_opts, include_pillar=include_pillar
+ )
+ elif func == "list_available":
self.beacons.list_available_beacons()
- elif func == 'validate_beacon':
+ elif func == "validate_beacon":
self.beacons.validate_beacon(name, beacon_data)
- elif func == 'reset':
+ elif func == "reset":
self.beacons.reset()
def environ_setenv(self, tag, data):
- '''
+ """
Set the salt-minion main process environment according to
the data contained in the minion event data
- '''
- environ = data.get('environ', None)
+ """
+ environ = data.get("environ", None)
if environ is None:
return False
- false_unsets = data.get('false_unsets', False)
- clear_all = data.get('clear_all', False)
+ false_unsets = data.get("false_unsets", False)
+ clear_all = data.get("clear_all", False)
import salt.modules.environ as mod_environ
+
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
- '''
+ """
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
- '''
+ """
if self._running is None:
self._running = True
elif self._running is False:
log.error(
- 'This %s was scheduled to stop. Not running %s.tune_in()',
- self.__class__.__name__, self.__class__.__name__
+ "This %s was scheduled to stop. Not running %s.tune_in()",
+ self.__class__.__name__,
+ self.__class__.__name__,
)
return
elif self._running is True:
log.error(
- 'This %s is already running. Not running %s.tune_in()',
- self.__class__.__name__, self.__class__.__name__
+ "This %s is already running. Not running %s.tune_in()",
+ self.__class__.__name__,
+ self.__class__.__name__,
)
return
try:
log.info(
- '%s is starting as user \'%s\'',
- self.__class__.__name__, salt.utils.user.get_user()
+ "%s is starting as user '%s'",
+ self.__class__.__name__,
+ salt.utils.user.get_user(),
)
except Exception as err: # pylint: disable=broad-except
            # Only Windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
- 'Failed to get the user who is starting %s',
+ "Failed to get the user who is starting %s",
self.__class__.__name__,
- exc_info=err
+ exc_info=err,
)
def _mine_send(self, tag, data):
- '''
+ """
Send mine data to the master
- '''
+ """
with salt.transport.client.ReqChannel.factory(self.opts) as channel:
- data['tok'] = self.tok
+ data["tok"] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
- log.warning('Unable to send mine data to master.')
+ log.warning("Unable to send mine data to master.")
return None
@salt.ext.tornado.gen.coroutine
def handle_event(self, package):
- '''
+ """
Handle an event from the epull_sock (all local minion events)
- '''
+ """
if not self.ready:
raise salt.ext.tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
- log.debug(
- 'Minion of \'%s\' is handling event tag \'%s\'',
- self.opts['master'], tag
- )
- if tag.startswith('module_refresh'):
+ log.debug("Minion of '%s' is handling event tag '%s'", self.opts["master"], tag)
+ if tag.startswith("module_refresh"):
self.module_refresh(
- force_refresh=data.get('force_refresh', False),
- notify=data.get('notify', False)
+ force_refresh=data.get("force_refresh", False),
+ notify=data.get("notify", False),
)
- elif tag.startswith('pillar_refresh'):
- yield self.pillar_refresh(
- force_refresh=data.get('force_refresh', False)
- )
- elif tag.startswith('beacons_refresh'):
+ elif tag.startswith("pillar_refresh"):
+ yield self.pillar_refresh(force_refresh=data.get("force_refresh", False))
+ elif tag.startswith("beacons_refresh"):
self.beacons_refresh()
- elif tag.startswith('matchers_refresh'):
+ elif tag.startswith("matchers_refresh"):
self.matchers_refresh()
- elif tag.startswith('manage_schedule'):
+ elif tag.startswith("manage_schedule"):
self.manage_schedule(tag, data)
- elif tag.startswith('manage_beacons'):
+ elif tag.startswith("manage_beacons"):
self.manage_beacons(tag, data)
- elif tag.startswith('grains_refresh'):
- if (data.get('force_refresh', False) or
- self.grains_cache != self.opts['grains']):
+ elif tag.startswith("grains_refresh"):
+ if (
+ data.get("force_refresh", False)
+ or self.grains_cache != self.opts["grains"]
+ ):
self.pillar_refresh(force_refresh=True)
- self.grains_cache = self.opts['grains']
- elif tag.startswith('environ_setenv'):
+ self.grains_cache = self.opts["grains"]
+ elif tag.startswith("environ_setenv"):
self.environ_setenv(tag, data)
- elif tag.startswith('_minion_mine'):
+ elif tag.startswith("_minion_mine"):
self._mine_send(tag, data)
- elif tag.startswith('fire_master'):
+ elif tag.startswith("fire_master"):
if self.connected:
- log.debug('Forwarding master event tag=%s', data['tag'])
- self._fire_master(data['data'], data['tag'], data['events'], data['pretag'], sync=False)
- elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
+ log.debug("Forwarding master event tag=%s", data["tag"])
+ self._fire_master(
+ data["data"],
+ data["tag"],
+ data["events"],
+ data["pretag"],
+ sync=False,
+ )
+ elif tag.startswith(master_event(type="disconnected")) or tag.startswith(
+ master_event(type="failback")
+ ):
# if the master disconnect event is for a different master, raise an exception
- if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
+ if (
+ tag.startswith(master_event(type="disconnected"))
+ and data["master"] != self.opts["master"]
+ ):
                # not my master, ignore
raise salt.ext.tornado.gen.Return()
- if tag.startswith(master_event(type='failback')):
+ if tag.startswith(master_event(type="failback")):
# if the master failback event is not for the top master, raise an exception
- if data['master'] != self.opts['master_list'][0]:
- raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
- data['master'], self.opts['master']))
+ if data["master"] != self.opts["master_list"][0]:
+ raise SaltException(
+ "Bad master '{0}' when mine failback is '{1}'".format(
+ data["master"], self.opts["master"]
+ )
+ )
# if the master failback event is for the current master, raise an exception
- elif data['master'] == self.opts['master'][0]:
- raise SaltException('Already connected to \'{0}\''.format(data['master']))
+ elif data["master"] == self.opts["master"][0]:
+ raise SaltException(
+ "Already connected to '{0}'".format(data["master"])
+ )
if self.connected:
# we are not connected anymore
self.connected = False
- log.info('Connection to master %s lost', self.opts['master'])
+ log.info("Connection to master %s lost", self.opts["master"])
- if self.opts['master_type'] != 'failover':
+ if self.opts["master_type"] != "failover":
# modify the scheduled job to fire on reconnect
- if self.opts['transport'] != 'tcp':
+ if self.opts["transport"] != "tcp":
schedule = {
- 'function': 'status.master',
- 'seconds': self.opts['master_alive_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master'],
- 'connected': False}
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {
+ "master": self.opts["master"],
+ "connected": False,
+ },
}
- self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
- schedule=schedule)
+ self.schedule.modify_job(
+ name=master_event(type="alive", master=self.opts["master"]),
+ schedule=schedule,
+ )
else:
                    # delete the scheduled job so it doesn't interfere with the failover process
- if self.opts['transport'] != 'tcp':
- self.schedule.delete_job(name=master_event(type='alive'))
+ if self.opts["transport"] != "tcp":
+ self.schedule.delete_job(name=master_event(type="alive"))
- log.info('Trying to tune in to next master from master-list')
+ log.info("Trying to tune in to next master from master-list")
- if hasattr(self, 'pub_channel'):
+ if hasattr(self, "pub_channel"):
self.pub_channel.on_recv(None)
- if hasattr(self.pub_channel, 'auth'):
+ if hasattr(self.pub_channel, "auth"):
self.pub_channel.auth.invalidate()
- if hasattr(self.pub_channel, 'close'):
+ if hasattr(self.pub_channel, "close"):
self.pub_channel.close()
del self.pub_channel
@@ -2458,113 +2695,141 @@ class Minion(MinionBase):
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
- opts=self.opts,
- failed=True,
- failback=tag.startswith(master_event(type='failback')))
+ opts=self.opts,
+ failed=True,
+ failback=tag.startswith(master_event(type="failback")),
+ )
except SaltClientError:
pass
if self.connected:
- self.opts['master'] = master
+ self.opts["master"] = master
# re-init the subsystems to work with the new master
log.info(
- 'Re-initialising subsystems for new master %s',
- self.opts['master']
+ "Re-initialising subsystems for new master %s",
+ self.opts["master"],
)
# put the current schedule into the new loaders
- self.opts['schedule'] = self.schedule.option('schedule')
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+ self.opts["schedule"] = self.schedule.option("schedule")
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
                # make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
- log.info('Minion is ready to receive requests!')
+ log.info("Minion is ready to receive requests!")
# update scheduled job to run with the new master addr
- if self.opts['transport'] != 'tcp':
+ if self.opts["transport"] != "tcp":
schedule = {
- 'function': 'status.master',
- 'seconds': self.opts['master_alive_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master'],
- 'connected': True}
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {
+ "master": self.opts["master"],
+ "connected": True,
+ },
}
- self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
- schedule=schedule)
+ self.schedule.modify_job(
+ name=master_event(
+ type="alive", master=self.opts["master"]
+ ),
+ schedule=schedule,
+ )
- if self.opts['master_failback'] and 'master_list' in self.opts:
- if self.opts['master'] != self.opts['master_list'][0]:
+ if (
+ self.opts["master_failback"]
+ and "master_list" in self.opts
+ ):
+ if self.opts["master"] != self.opts["master_list"][0]:
schedule = {
- 'function': 'status.ping_master',
- 'seconds': self.opts['master_failback_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master_list'][0]}
+ "function": "status.ping_master",
+ "seconds": self.opts[
+ "master_failback_interval"
+ ],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {
+ "master": self.opts["master_list"][0]
+ },
}
- self.schedule.modify_job(name=master_event(type='failback'),
- schedule=schedule)
+ self.schedule.modify_job(
+ name=master_event(type="failback"),
+ schedule=schedule,
+ )
else:
- self.schedule.delete_job(name=master_event(type='failback'), persist=True)
+ self.schedule.delete_job(
+ name=master_event(type="failback"), persist=True
+ )
else:
self.restart = True
self.io_loop.stop()
- elif tag.startswith(master_event(type='connected')):
+ elif tag.startswith(master_event(type="connected")):
            # Handle this event only once, otherwise it will pollute the log.
            # Also, if the master type is failover, all the reconnection work
            # is done by the `disconnected` event handler, so this event should
            # never happen; check anyway to be sure.
- if not self.connected and self.opts['master_type'] != 'failover':
- log.info('Connection to master %s re-established', self.opts['master'])
+ if not self.connected and self.opts["master_type"] != "failover":
+ log.info("Connection to master %s re-established", self.opts["master"])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
- if self.opts['transport'] != 'tcp':
+ if self.opts["transport"] != "tcp":
schedule = {
- 'function': 'status.master',
- 'seconds': self.opts['master_alive_interval'],
- 'jid_include': True,
- 'maxrunning': 1,
- 'return_job': False,
- 'kwargs': {'master': self.opts['master'],
- 'connected': True}
+ "function": "status.master",
+ "seconds": self.opts["master_alive_interval"],
+ "jid_include": True,
+ "maxrunning": 1,
+ "return_job": False,
+ "kwargs": {"master": self.opts["master"], "connected": True},
}
- self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
- schedule=schedule)
- elif tag.startswith('__schedule_return'):
- # reporting current connection with master
- if data['schedule'].startswith(master_event(type='alive', master='')):
- if data['return']:
- log.debug(
- 'Connected to master %s',
- data['schedule'].split(master_event(type='alive', master=''))[1]
+ self.schedule.modify_job(
+ name=master_event(type="alive", master=self.opts["master"]),
+ schedule=schedule,
)
- self._return_pub(data, ret_cmd='_return', sync=False)
- elif tag.startswith('_salt_error'):
+ elif tag.startswith("__schedule_return"):
+ # reporting current connection with master
+ if data["schedule"].startswith(master_event(type="alive", master="")):
+ if data["return"]:
+ log.debug(
+ "Connected to master %s",
+ data["schedule"].split(master_event(type="alive", master=""))[
+ 1
+ ],
+ )
+ self._return_pub(data, ret_cmd="_return", sync=False)
+ elif tag.startswith("_salt_error"):
if self.connected:
- log.debug('Forwarding salt error event tag=%s', tag)
+ log.debug("Forwarding salt error event tag=%s", tag)
self._fire_master(data, tag, sync=False)
- elif tag.startswith('salt/auth/creds'):
- key = tuple(data['key'])
+ elif tag.startswith("salt/auth/creds"):
+ key = tuple(data["key"])
log.debug(
- 'Updating auth data for %s: %s -> %s',
- key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
+ "Updating auth data for %s: %s -> %s",
+ key,
+ salt.crypt.AsyncAuth.creds_map.get(key),
+ data["creds"],
)
- salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
- elif tag.startswith('__beacons_return'):
+ salt.crypt.AsyncAuth.creds_map[tuple(data["key"])] = data["creds"]
+ elif tag.startswith("__beacons_return"):
if self.connected:
- log.debug('Firing beacons to master')
- self._fire_master(events=data['beacons'])
+ log.debug("Firing beacons to master")
+ self._fire_master(events=data["beacons"])
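handle_event routes purely on tag prefixes, as the chain above shows; a toy sketch of that dispatch using the same tags (handlers reduced to names, and the master_event(...) branches omitted):

    def route(tag):
        prefixes = (
            "module_refresh", "pillar_refresh", "beacons_refresh",
            "matchers_refresh", "manage_schedule", "manage_beacons",
            "grains_refresh", "environ_setenv", "_minion_mine",
            "fire_master", "__schedule_return", "_salt_error",
            "salt/auth/creds", "__beacons_return",
        )
        for prefix in prefixes:
            if tag.startswith(prefix):
                return prefix
        return None  # unrecognized tags fall through

    assert route("manage_schedule") == "manage_schedule"
    assert route("salt/auth/creds") == "salt/auth/creds"
    assert route("some/other/tag") is None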
def cleanup_subprocesses(self):
- '''
+ """
Clean up subprocesses and spawned threads.
- '''
+ """
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
self.subprocess_list.cleanup()
@@ -2572,36 +2837,41 @@ class Minion(MinionBase):
self.schedule.cleanup_subprocesses()
def _setup_core(self):
- '''
+ """
Set up the core minion attributes.
This is safe to call multiple times.
- '''
+ """
if not self.ready:
# First call. Initialize.
- self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+ (
+ self.functions,
+ self.returners,
+ self.function_errors,
+ self.executors,
+ ) = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
-# self.matcher = Matcher(self.opts, self.functions)
+ # self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
if self.beacons_leader:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
- uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
- self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
- self.grains_cache = self.opts['grains']
+ uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
+ self.proc_dir = get_proc_dir(self.opts["cachedir"], uid=uid)
+ self.grains_cache = self.opts["grains"]
self.ready = True
def setup_beacons(self, before_connect=False):
- '''
+ """
Set up the beacons.
This is safe to call multiple times.
- '''
+ """
        # In a multi-master configuration only one minion shall execute beacons
if not self.beacons_leader:
return
self._setup_core()
- loop_interval = self.opts['loop_interval']
- if 'beacons' not in self.periodic_callbacks:
+ loop_interval = self.opts["loop_interval"]
+ if "beacons" not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
@@ -2610,51 +2880,53 @@ class Minion(MinionBase):
try:
beacons = self.process_beacons(self.functions)
except Exception: # pylint: disable=broad-except
- log.critical('The beacon errored: ', exc_info=True)
+ log.critical("The beacon errored: ", exc_info=True)
if beacons:
- event = salt.utils.event.get_event('minion',
- opts=self.opts,
- listen=False)
- event.fire_event({'beacons': beacons}, '__beacons_return')
+ event = salt.utils.event.get_event(
+ "minion", opts=self.opts, listen=False
+ )
+ event.fire_event({"beacons": beacons}, "__beacons_return")
event.destroy()
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
- self.add_periodic_callback('beacons', handle_beacons)
+ self.add_periodic_callback("beacons", handle_beacons)
def setup_scheduler(self, before_connect=False):
- '''
+ """
Set up the scheduler.
This is safe to call multiple times.
- '''
+ """
self._setup_core()
- loop_interval = self.opts['loop_interval']
+ loop_interval = self.opts["loop_interval"]
- if 'schedule' not in self.periodic_callbacks:
- if 'schedule' not in self.opts:
- self.opts['schedule'] = {}
- if not hasattr(self, 'schedule'):
+ if "schedule" not in self.periodic_callbacks:
+ if "schedule" not in self.opts:
+ self.opts["schedule"] = {}
+ if not hasattr(self, "schedule"):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
- cleanup=[master_event(type='alive')])
+ cleanup=[master_event(type="alive")],
+ )
try:
- if self.opts['grains_refresh_every']: # In minutes, not seconds!
+ if self.opts["grains_refresh_every"]: # In minutes, not seconds!
log.debug(
- 'Enabling the grains refresher. Will run every %d minute(s).',
- self.opts['grains_refresh_every']
+ "Enabling the grains refresher. Will run every %d minute(s).",
+ self.opts["grains_refresh_every"],
)
- self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
+ self._refresh_grains_watcher(abs(self.opts["grains_refresh_every"]))
except Exception as exc: # pylint: disable=broad-except
log.error(
- 'Exception occurred in attempt to initialize grain refresh '
- 'routine during minion tune-in: %s', exc
+ "Exception occurred in attempt to initialize grain refresh "
+ "routine during minion tune-in: %s",
+ exc,
)
# TODO: actually listen to the return and change period
@@ -2665,13 +2937,13 @@ class Minion(MinionBase):
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
- self.add_periodic_callback('schedule', handle_schedule)
+ self.add_periodic_callback("schedule", handle_schedule)
def add_periodic_callback(self, name, method, interval=1):
- '''
+ """
Add a periodic callback to the event loop and call its start method.
If a callback by the given name exists this method returns False
- '''
+ """
if name in self.periodic_callbacks:
return False
self.periodic_callbacks[name] = salt.ext.tornado.ioloop.PeriodicCallback(
@@ -2681,10 +2953,10 @@ class Minion(MinionBase):
return True
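add_periodic_callback above wraps tornado's PeriodicCallback (vendored as salt.ext.tornado). A standalone sketch with stock tornado; note that PeriodicCallback takes its period in milliseconds:

    import tornado.ioloop

    def tick():
        print("tick")
        tornado.ioloop.IOLoop.current().stop()  # stop after the first tick

    interval = 1  # seconds
    cb = tornado.ioloop.PeriodicCallback(tick, interval * 1000)  # ms
    cb.start()
    tornado.ioloop.IOLoop.current().start()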
def remove_periodic_callback(self, name):
- '''
+ """
Remove a periodic callback.
If a callback by the given name does not exist this method returns False
- '''
+ """
callback = self.periodic_callbacks.pop(name, None)
if callback is None:
return False
@@ -2693,23 +2965,23 @@ class Minion(MinionBase):
# Main Minion Tune In
def tune_in(self, start=True):
- '''
+ """
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
- '''
+ """
self._pre_tune()
- log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
+ log.debug("Minion '%s' trying to tune in", self.opts["id"])
if start:
- if self.opts.get('beacons_before_connect', False):
+ if self.opts.get("beacons_before_connect", False):
self.setup_beacons(before_connect=True)
- if self.opts.get('scheduler_before_connect', False):
+ if self.opts.get("scheduler_before_connect", False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
- log.info('Minion is ready to receive requests!')
+ log.info("Minion is ready to receive requests!")
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
@@ -2723,59 +2995,74 @@ class Minion(MinionBase):
self.setup_beacons()
self.setup_scheduler()
- self.add_periodic_callback('cleanup', self.cleanup_subprocesses)
+ self.add_periodic_callback("cleanup", self.cleanup_subprocesses)
# schedule the stuff that runs every interval
- ping_interval = self.opts.get('ping_interval', 0) * 60
+ ping_interval = self.opts.get("ping_interval", 0) * 60
if ping_interval > 0 and self.connected:
+
def ping_master():
try:
+
def ping_timeout_handler(*_):
- if self.opts.get('auth_safemode', False):
- log.error('** Master Ping failed. Attempting to restart minion**')
- delay = self.opts.get('random_reauth_delay', 5)
- log.info('delaying random_reauth_delay %ss', delay)
+ if self.opts.get("auth_safemode", False):
+ log.error(
+ "** Master Ping failed. Attempting to restart minion**"
+ )
+ delay = self.opts.get("random_reauth_delay", 5)
+ log.info("delaying random_reauth_delay %ss", delay)
try:
- self.functions['service.restart'](service_name())
+ self.functions["service.restart"](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
- 'ping_interval reached without response '
- 'from the master, but service.restart '
- 'could not be run to restart the minion '
- 'daemon. ping_interval requires that the '
- 'minion is running under an init system.'
+ "ping_interval reached without response "
+ "from the master, but service.restart "
+ "could not be run to restart the minion "
+ "daemon. ping_interval requires that the "
+ "minion is running under an init system."
)
- self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
+ self._fire_master(
+ "ping",
+ "minion_ping",
+ sync=False,
+ timeout_handler=ping_timeout_handler,
+ )
except Exception: # pylint: disable=broad-except
- log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
- self.remove_periodic_callback('ping')
- self.add_periodic_callback('ping', ping_master, ping_interval)
+ log.warning(
+ "Attempt to ping master failed.", exc_on_loglevel=logging.DEBUG
+ )
+
+ self.remove_periodic_callback("ping")
+ self.add_periodic_callback("ping", ping_master, ping_interval)
# add handler to subscriber
- if hasattr(self, 'pub_channel') and self.pub_channel is not None:
+ if hasattr(self, "pub_channel") and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
- elif self.opts.get('master_type') != 'disable':
- log.error('No connection to master found. Scheduled jobs will not run.')
+ elif self.opts.get("master_type") != "disable":
+ log.error("No connection to master found. Scheduled jobs will not run.")
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
- except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
+ except (
+ KeyboardInterrupt,
+ RuntimeError,
+ ): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
- if payload is not None and payload['enc'] == 'aes':
- if self._target_load(payload['load']):
- self._handle_decoded_payload(payload['load'])
- elif self.opts['zmq_filtering']:
+ if payload is not None and payload["enc"] == "aes":
+ if self._target_load(payload["load"]):
+ self._handle_decoded_payload(payload["load"])
+ elif self.opts["zmq_filtering"]:
            # When filtering is enabled, we'd like to know when the minion sees something it shouldn't
log.trace(
- 'Broadcast message received not for this minion, Load: %s',
- payload['load']
+ "Broadcast message received not for this minion, Load: %s",
+ payload["load"],
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
@@ -2783,8 +3070,12 @@ class Minion(MinionBase):
def _target_load(self, load):
# Verify that the publication is valid
- if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
- or 'arg' not in load:
+ if (
+ "tgt" not in load
+ or "jid" not in load
+ or "fun" not in load
+ or "arg" not in load
+ ):
return False
# Verify that the publication applies to this minion
@@ -2794,58 +3085,62 @@ class Minion(MinionBase):
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
- if 'tgt_type' in load:
- match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
+ if "tgt_type" in load:
+ match_func = self.matchers.get(
+ "{0}_match.match".format(load["tgt_type"]), None
+ )
if match_func is None:
return False
- if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
- delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
- if not match_func(load['tgt'], delimiter=delimiter):
+ if load["tgt_type"] in ("grain", "grain_pcre", "pillar"):
+ delimiter = load.get("delimiter", DEFAULT_TARGET_DELIM)
+ if not match_func(load["tgt"], delimiter=delimiter):
return False
- elif not match_func(load['tgt']):
+ elif not match_func(load["tgt"]):
return False
else:
- if not self.matchers['glob_match.match'](load['tgt']):
+ if not self.matchers["glob_match.match"](load["tgt"]):
return False
return True
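A sketch of the _target_load matcher lookup above, with a toy registry standing in for salt.loader.matchers; the glob matcher here is simplified to fnmatch:

    import fnmatch

    minion_id = "web1"  # hypothetical minion id
    matchers = {
        "glob_match.match": lambda tgt: fnmatch.fnmatch(minion_id, tgt),
        "list_match.match": lambda tgt: minion_id in tgt.split(","),
    }

    def target_match(load):
        if "tgt_type" in load:
            match_func = matchers.get("{0}_match.match".format(load["tgt_type"]))
            if match_func is None:
                return False
            return match_func(load["tgt"])
        return matchers["glob_match.match"](load["tgt"])

    assert target_match({"tgt": "web*", "tgt_type": "glob"}) is True
    assert target_match({"tgt": "db1,web1", "tgt_type": "list"}) is True
    assert target_match({"tgt": "db*"}) is False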
def destroy(self):
- '''
+ """
Tear down the minion
- '''
+ """
if self._running is False:
return
self._running = False
- if hasattr(self, 'schedule'):
+ if hasattr(self, "schedule"):
del self.schedule
- if hasattr(self, 'pub_channel') and self.pub_channel is not None:
+ if hasattr(self, "pub_channel") and self.pub_channel is not None:
self.pub_channel.on_recv(None)
- if hasattr(self.pub_channel, 'close'):
+ if hasattr(self.pub_channel, "close"):
self.pub_channel.close()
del self.pub_channel
- if hasattr(self, 'periodic_callbacks'):
+ if hasattr(self, "periodic_callbacks"):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
# pylint: disable=W1701
def __del__(self):
self.destroy()
+
# pylint: enable=W1701
class Syndic(Minion):
- '''
+ """
    Make a Syndic minion; this minion will use the minion keys on the
master to authenticate with a higher level master.
- '''
+ """
+
def __init__(self, opts, **kwargs):
- self._syndic_interface = opts.get('interface')
+ self._syndic_interface = opts.get("interface")
self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
- opts['auth_safemode'] = True
- opts['loop_interval'] = 1
+ opts["auth_safemode"] = True
+ opts["loop_interval"] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
@@ -2854,96 +3149,94 @@ class Syndic(Minion):
self.pub_future = None
def _handle_decoded_payload(self, data):
- '''
+ """
Override this method if you wish to handle the decoded data
differently.
- '''
+ """
# TODO: even do this??
- data['to'] = int(data.get('to', self.opts['timeout'])) - 1
+ data["to"] = int(data.get("to", self.opts["timeout"])) - 1
# Only forward the command if it didn't originate from ourselves
- if data.get('master_id', 0) != self.opts.get('master_id', 1):
+ if data.get("master_id", 0) != self.opts.get("master_id", 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
- '''
+ """
Take the now clear load and forward it on to the client cmd
- '''
+ """
# Set up default tgt_type
- if 'tgt_type' not in data:
- data['tgt_type'] = 'glob'
+ if "tgt_type" not in data:
+ data["tgt_type"] = "glob"
kwargs = {}
# optionally add a few fields to the publish data
- for field in ('master_id', # which master the job came from
- 'user', # which user ran the job
- ):
+ for field in (
+ "master_id", # which master the job came from
+ "user", # which user ran the job
+ ):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
- log.warning('Unable to forward pub data: %s', args[1])
+ log.warning("Unable to forward pub data: %s", args[1])
return True
with salt.ext.tornado.stack_context.ExceptionStackContext(timeout_handler):
- self.local.pub_async(data['tgt'],
- data['fun'],
- data['arg'],
- data['tgt_type'],
- data['ret'],
- data['jid'],
- data['to'],
- io_loop=self.io_loop,
- callback=lambda _: None,
- **kwargs)
+ self.local.pub_async(
+ data["tgt"],
+ data["fun"],
+ data["arg"],
+ data["tgt_type"],
+ data["ret"],
+ data["jid"],
+ data["to"],
+ io_loop=self.io_loop,
+ callback=lambda _: None,
+ **kwargs
+ )
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
- if self.opts['enable_legacy_startup_events']:
+ if self.opts["enable_legacy_startup_events"]:
            # Old style event. Defaults to False in Sodium release.
self._fire_master(
- 'Syndic {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- 'syndic_start',
+ "Syndic {0} started at {1}".format(self.opts["id"], time.asctime()),
+ "syndic_start",
sync=False,
)
self._fire_master(
- 'Syndic {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- tagify([self.opts['id'], 'start'], 'syndic'),
+ "Syndic {0} started at {1}".format(self.opts["id"], time.asctime()),
+ tagify([self.opts["id"], "start"], "syndic"),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
- '''
+ """
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
- '''
+ """
# Instantiate the local client
self.local = salt.client.get_local_client(
- self.opts['_minion_conf_file'], io_loop=self.io_loop)
+ self.opts["_minion_conf_file"], io_loop=self.io_loop
+ )
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
- if payload is not None and payload['enc'] == 'aes':
- log.trace('Handling payload')
- self._handle_decoded_payload(payload['load'])
+ if payload is not None and payload["enc"] == "aes":
+ log.trace("Handling payload")
+ self._handle_decoded_payload(payload["load"])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@salt.ext.tornado.gen.coroutine
def reconnect(self):
- if hasattr(self, 'pub_channel'):
+ if hasattr(self, "pub_channel"):
self.pub_channel.on_recv(None)
- if hasattr(self.pub_channel, 'close'):
+ if hasattr(self.pub_channel, "close"):
self.pub_channel.close()
del self.pub_channel
@@ -2952,29 +3245,29 @@ class Syndic(Minion):
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
- self.opts['master'] = master
+ self.opts["master"] = master
self.pub_channel.on_recv(self._process_cmd_socket)
- log.info('Minion is ready to receive requests!')
+ log.info("Minion is ready to receive requests!")
raise salt.ext.tornado.gen.Return(self)
def destroy(self):
- '''
+ """
Tear down the syndic minion
- '''
+ """
        # We borrowed the local client's poller so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
- if hasattr(self, 'local'):
+ if hasattr(self, "local"):
del self.local
- if hasattr(self, 'forward_events'):
+ if hasattr(self, "forward_events"):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
- '''
+ """
    Make a MultiMaster syndic minion; this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.
@@ -2991,21 +3284,22 @@ class SyndicManager(MinionBase):
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
- '''
+ """
+
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
- opts['loop_interval'] = 1
+ opts["loop_interval"] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
- self.syndic_mode = self.opts.get('syndic_mode', 'sync')
- self.syndic_failover = self.opts.get('syndic_failover', 'random')
+ self.syndic_mode = self.opts.get("syndic_mode", "sync")
+ self.syndic_failover = self.opts.get("syndic_failover", "random")
- self.auth_wait = self.opts['acceptance_wait_time']
- self.max_auth_wait = self.opts['acceptance_wait_time_max']
+ self.auth_wait = self.opts["acceptance_wait_time"]
+ self.max_auth_wait = self.opts["acceptance_wait_time_max"]
self._has_master = threading.Event()
self.jid_forward_cache = set()
@@ -3027,37 +3321,35 @@ class SyndicManager(MinionBase):
self.pub_futures = {}
def _spawn_syndics(self):
- '''
+ """
Spawn all the coroutines which will sign in the syndics
- '''
+ """
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
- masters = self.opts['master']
+ masters = self.opts["master"]
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
- s_opts['master'] = master
+ s_opts["master"] = master
self._syndics[master] = self._connect_syndic(s_opts)
@salt.ext.tornado.gen.coroutine
def _connect_syndic(self, opts):
- '''
+ """
Create a syndic, and asynchronously connect it to a master
- '''
+ """
last = 0 # never have we signed in
- auth_wait = opts['acceptance_wait_time']
+ auth_wait = opts["acceptance_wait_time"]
failed = False
while True:
- log.debug(
- 'Syndic attempting to connect to %s',
- opts['master']
- )
+ log.debug("Syndic attempting to connect to %s", opts["master"])
try:
- syndic = Syndic(opts,
- timeout=self.SYNDIC_CONNECT_TIMEOUT,
- safe=False,
- io_loop=self.io_loop,
- )
+ syndic = Syndic(
+ opts,
+ timeout=self.SYNDIC_CONNECT_TIMEOUT,
+ safe=False,
+ io_loop=self.io_loop,
+ )
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
@@ -3065,16 +3357,14 @@ class SyndicManager(MinionBase):
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
- log.info(
- 'Syndic successfully connected to %s',
- opts['master']
- )
+ log.info("Syndic successfully connected to %s", opts["master"])
break
except SaltClientError as exc:
failed = True
log.error(
- 'Error while bringing up syndic for multi-syndic. Is the '
- 'master at %s responding?', opts['master']
+ "Error while bringing up syndic for multi-syndic. Is the "
+ "master at %s responding?",
+ opts["master"],
)
last = time.time()
if auth_wait < self.max_auth_wait:
@@ -3085,16 +3375,17 @@ class SyndicManager(MinionBase):
except Exception: # pylint: disable=broad-except
failed = True
log.critical(
- 'Unexpected error while connecting to %s',
- opts['master'], exc_info=True
+ "Unexpected error while connecting to %s",
+ opts["master"],
+ exc_info=True,
)
raise salt.ext.tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
- '''
+ """
Mark a master as dead. This will start the sign-in routine
- '''
+ """
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
@@ -3102,14 +3393,14 @@ class SyndicManager(MinionBase):
else:
# TODO: debug?
log.info(
- 'Attempting to mark %s as dead, although it is already '
- 'marked dead', master
+ "Attempting to mark %s as dead, although it is already " "marked dead",
+ master,
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
- '''
+ """
        Wrapper to call a given func on a syndic, with best effort to get the one you asked for
- '''
+ """
if kwargs is None:
kwargs = {}
successful = False
@@ -3117,8 +3408,9 @@ class SyndicManager(MinionBase):
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
- 'Unable to call %s on %s, that syndic is not connected',
- func, master
+ "Unable to call %s on %s, that syndic is not connected",
+ func,
+ master,
)
continue
@@ -3126,24 +3418,22 @@ class SyndicManager(MinionBase):
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
- log.error(
- 'Unable to call %s on %s, trying another...',
- func, master
- )
+ log.error("Unable to call %s on %s, trying another...", func, master)
self._mark_master_dead(master)
if not successful:
- log.critical('Unable to call %s on any masters!', func)
+ log.critical("Unable to call %s on any masters!", func)
def _return_pub_syndic(self, values, master_id=None):
- '''
+ """
        Wrapper to call '_return_pub_multi' on a syndic, with best effort to get the one you asked for
- '''
- func = '_return_pub_multi'
+ """
+ func = "_return_pub_multi"
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
- 'Unable to call %s on %s, that syndic is not connected',
- func, master
+ "Unable to call %s on %s, that syndic is not connected",
+ func,
+ master,
)
continue
@@ -3159,29 +3449,27 @@ class SyndicManager(MinionBase):
elif future.exception():
# Previous execution on this master returned an error
log.error(
- 'Unable to call %s on %s, trying another...',
- func, master
+ "Unable to call %s on %s, trying another...", func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
                # Add unsent data to the delayed list and try the next master
self.delayed.extend(data)
continue
- future = getattr(syndic_future.result(), func)(values,
- '_syndic_return',
- timeout=self._return_retry_timer(),
- sync=False)
+ future = getattr(syndic_future.result(), func)(
+ values, "_syndic_return", timeout=self._return_retry_timer(), sync=False
+ )
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
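The bookkeeping above pairs each in-flight future with the payload it carries, so a failed master can be retired and its payload re-queued. A hedged sketch of that pattern, with a plain dict standing in for self.pub_futures:

delayed = []      # payloads waiting for a healthy master
pub_futures = {}  # master -> (future, payload)

def record_send(master, future, payload):
    pub_futures[master] = (future, payload)

def handle_failure(master):
    # The previous send to this master errored: drop its future and put
    # the payload back on the queue so the next master can try it.
    _future, payload = pub_futures.pop(master)
    delayed.extend(payload)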
def iter_master_options(self, master_id=None):
- '''
+ """
Iterate (in order) over your options for master
- '''
+ """
masters = list(self._syndics.keys())
- if self.opts['syndic_failover'] == 'random':
+ if self.opts["syndic_failover"] == "random":
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
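iter_master_options therefore yields the requested master first (when known) and randomizes the rest under the default "random" failover policy. A rough stand-alone model of that ordering, with helper names of my own choosing:

from random import shuffle

def master_order(masters, failover="random", preferred=None):
    masters = list(masters)
    if failover == "random":
        shuffle(masters)
    if preferred in masters:
        # best effort: serve the explicitly requested master first
        masters.remove(preferred)
        masters.insert(0, preferred)
    return masters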
@@ -3204,16 +3492,17 @@ class SyndicManager(MinionBase):
# Syndic Tune In
def tune_in(self):
- '''
+ """
Lock onto the publisher. This is the main event loop for the syndic
- '''
+ """
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
- self.opts['_minion_conf_file'], io_loop=self.io_loop)
- self.local.event.subscribe('')
+ self.opts["_minion_conf_file"], io_loop=self.io_loop
+ )
+ self.local.event.subscribe("")
- log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
+ log.debug("SyndicManager '%s' trying to tune in", self.opts["id"])
# register the event sub to the poller
self.job_rets = {}
@@ -3223,9 +3512,9 @@ class SyndicManager(MinionBase):
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
- self.forward_events = salt.ext.tornado.ioloop.PeriodicCallback(self._forward_events,
- self.opts['syndic_event_forward_timeout'] * 1000,
- )
+ self.forward_events = salt.ext.tornado.ioloop.PeriodicCallback(
+ self._forward_events, self.opts["syndic_event_forward_timeout"] * 1000,
+ )
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
@@ -3236,67 +3525,76 @@ class SyndicManager(MinionBase):
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
- log.trace('Got event %s', mtag) # pylint: disable=no-member
+ log.trace("Got event %s", mtag) # pylint: disable=no-member
- tag_parts = mtag.split('/')
- if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
- salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
- 'return' in data:
- if 'jid' not in data:
+ tag_parts = mtag.split("/")
+ if (
+ len(tag_parts) >= 4
+ and tag_parts[1] == "job"
+ and salt.utils.jid.is_jid(tag_parts[2])
+ and tag_parts[3] == "ret"
+ and "return" in data
+ ):
+ if "jid" not in data:
# Not a job return
return
- if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
- log.debug('Return received with matching master_id, not forwarding')
+ if self.syndic_mode == "cluster" and data.get(
+ "master_id", 0
+ ) == self.opts.get("master_id", 1):
+ log.debug("Return received with matching master_id, not forwarding")
return
- master = data.get('master_id')
+ master = data.get("master_id")
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
- jdict['__fun__'] = data.get('fun')
- jdict['__jid__'] = data['jid']
- jdict['__load__'] = {}
- fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
+ jdict["__fun__"] = data.get("fun")
+ jdict["__jid__"] = data["jid"]
+ jdict["__load__"] = {}
+ fstr = "{0}.get_load".format(self.opts["master_job_cache"])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
- if data['jid'] not in self.jid_forward_cache:
- jdict['__load__'].update(
- self.mminion.returners[fstr](data['jid'])
- )
- self.jid_forward_cache.add(data['jid'])
- if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
+ if data["jid"] not in self.jid_forward_cache:
+ jdict["__load__"].update(self.mminion.returners[fstr](data["jid"]))
+ self.jid_forward_cache.add(data["jid"])
+ if (
+ len(self.jid_forward_cache)
+ > self.opts["syndic_jid_forward_cache_hwm"]
+ ):
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
- jdict['__master_id__'] = master
+ jdict["__master_id__"] = master
ret = {}
- for key in 'return', 'retcode', 'success':
+ for key in "return", "retcode", "success":
if key in data:
ret[key] = data[key]
- jdict[data['id']] = ret
+ jdict[data["id"]] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top-level master, don't forward all the minion events
- if self.syndic_mode == 'sync':
+ if self.syndic_mode == "sync":
# Add generic event aggregation here
- if 'retcode' not in data:
- self.raw_events.append({'data': data, 'tag': mtag})
+ if "retcode" not in data:
+ self.raw_events.append({"data": data, "tag": mtag})
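The jid_forward_cache logic above forwards each job load only once and trims the cache at a high-water mark. A compact model of that eviction, assuming Salt's timestamp-style jids (which sort oldest-first as strings):

def remember_jid(cache, jid, hwm):
    if jid in cache:
        return False  # this load was already forwarded
    cache.add(jid)
    if len(cache) > hwm:
        cache.remove(min(cache))  # evict the oldest jid
    return True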
def _forward_events(self):
- log.trace('Forwarding events') # pylint: disable=no-member
+ log.trace("Forwarding events") # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
- self._call_syndic('_fire_master',
- kwargs={'events': events,
- 'pretag': tagify(self.opts['id'], base='syndic'),
- 'timeout': self._return_retry_timer(),
- 'sync': False,
- },
- )
+ self._call_syndic(
+ "_fire_master",
+ kwargs={
+ "events": events,
+ "pretag": tagify(self.opts["id"], base="syndic"),
+ "timeout": self._return_retry_timer(),
+ "sync": False,
+ },
+ )
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
@@ -3309,47 +3607,54 @@ class SyndicManager(MinionBase):
class ProxyMinionManager(MinionManager):
- '''
+ """
Create the multi-minion interface but for proxy minions
- '''
- def _create_minion_object(self, opts, timeout, safe,
- io_loop=None, loaded_base_name=None,
- jid_queue=None):
- '''
+ """
+
+ def _create_minion_object(
+ self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None
+ ):
+ """
Helper function to return the correct type of object
- '''
- return ProxyMinion(opts,
- timeout,
- safe,
- io_loop=io_loop,
- loaded_base_name=loaded_base_name,
- jid_queue=jid_queue)
+ """
+ return ProxyMinion(
+ opts,
+ timeout,
+ safe,
+ io_loop=io_loop,
+ loaded_base_name=loaded_base_name,
+ jid_queue=jid_queue,
+ )
def _metaproxy_call(opts, fn_name):
metaproxy = salt.loader.metaproxy(opts)
try:
- metaproxy_name = opts['metaproxy']
+ metaproxy_name = opts["metaproxy"]
except KeyError:
- metaproxy_name = 'proxy'
- errmsg = 'No metaproxy key found in opts for id ' + opts['id'] + '. ' + \
- 'Defaulting to standard proxy minion'
+ metaproxy_name = "proxy"
+ errmsg = (
+ "No metaproxy key found in opts for id "
+ + opts["id"]
+ + ". "
+ + "Defaulting to standard proxy minion"
+ )
log.trace(errmsg)
- metaproxy_fn = metaproxy_name + '.' + fn_name
+ metaproxy_fn = metaproxy_name + "." + fn_name
return metaproxy[metaproxy_fn]
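_metaproxy_call leans on the loader behaving like a mapping of "<module>.<function>" keys. A stripped-down sketch of that dispatch, minus the logging and with a plain dict as the loader:

def dispatch(loader, opts, fn_name):
    # fall back to the standard "proxy" metaproxy when opts has no key
    metaproxy_name = opts.get("metaproxy", "proxy")
    return loader["{0}.{1}".format(metaproxy_name, fn_name)]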
class ProxyMinion(Minion):
- '''
+ """
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
- '''
+ """
# TODO: better name...
@salt.ext.tornado.gen.coroutine
def _post_master_init(self, master):
- '''
+ """
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
@@ -3361,52 +3666,53 @@ class ProxyMinion(Minion):
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
- '''
- mp_call = _metaproxy_call(self.opts, 'post_master_init')
+ """
+ mp_call = _metaproxy_call(self.opts, "post_master_init")
return mp_call(self, master)
def _target_load(self, load):
- '''
+ """
Verify that the publication is valid and applies to this minion
- '''
- mp_call = _metaproxy_call(self.opts, 'target_load')
+ """
+ mp_call = _metaproxy_call(self.opts, "target_load")
return mp_call(self, load)
def _handle_payload(self, payload):
- mp_call = _metaproxy_call(self.opts, 'handle_payload')
+ mp_call = _metaproxy_call(self.opts, "handle_payload")
return mp_call(self, payload)
@salt.ext.tornado.gen.coroutine
def _handle_decoded_payload(self, data):
- mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload')
+ mp_call = _metaproxy_call(self.opts, "handle_decoded_payload")
return mp_call(self, data)
@classmethod
def _target(cls, minion_instance, opts, data, connected):
- mp_call = _metaproxy_call(opts, 'target')
+ mp_call = _metaproxy_call(opts, "target")
return mp_call(cls, minion_instance, opts, data, connected)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
- mp_call = _metaproxy_call(opts, 'thread_return')
+ mp_call = _metaproxy_call(opts, "thread_return")
return mp_call(cls, minion_instance, opts, data)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
- mp_call = _metaproxy_call(opts, 'thread_multi_return')
+ mp_call = _metaproxy_call(opts, "thread_multi_return")
return mp_call(cls, minion_instance, opts, data)
class SProxyMinion(SMinion):
- '''
+ """
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
- '''
+ """
+
def gen_modules(self, initial_load=False, context=None):
- '''
+ """
Tell the minion to reload the execution modules
CLI Example:
@@ -3414,84 +3720,88 @@ class SProxyMinion(SMinion):
.. code-block:: bash
salt '*' sys.reload_modules
- '''
- self.opts['grains'] = salt.loader.grains(self.opts)
- self.opts['pillar'] = salt.pillar.get_pillar(
+ """
+ self.opts["grains"] = salt.loader.grains(self.opts)
+ self.opts["pillar"] = salt.pillar.get_pillar(
self.opts,
- self.opts['grains'],
- self.opts['id'],
- saltenv=self.opts['saltenv'],
- pillarenv=self.opts.get('pillarenv'),
+ self.opts["grains"],
+ self.opts["id"],
+ saltenv=self.opts["saltenv"],
+ pillarenv=self.opts.get("pillarenv"),
).compile_pillar()
- if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
+ if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
- 'dictionaries for id {id}. Check your pillar/options '
- 'configuration and contents. Salt-proxy aborted.'
- ).format(id=self.opts['id'])
+ "dictionaries for id {id}. Check your pillar/options "
+ "configuration and contents. Salt-proxy aborted."
+ ).format(id=self.opts["id"])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
- if 'proxy' not in self.opts:
- self.opts['proxy'] = self.opts['pillar']['proxy']
+ if "proxy" not in self.opts:
+ self.opts["proxy"] = self.opts["pillar"]["proxy"]
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy, context=context)
- self.functions = salt.loader.minion_mods(self.opts,
- utils=self.utils,
- notify=False,
- proxy=self.proxy,
- context=context)
- self.returners = salt.loader.returners(self.opts,
- functions=self.functions,
- proxy=self.proxy,
- context=context)
+ self.functions = salt.loader.minion_mods(
+ self.opts, utils=self.utils, notify=False, proxy=self.proxy, context=context
+ )
+ self.returners = salt.loader.returners(
+ self.opts, functions=self.functions, proxy=self.proxy, context=context
+ )
self.matchers = salt.loader.matchers(self.opts)
- self.functions['sys.reload_modules'] = self.gen_modules
- self.executors = salt.loader.executors(self.opts,
- functions=self.functions,
- proxy=self.proxy,
- context=context)
+ self.functions["sys.reload_modules"] = self.gen_modules
+ self.executors = salt.loader.executors(
+ self.opts, functions=self.functions, proxy=self.proxy, context=context
+ )
- fq_proxyname = self.opts['proxy']['proxytype']
+ fq_proxyname = self.opts["proxy"]["proxytype"]
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
- self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
+ self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])
- self.functions.pack['__proxy__'] = self.proxy
- self.proxy.pack['__salt__'] = self.functions
- self.proxy.pack['__ret__'] = self.returners
- self.proxy.pack['__pillar__'] = self.opts['pillar']
+ self.functions.pack["__proxy__"] = self.proxy
+ self.proxy.pack["__salt__"] = self.functions
+ self.proxy.pack["__ret__"] = self.returners
+ self.proxy.pack["__pillar__"] = self.opts["pillar"]
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy, context=context)
- self.proxy.pack['__utils__'] = self.utils
+ self.proxy.pack["__utils__"] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
- if ('{0}.init'.format(fq_proxyname) not in self.proxy
- or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
- errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
- 'Check your proxymodule. Salt-proxy aborted.'
+ if (
+ "{0}.init".format(fq_proxyname) not in self.proxy
+ or "{0}.shutdown".format(fq_proxyname) not in self.proxy
+ ):
+ errmsg = (
+ "Proxymodule {0} is missing an init() or a shutdown() or both. ".format(
+ fq_proxyname
+ )
+ + "Check your proxymodule. Salt-proxy aborted."
+ )
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
- self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
- proxy_init_fn = self.proxy[fq_proxyname + '.init']
+ self.module_executors = self.proxy.get(
+ "{0}.module_executors".format(fq_proxyname), lambda: []
+ )()
+ proxy_init_fn = self.proxy[fq_proxyname + ".init"]
proxy_init_fn(self.opts)
- self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
+ self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
- self.functions['saltutil.sync_grains'](saltenv='base')
- self.grains_cache = self.opts['grains']
+ self.functions["saltutil.sync_grains"](saltenv="base")
+ self.grains_cache = self.opts["grains"]
self.ready = True
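The init()/shutdown() guard in gen_modules is essentially an interface check against the loader mapping. A minimal stand-alone version, with RuntimeError standing in for SaltSystemExit:

def check_proxymodule(proxy, fq_proxyname):
    required = (
        "{0}.init".format(fq_proxyname),
        "{0}.shutdown".format(fq_proxyname),
    )
    missing = [fn for fn in required if fn not in proxy]
    if missing:
        raise RuntimeError(
            "Proxymodule {0} is missing {1}".format(fq_proxyname, ", ".join(missing))
        )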
diff --git a/salt/modules/__init__.py b/salt/modules/__init__.py
index 0d649bc1522..cb0f27fee8a 100644
--- a/salt/modules/__init__.py
+++ b/salt/modules/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-'''
+"""
Execution Module Directory
-'''
+"""
diff --git a/salt/modules/acme.py b/salt/modules/acme.py
index 76552281f55..5fb0062dab8 100644
--- a/salt/modules/acme.py
+++ b/salt/modules/acme.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
ACME / Let's Encrypt module
===========================
@@ -33,11 +33,12 @@ plugin credentials file needs to be passed in using the
Make sure the appropriate certbot plugin for the wanted DNS provider is
installed before using this module.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import logging
+
import datetime
+import logging
import os
# Import salt libs
@@ -46,56 +47,65 @@ from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
-LEA = salt.utils.path.which_bin(['certbot', 'letsencrypt',
- 'certbot-auto', 'letsencrypt-auto',
- '/opt/letsencrypt/letsencrypt-auto'])
-LE_LIVE = '/etc/letsencrypt/live/'
+LEA = salt.utils.path.which_bin(
+ [
+ "certbot",
+ "letsencrypt",
+ "certbot-auto",
+ "letsencrypt-auto",
+ "/opt/letsencrypt/letsencrypt-auto",
+ ]
+)
+LE_LIVE = "/etc/letsencrypt/live/"
def __virtual__():
- '''
+ """
Only work when letsencrypt-auto is installed
- '''
- return LEA is not None, 'The ACME execution module cannot be loaded: letsencrypt-auto not installed.'
+ """
+ return (
+ LEA is not None,
+ "The ACME execution module cannot be loaded: letsencrypt-auto not installed.",
+ )
def _cert_file(name, cert_type):
- '''
+ """
Return expected path of a Let's Encrypt live cert
- '''
- return os.path.join(LE_LIVE, name, '{0}.pem'.format(cert_type))
+ """
+ return os.path.join(LE_LIVE, name, "{0}.pem".format(cert_type))
def _expires(name):
- '''
+ """
Return the expiry date of a cert
:rtype: datetime
:return: Expiry date
- '''
- cert_file = _cert_file(name, 'cert')
+ """
+ cert_file = _cert_file(name, "cert")
# Use the salt module if available
- if 'tls.cert_info' in __salt__:
- expiry = __salt__['tls.cert_info'](cert_file).get('not_after', 0)
+ if "tls.cert_info" in __salt__:
+ expiry = __salt__["tls.cert_info"](cert_file).get("not_after", 0)
# Cobble it together using the openssl binary
else:
- openssl_cmd = 'openssl x509 -in {0} -noout -enddate'.format(cert_file)
+ openssl_cmd = "openssl x509 -in {0} -noout -enddate".format(cert_file)
# No %e format on my Linux'es here
strptime_sux_cmd = 'date --date="$({0} | cut -d= -f2)" +%s'.format(openssl_cmd)
- expiry = float(__salt__['cmd.shell'](strptime_sux_cmd, output_loglevel='quiet'))
+ expiry = float(__salt__["cmd.shell"](strptime_sux_cmd, output_loglevel="quiet"))
# expiry = datetime.datetime.strptime(expiry.split('=', 1)[-1], '%b %e %H:%M:%S %Y %Z')
return datetime.datetime.fromtimestamp(expiry)
def _renew_by(name, window=None):
- '''
+ """
Date before a certificate should be renewed
:param str name: Common Name of the certificate (DNS name of certificate)
:param int window: days before expiry date to renew
:rtype: datetime
:return: First renewal date
- '''
+ """
expiry = _expires(name)
if window is not None:
expiry = expiry - datetime.timedelta(days=window)
@@ -103,26 +113,28 @@ def _renew_by(name, window=None):
return expiry
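The window math in _renew_by is plain date arithmetic; a worked example with made-up dates:

import datetime

expiry = datetime.datetime(2020, 6, 15)
window = 14  # renew two weeks before expiry
renew_by = expiry - datetime.timedelta(days=window)
assert renew_by == datetime.datetime(2020, 6, 1)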
-def cert(name,
- aliases=None,
- email=None,
- webroot=None,
- test_cert=False,
- renew=None,
- keysize=None,
- server=None,
- owner='root',
- group='root',
- mode='0640',
- certname=None,
- preferred_challenges=None,
- tls_sni_01_port=None,
- tls_sni_01_address=None,
- http_01_port=None,
- http_01_address=None,
- dns_plugin=None,
- dns_plugin_credentials=None):
- '''
+def cert(
+ name,
+ aliases=None,
+ email=None,
+ webroot=None,
+ test_cert=False,
+ renew=None,
+ keysize=None,
+ server=None,
+ owner="root",
+ group="root",
+ mode="0640",
+ certname=None,
+ preferred_challenges=None,
+ tls_sni_01_port=None,
+ tls_sni_01_address=None,
+ http_01_port=None,
+ http_01_address=None,
+ dns_plugin=None,
+ dns_plugin_credentials=None,
+):
+ """
Obtain/renew a certificate from an ACME CA, probably Let's Encrypt.
:param name: Common Name of the certificate (DNS name of certificate)
@@ -167,103 +179,123 @@ def cert(name,
salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True \
renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public
- '''
+ """
- cmd = [LEA, 'certonly', '--non-interactive', '--agree-tos']
+ cmd = [LEA, "certonly", "--non-interactive", "--agree-tos"]
- supported_dns_plugins = ['cloudflare']
+ supported_dns_plugins = ["cloudflare"]
- cert_file = _cert_file(name, 'cert')
- if not __salt__['file.file_exists'](cert_file):
- log.debug('Certificate %s does not exist (yet)', cert_file)
+ cert_file = _cert_file(name, "cert")
+ if not __salt__["file.file_exists"](cert_file):
+ log.debug("Certificate %s does not exist (yet)", cert_file)
renew = False
elif needs_renewal(name, renew):
- log.debug('Certificate %s will be renewed', cert_file)
- cmd.append('--renew-by-default')
+ log.debug("Certificate %s will be renewed", cert_file)
+ cmd.append("--renew-by-default")
renew = True
if server:
- cmd.append('--server {0}'.format(server))
+ cmd.append("--server {0}".format(server))
if certname:
- cmd.append('--cert-name {0}'.format(certname))
+ cmd.append("--cert-name {0}".format(certname))
if test_cert:
if server:
- return {'result': False, 'comment': 'Use either server or test_cert, not both'}
- cmd.append('--test-cert')
+ return {
+ "result": False,
+ "comment": "Use either server or test_cert, not both",
+ }
+ cmd.append("--test-cert")
if webroot:
- cmd.append('--authenticator webroot')
+ cmd.append("--authenticator webroot")
if webroot is not True:
- cmd.append('--webroot-path {0}'.format(webroot))
+ cmd.append("--webroot-path {0}".format(webroot))
elif dns_plugin in supported_dns_plugins:
- if dns_plugin == 'cloudflare':
- cmd.append('--dns-cloudflare')
- cmd.append('--dns-cloudflare-credentials {0}'.format(dns_plugin_credentials))
+ if dns_plugin == "cloudflare":
+ cmd.append("--dns-cloudflare")
+ cmd.append(
+ "--dns-cloudflare-credentials {0}".format(dns_plugin_credentials)
+ )
else:
- return {'result': False, 'comment': 'DNS plugin \'{0}\' is not supported'.format(dns_plugin)}
+ return {
+ "result": False,
+ "comment": "DNS plugin '{0}' is not supported".format(dns_plugin),
+ }
else:
- cmd.append('--authenticator standalone')
+ cmd.append("--authenticator standalone")
if email:
- cmd.append('--email {0}'.format(email))
+ cmd.append("--email {0}".format(email))
if keysize:
- cmd.append('--rsa-key-size {0}'.format(keysize))
+ cmd.append("--rsa-key-size {0}".format(keysize))
- cmd.append('--domains {0}'.format(name))
+ cmd.append("--domains {0}".format(name))
if aliases is not None:
for dns in aliases:
- cmd.append('--domains {0}'.format(dns))
+ cmd.append("--domains {0}".format(dns))
if preferred_challenges:
- cmd.append('--preferred-challenges {}'.format(preferred_challenges))
+ cmd.append("--preferred-challenges {}".format(preferred_challenges))
if tls_sni_01_port:
- cmd.append('--tls-sni-01-port {}'.format(tls_sni_01_port))
+ cmd.append("--tls-sni-01-port {}".format(tls_sni_01_port))
if tls_sni_01_address:
- cmd.append('--tls-sni-01-address {}'.format(tls_sni_01_address))
+ cmd.append("--tls-sni-01-address {}".format(tls_sni_01_address))
if http_01_port:
- cmd.append('--http-01-port {}'.format(http_01_port))
+ cmd.append("--http-01-port {}".format(http_01_port))
if http_01_address:
- cmd.append('--http-01-address {}'.format(http_01_address))
+ cmd.append("--http-01-address {}".format(http_01_address))
- res = __salt__['cmd.run_all'](' '.join(cmd))
+ res = __salt__["cmd.run_all"](" ".join(cmd))
- if res['retcode'] != 0:
- if 'expand' in res['stderr']:
- cmd.append('--expand')
- res = __salt__['cmd.run_all'](' '.join(cmd))
- if res['retcode'] != 0:
- return {'result': False,
- 'comment': ('Certificate {0} renewal failed with:\n{1}'
- ''.format(name, res['stderr']))}
+ if res["retcode"] != 0:
+ if "expand" in res["stderr"]:
+ cmd.append("--expand")
+ res = __salt__["cmd.run_all"](" ".join(cmd))
+ if res["retcode"] != 0:
+ return {
+ "result": False,
+ "comment": (
+ "Certificate {0} renewal failed with:\n{1}"
+ "".format(name, res["stderr"])
+ ),
+ }
else:
- return {'result': False,
- 'comment': ('Certificate {0} renewal failed with:\n{1}'
- ''.format(name, res['stderr']))}
+ return {
+ "result": False,
+ "comment": (
+ "Certificate {0} renewal failed with:\n{1}"
+ "".format(name, res["stderr"])
+ ),
+ }
- if 'no action taken' in res['stdout']:
- comment = 'Certificate {0} unchanged'.format(cert_file)
+ if "no action taken" in res["stdout"]:
+ comment = "Certificate {0} unchanged".format(cert_file)
result = None
elif renew:
- comment = 'Certificate {0} renewed'.format(name)
+ comment = "Certificate {0} renewed".format(name)
result = True
else:
- comment = 'Certificate {0} obtained'.format(name)
+ comment = "Certificate {0} obtained".format(name)
result = True
- ret = {'comment': comment, 'not_after': expires(name), 'changes': {}, 'result': result}
- ret, _ = __salt__['file.check_perms'](_cert_file(name, 'privkey'),
- ret,
- owner, group, mode,
- follow_symlinks=True)
+ ret = {
+ "comment": comment,
+ "not_after": expires(name),
+ "changes": {},
+ "result": result,
+ }
+ ret, _ = __salt__["file.check_perms"](
+ _cert_file(name, "privkey"), ret, owner, group, mode, follow_symlinks=True
+ )
return ret
def certs():
- '''
+ """
Return a list of active certificates
CLI example:
@@ -271,12 +303,14 @@ def certs():
.. code-block:: bash
salt 'vhost.example.com' acme.certs
- '''
- return [item for item in __salt__['file.readdir'](LE_LIVE)[2:] if os.path.isdir(item)]
+ """
+    return [
+        item
+        for item in __salt__["file.readdir"](LE_LIVE)[2:]
+        # readdir yields bare names; test the path under LE_LIVE, not the cwd
+        if os.path.isdir(os.path.join(LE_LIVE, item))
+    ]
def info(name):
- '''
+ """
Return information about a certificate
:param str name: CommonName of certificate
@@ -291,28 +325,28 @@ def info(name):
.. code-block:: bash
salt 'gitlab.example.com' acme.info dev.example.com
- '''
+ """
if not has(name):
return {}
- cert_file = _cert_file(name, 'cert')
+ cert_file = _cert_file(name, "cert")
# Use the tls salt module if available
- if 'tls.cert_info' in __salt__:
- cert_info = __salt__['tls.cert_info'](cert_file)
+ if "tls.cert_info" in __salt__:
+ cert_info = __salt__["tls.cert_info"](cert_file)
# Strip out the extensions object contents;
# these trip over our poor state output
# and they serve no real purpose here anyway
- cert_info['extensions'] = cert_info['extensions'].keys()
- elif 'x509.read_certificate' in __salt__:
- cert_info = __salt__['x509.read_certificate'](cert_file)
+ cert_info["extensions"] = cert_info["extensions"].keys()
+ elif "x509.read_certificate" in __salt__:
+ cert_info = __salt__["x509.read_certificate"](cert_file)
else:
# Cobble it together using the openssl binary
- openssl_cmd = 'openssl x509 -in {0} -noout -text'.format(cert_file)
- cert_info = {'text': __salt__['cmd.run'](openssl_cmd, output_loglevel='quiet')}
+ openssl_cmd = "openssl x509 -in {0} -noout -text".format(cert_file)
+ cert_info = {"text": __salt__["cmd.run"](openssl_cmd, output_loglevel="quiet")}
return cert_info
def expires(name):
- '''
+ """
The expiry date of a certificate in ISO format
:param str name: CommonName of certificate
@@ -324,12 +358,12 @@ def expires(name):
.. code-block:: bash
salt 'gitlab.example.com' acme.expires dev.example.com
- '''
+ """
return _expires(name).isoformat()
def has(name):
- '''
+ """
Test if a certificate is in the Let's Encrypt Live directory
:param str name: CommonName of certificate
@@ -341,24 +375,24 @@ def has(name):
if __salt__['acme.has']('dev.example.com'):
log.info('That is one nice certificate you have there!')
- '''
- return __salt__['file.file_exists'](_cert_file(name, 'cert'))
+ """
+ return __salt__["file.file_exists"](_cert_file(name, "cert"))
def renew_by(name, window=None):
- '''
+ """
Date in ISO format when a certificate should first be renewed
:param str name: CommonName of certificate
:param int window: number of days before expiry when renewal should take place
:rtype: str
:return: Date of certificate renewal in ISO format.
- '''
+ """
return _renew_by(name, window).isoformat()
def needs_renewal(name, window=None):
- '''
+ """
Check if a certificate needs renewal
:param str name: CommonName of certificate
@@ -374,11 +408,13 @@ def needs_renewal(name, window=None):
__salt__['acme.cert']('dev.example.com', **kwargs)
else:
log.info('Your certificate is still good')
- '''
+ """
if window:
- if str(window).lower in ('force', 'true'):
+ if str(window).lower() in ("force", "true"):
return True
- if not (isinstance(window, int) or (hasattr(window, 'isdigit') and window.isdigit())):
+ if not (
+ isinstance(window, int) or (hasattr(window, "isdigit") and window.isdigit())
+ ):
raise SaltInvocationError(
'The argument "window", if provided, must be one of the following : '
'True (boolean), "force" or "Force" (str) or a numerical value in days.'
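For reference, the window validation above boils down to three cases; a self-contained sketch (the helper name is my own, not part of the module):

def parse_window(window):
    # True/"force" (any case) forces renewal outright
    if str(window).lower() in ("force", "true"):
        return "force"
    # ints and digit strings are a day count
    if isinstance(window, int) or (hasattr(window, "isdigit") and window.isdigit()):
        return int(window)
    raise ValueError("window must be True, 'force', or a number of days")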
diff --git a/salt/modules/aix_group.py b/salt/modules/aix_group.py
index 18229107c5f..e1587152122 100644
--- a/salt/modules/aix_group.py
+++ b/salt/modules/aix_group.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage groups on AIX
.. important::
@@ -7,13 +7,12 @@ Manage groups on Solaris
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
     <module-provider-override>`.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
-
log = logging.getLogger(__name__)
@@ -23,21 +22,24 @@ except ImportError:
pass
# Define the module's virtual name
-__virtualname__ = 'group'
+__virtualname__ = "group"
def __virtual__():
- '''
+ """
Set the group module if the kernel is AIX
- '''
- if __grains__['kernel'] == 'AIX':
+ """
+ if __grains__["kernel"] == "AIX":
return __virtualname__
- return (False, 'The aix_group execution module failed to load: '
- 'only available on AIX systems.')
+ return (
+ False,
+ "The aix_group execution module failed to load: "
+ "only available on AIX systems.",
+ )
def add(name, gid=None, system=False, root=None):
- '''
+ """
Add the specified group
CLI Example:
@@ -45,23 +47,23 @@ def add(name, gid=None, system=False, root=None):
.. code-block:: bash
salt '*' group.add foo 3456
- '''
- cmd = 'mkgroup '
+ """
+ cmd = "mkgroup "
if system and root is not None:
- cmd += '-a '
+ cmd += "-a "
if gid:
- cmd += 'id={0} '.format(gid)
+ cmd += "id={0} ".format(gid)
cmd += name
- ret = __salt__['cmd.run_all'](cmd, python_shell=False)
+ ret = __salt__["cmd.run_all"](cmd, python_shell=False)
- return not ret['retcode']
+ return not ret["retcode"]
def delete(name):
- '''
+ """
Remove the named group
CLI Example:
@@ -69,14 +71,14 @@ def delete(name):
.. code-block:: bash
salt '*' group.delete foo
- '''
- ret = __salt__['cmd.run_all']('rmgroup {0}'.format(name), python_shell=False)
+ """
+ ret = __salt__["cmd.run_all"]("rmgroup {0}".format(name), python_shell=False)
- return not ret['retcode']
+ return not ret["retcode"]
def info(name):
- '''
+ """
Return information about a group
CLI Example:
@@ -84,20 +86,22 @@ def info(name):
.. code-block:: bash
salt '*' group.info foo
- '''
+ """
try:
grinfo = grp.getgrnam(name)
except KeyError:
return {}
else:
- return {'name': grinfo.gr_name,
- 'passwd': grinfo.gr_passwd,
- 'gid': grinfo.gr_gid,
- 'members': grinfo.gr_mem}
+ return {
+ "name": grinfo.gr_name,
+ "passwd": grinfo.gr_passwd,
+ "gid": grinfo.gr_gid,
+ "members": grinfo.gr_mem,
+ }
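info() is a thin wrapper over the stdlib grp database. A stand-alone equivalent (POSIX-only, since grp does not exist on Windows):

import grp

def group_info(name):
    try:
        g = grp.getgrnam(name)
    except KeyError:
        return {}  # unknown group, same contract as above
    return {
        "name": g.gr_name,
        "passwd": g.gr_passwd,
        "gid": g.gr_gid,
        "members": g.gr_mem,
    }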
def getent(refresh=False):
- '''
+ """
Return info on all groups
CLI Example:
@@ -105,20 +109,20 @@ def getent(refresh=False):
.. code-block:: bash
salt '*' group.getent
- '''
- if 'group.getent' in __context__ and not refresh:
- return __context__['group.getent']
+ """
+ if "group.getent" in __context__ and not refresh:
+ return __context__["group.getent"]
ret = []
for grinfo in grp.getgrall():
ret.append(info(grinfo.gr_name))
- __context__['group.getent'] = ret
+ __context__["group.getent"] = ret
return ret
def chgid(name, gid):
- '''
+ """
Change the gid for a named group
CLI Example:
@@ -126,20 +130,20 @@ def chgid(name, gid):
.. code-block:: bash
salt '*' group.chgid foo 4376
- '''
- pre_gid = __salt__['file.group_to_gid'](name)
+ """
+ pre_gid = __salt__["file.group_to_gid"](name)
if gid == pre_gid:
return True
- cmd = 'chgroup id={0} {1}'.format(gid, name)
- __salt__['cmd.run'](cmd, python_shell=False)
- post_gid = __salt__['file.group_to_gid'](name)
+ cmd = "chgroup id={0} {1}".format(gid, name)
+ __salt__["cmd.run"](cmd, python_shell=False)
+ post_gid = __salt__["file.group_to_gid"](name)
if post_gid != pre_gid:
return post_gid == gid
return False
def adduser(name, username, root=None):
- '''
+ """
Add a user in the group.
CLI Example:
@@ -150,16 +154,16 @@ def adduser(name, username, root=None):
    Verifies whether a valid username 'bar' is a member of an existing group
    'foo'; if not, adds it.
- '''
- cmd = 'chgrpmem -m + {0} {1}'.format(username, name)
+ """
+ cmd = "chgrpmem -m + {0} {1}".format(username, name)
- retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
+ retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
return not retcode
def deluser(name, username, root=None):
- '''
+ """
Remove a user from the group.
CLI Example:
@@ -170,13 +174,13 @@ def deluser(name, username, root=None):
    Removes the member user 'bar' from the group 'foo'. If the group is not
    present, returns True.
- '''
- grp_info = __salt__['group.info'](name)
+ """
+ grp_info = __salt__["group.info"](name)
try:
- if username in grp_info['members']:
- cmd = 'chgrpmem -m - {0} {1}'.format(username, name)
- ret = __salt__['cmd.run'](cmd, python_shell=False)
- return not ret['retcode']
+ if username in grp_info["members"]:
+ cmd = "chgrpmem -m - {0} {1}".format(username, name)
+                # cmd.run returns a string; cmd.run_all provides 'retcode'
+                ret = __salt__["cmd.run_all"](cmd, python_shell=False)
+                return not ret["retcode"]
else:
return True
except Exception: # pylint: disable=broad-except
@@ -184,7 +188,7 @@ def deluser(name, username, root=None):
def members(name, members_list, root=None):
- '''
+ """
Replaces members of the group with a provided list.
CLI Example:
@@ -193,8 +197,8 @@ def members(name, members_list, root=None):
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,...
- '''
- cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name)
- retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
+ """
+ cmd = "chgrpmem -m = {0} {1}".format(members_list, name)
+ retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
return not retcode
diff --git a/salt/modules/aix_shadow.py b/salt/modules/aix_shadow.py
index dd6e9c9d4c5..be0955e3e2e 100644
--- a/salt/modules/aix_shadow.py
+++ b/salt/modules/aix_shadow.py
@@ -1,37 +1,39 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage account locks on AIX systems
.. versionadded:: 2018.3.0
:depends: none
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libraries
import logging
-
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'shadow'
+__virtualname__ = "shadow"
def __virtual__():
- '''
+ """
Only load if kernel is AIX
- '''
- if __grains__['kernel'] == 'AIX':
+ """
+ if __grains__["kernel"] == "AIX":
return __virtualname__
- return (False, 'The aix_shadow execution module failed to load: '
- 'only available on AIX systems.')
+ return (
+ False,
+ "The aix_shadow execution module failed to load: "
+ "only available on AIX systems.",
+ )
def login_failures(user):
- '''
+ """
Query for all accounts which have 3 or more login failures.
CLI Example:
@@ -39,15 +41,15 @@ def login_failures(user):
.. code-block:: bash
salt shadow.login_failures ALL
- '''
+ """
- cmd = 'lsuser -a unsuccessful_login_count {0}'.format(user)
+ cmd = "lsuser -a unsuccessful_login_count {0}".format(user)
cmd += " | grep -E 'unsuccessful_login_count=([3-9]|[0-9][0-9]+)'"
- out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=True)
ret = []
- lines = out['stdout'].splitlines()
+ lines = out["stdout"].splitlines()
for line in lines:
ret.append(line.split()[0])
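Since the grep filter already narrowed the lsuser output to offending accounts, the Python side only keeps the first whitespace-separated field of each line. A tiny illustration with a canned line:

def accounts_from_lsuser(stdout):
    return [line.split()[0] for line in stdout.splitlines() if line.strip()]

assert accounts_from_lsuser("guest unsuccessful_login_count=5\n") == ["guest"]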
@@ -55,7 +57,7 @@ def login_failures(user):
def locked(user):
- '''
+ """
Query for all accounts which are flagged as locked.
CLI Example:
@@ -63,15 +65,15 @@ def locked(user):
.. code-block:: bash
salt shadow.locked ALL
- '''
+ """
- cmd = 'lsuser -a account_locked {0}'.format(user)
+ cmd = "lsuser -a account_locked {0}".format(user)
cmd += ' | grep "account_locked=true"'
- out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=True)
ret = []
- lines = out['stdout'].splitlines()
+ lines = out["stdout"].splitlines()
for line in lines:
ret.append(line.split()[0])
@@ -79,7 +81,7 @@ def locked(user):
def unlock(user):
- '''
+ """
Unlock user for locked account
CLI Example:
@@ -87,10 +89,14 @@ def unlock(user):
.. code-block:: bash
salt shadow.unlock user
- '''
+ """
- cmd = 'chuser account_locked=false {0} | ' \
- 'chsec -f /etc/security/lastlog -a "unsuccessful_login_count=0" -s {0}'.format(user)
- ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)
+ cmd = (
+ "chuser account_locked=false {0} | "
+ 'chsec -f /etc/security/lastlog -a "unsuccessful_login_count=0" -s {0}'.format(
+ user
+ )
+ )
+ ret = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=True)
return ret
diff --git a/salt/modules/aixpkg.py b/salt/modules/aixpkg.py
index 4f9852b504d..fba8d785849 100644
--- a/salt/modules/aixpkg.py
+++ b/salt/modules/aixpkg.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Package support for AIX
.. important::
@@ -7,13 +7,13 @@ Package support for AIX
rpm packages on a minion, and it is using a different module (or gives an
error similar to *'pkg.install' is not available*), see :ref:`here
     <module-provider-override>`.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
+
import copy
import logging
-
+import os
# Import salt libs
import salt.utils.data
@@ -22,52 +22,49 @@ import salt.utils.path
import salt.utils.pkg
from salt.exceptions import CommandExecutionError
-
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'pkg'
+__virtualname__ = "pkg"
def __virtual__():
- '''
+ """
Set the virtual pkg module if the os is AIX
- '''
- if __grains__['os_family'] == 'AIX':
+ """
+ if __grains__["os_family"] == "AIX":
return __virtualname__
- return (False,
- 'Did not load AIX module on non-AIX OS.')
+ return (False, "Did not load AIX module on non-AIX OS.")
def _check_pkg(target):
- '''
+ """
Return name, version and if rpm package for specified target
- '''
+ """
ret = {}
- cmd = ['/usr/bin/lslpp', '-Lc', target]
- lines = __salt__['cmd.run'](
- cmd,
- python_shell=False).splitlines()
+ cmd = ["/usr/bin/lslpp", "-Lc", target]
+ lines = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
- name = ''
- version_num = ''
+ name = ""
+ version_num = ""
rpmpkg = False
for line in lines:
- if line.startswith('#'):
+ if line.startswith("#"):
continue
- comps = line.split(':')
+ comps = line.split(":")
if len(comps) < 7:
raise CommandExecutionError(
- 'Error occurred finding fileset/package',
- info={'errors': comps[1].strip()})
+ "Error occurred finding fileset/package",
+ info={"errors": comps[1].strip()},
+ )
# handle first matching line
- if 'R' in comps[6]:
+ if "R" in comps[6]:
name = comps[0]
rpmpkg = True
else:
- name = comps[1] # use fileset rather than rpm package
+ name = comps[1] # use fileset rather than rpm package
version_num = comps[2]
break
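_check_pkg relies on the colon-delimited layout of lslpp -Lc, where field 7 holds the type code and "R" marks an RPM package rather than an installp fileset. A demonstration against a canned (abbreviated) output line:

line = "bash:bash:4.3.30:::C:R:GNU Bourne Again shell:"
comps = line.split(":")
if "R" in comps[6]:
    name, rpmpkg = comps[0], True   # rpm package
else:
    name, rpmpkg = comps[1], False  # installp fileset
version_num = comps[2]
assert (name, version_num, rpmpkg) == ("bash", "4.3.30", True)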
@@ -76,15 +73,15 @@ def _check_pkg(target):
def _is_installed_rpm(name):
- '''
+ """
Returns True if the rpm package is installed. Otherwise returns False.
- '''
- cmd = ['/usr/bin/rpm', '-q', name]
- return __salt__['cmd.retcode'](cmd) == 0
+ """
+ cmd = ["/usr/bin/rpm", "-q", name]
+ return __salt__["cmd.retcode"](cmd) == 0
def list_pkgs(versions_as_list=False, **kwargs):
- '''
+ """
List the filesets/rpm packages currently installed as a dict:
.. code-block:: python
@@ -96,21 +93,21 @@ def list_pkgs(versions_as_list=False, **kwargs):
.. code-block:: bash
salt '*' pkg.list_pkgs
- '''
+ """
ret = {}
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
- if any([salt.utils.data.is_true(kwargs.get(x))
- for x in ('removed', 'purge_desired')]):
+ if any(
+ [salt.utils.data.is_true(kwargs.get(x)) for x in ("removed", "purge_desired")]
+ ):
return ret
- if 'pkg.list_pkgs' in __context__:
+ if "pkg.list_pkgs" in __context__:
if versions_as_list:
- return __context__['pkg.list_pkgs']
+ return __context__["pkg.list_pkgs"]
else:
- ret = copy.deepcopy(
- __context__['pkg.list_pkgs'])
- __salt__['pkg_resource.stringify'](ret)
+ ret = copy.deepcopy(__context__["pkg.list_pkgs"])
+ __salt__["pkg_resource.stringify"](ret)
return ret
    # cmd returns information colon delimited in a single line, format
@@ -124,41 +121,36 @@ def list_pkgs(versions_as_list=False, **kwargs):
#
# where Type codes: F -- Installp Fileset, P -- Product, C -- Component,
# T -- Feature, R -- RPM Package
- cmd = '/usr/bin/lslpp -Lc'
- lines = __salt__['cmd.run'](
- cmd,
- python_shell=False).splitlines()
+ cmd = "/usr/bin/lslpp -Lc"
+ lines = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
for line in lines:
- if line.startswith('#'):
+ if line.startswith("#"):
continue
- comps = line.split(':')
+ comps = line.split(":")
if len(comps) < 7:
continue
- if 'R' in comps[6]:
+ if "R" in comps[6]:
name = comps[0]
else:
- name = comps[1] # use fileset rather than rpm package
+ name = comps[1] # use fileset rather than rpm package
version_num = comps[2]
- __salt__['pkg_resource.add_pkg'](
- ret,
- name,
- version_num)
+ __salt__["pkg_resource.add_pkg"](ret, name, version_num)
- __salt__['pkg_resource.sort_pkglist'](ret)
- __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
+ __salt__["pkg_resource.sort_pkglist"](ret)
+ __context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if not versions_as_list:
- __salt__['pkg_resource.stringify'](ret)
+ __salt__["pkg_resource.stringify"](ret)
return ret
def version(*names, **kwargs):
- '''
+ """
Common interface for obtaining the version of installed fileset/rpm package.
CLI Example:
@@ -167,12 +159,12 @@ def version(*names, **kwargs):
salt '*' pkg.version vim
salt '*' pkg.version foo bar baz
- '''
- return __salt__['pkg_resource.version'](*names, **kwargs)
+ """
+ return __salt__["pkg_resource.version"](*names, **kwargs)
def _is_installed(name, **kwargs):
- '''
+ """
Returns True if the fileset/rpm package is installed. Otherwise returns False.
CLI Example:
@@ -180,13 +172,13 @@ def _is_installed(name, **kwargs):
.. code-block:: bash
salt '*' pkg._is_installed bash
- '''
- cmd = ['/usr/bin/lslpp', '-Lc', name]
- return __salt__['cmd.retcode'](cmd) == 0
+ """
+ cmd = ["/usr/bin/lslpp", "-Lc", name]
+ return __salt__["cmd.retcode"](cmd) == 0
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
- '''
+ """
Install the named fileset(s)/rpm package(s).
name
@@ -225,14 +217,15 @@ def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwa
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install pkgs='["foo", "bar"]'
- '''
+ """
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
- log.debug('Removing these fileset(s)/rpm package(s) {0}: {1}'
- .format(name, targets))
+ log.debug(
+ "Removing these fileset(s)/rpm package(s) {0}: {1}".format(name, targets)
+ )
# Get a list of the currently installed pkgs.
old = list_pkgs()
@@ -241,54 +234,51 @@ def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwa
errors = []
for target in targets:
filename = os.path.basename(target)
- if filename.endswith('.rpm'):
- if _is_installed_rpm(filename.split('.aix')[0]):
+ if filename.endswith(".rpm"):
+ if _is_installed_rpm(filename.split(".aix")[0]):
continue
-            cmdflags = ' -Uivh '
+            # build the flags as separate argv entries; a single " -Uivh "
+            # string would reach rpm as one malformed argument
+            cmdflags = ["-Uivh"]
             if test:
-                cmdflags += ' --test'
+                cmdflags.append("--test")
-            cmd = ['/usr/bin/rpm', cmdflags, target]
-            out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
+            cmd = ["/usr/bin/rpm"] + cmdflags + [target]
+            out = __salt__["cmd.run_all"](cmd, output_loglevel="trace")
else:
if _is_installed(target):
continue
- cmd = '/usr/sbin/installp -acYXg'
+ cmd = "/usr/sbin/installp -acYXg"
if test:
- cmd += 'p'
- cmd += ' -d '
+ cmd += "p"
+ cmd += " -d "
dirpath = os.path.dirname(target)
- cmd += dirpath +' '+ filename
- out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
+ cmd += dirpath + " " + filename
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace")
- if 0 != out['retcode']:
- errors.append(out['stderr'])
+ if 0 != out["retcode"]:
+ errors.append(out["stderr"])
# Get a list of the packages after the uninstall
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
- 'Problems encountered installing filesets(s)/package(s)',
- info={
- 'changes': ret,
- 'errors': errors
- }
+ "Problems encountered installing filesets(s)/package(s)",
+ info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
- return 'Test succeeded.'
+ return "Test succeeded."
return ret
def remove(name=None, pkgs=None, **kwargs):
- '''
+ """
Remove specified fileset(s)/rpm package(s).
name
@@ -314,14 +304,15 @@ def remove(name=None, pkgs=None, **kwargs):
salt '*' pkg.remove xlC.rte
salt '*' pkg.remove Firefox.base.adt
salt '*' pkg.remove pkgs='["foo", "bar"]'
- '''
+ """
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
- log.debug('Removing these fileset(s)/rpm package(s) {0}: {1}'
- .format(name, targets))
+ log.debug(
+ "Removing these fileset(s)/rpm package(s) {0}: {1}".format(name, targets)
+ )
errors = []
@@ -334,35 +325,32 @@ def remove(name=None, pkgs=None, **kwargs):
named, versionpkg, rpmpkg = _check_pkg(target)
except CommandExecutionError as exc:
if exc.info:
- errors.append(exc.info['errors'])
+ errors.append(exc.info["errors"])
continue
if rpmpkg:
- cmd = ['/usr/bin/rpm', '-e', named]
- out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
+ cmd = ["/usr/bin/rpm", "-e", named]
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace")
else:
- cmd = ['/usr/sbin/installp', '-u', named]
- out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
+ cmd = ["/usr/sbin/installp", "-u", named]
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace")
# Get a list of the packages after the uninstall
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
- 'Problems encountered removing filesets(s)/package(s)',
- info={
- 'changes': ret,
- 'errors': errors
- }
+ "Problems encountered removing filesets(s)/package(s)",
+ info={"changes": ret, "errors": errors},
)
return ret
def latest_version(*names, **kwargs):
- '''
+ """
Return the latest version of the named fileset/rpm package available for
upgrade or installation. If more than one fileset/rpm package name is
specified, a dict of name/version pairs is returned.
@@ -381,14 +369,14 @@ def latest_version(*names, **kwargs):
NOTE: Repositories are not presently supported for AIX.
This function will always return an empty string for a given
fileset/rpm package.
- '''
- kwargs.pop('refresh', True)
+ """
+ kwargs.pop("refresh", True)
ret = {}
if not names:
- return ''
+ return ""
for name in names:
- ret[name] = ''
+ ret[name] = ""
# Return a string if only one package name passed
if len(names) == 1:
@@ -397,11 +385,13 @@ def latest_version(*names, **kwargs):
# available_version is being deprecated
-available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
+available_version = salt.utils.functools.alias_function(
+ latest_version, "available_version"
+)
def upgrade_available(name):
- '''
+ """
Check whether or not an upgrade is available for a given package
CLI Example:
@@ -409,5 +399,5 @@ def upgrade_available(name):
.. code-block:: bash
salt '*' pkg.upgrade_available
- '''
- return latest_version(name) != ''
+ """
+ return latest_version(name) != ""
diff --git a/salt/modules/aliases.py b/salt/modules/aliases.py
index 33922803a2c..d05fb70dd67 100644
--- a/salt/modules/aliases.py
+++ b/salt/modules/aliases.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage the information in the aliases file
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -20,37 +20,37 @@ from salt.exceptions import SaltInvocationError
from salt.ext import six
__outputter__ = {
- 'rm_alias': 'txt',
- 'has_target': 'txt',
- 'get_target': 'txt',
- 'set_target': 'txt',
- 'list_aliases': 'yaml',
+ "rm_alias": "txt",
+ "has_target": "txt",
+ "get_target": "txt",
+ "set_target": "txt",
+ "list_aliases": "yaml",
}
-__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
+__ALIAS_RE = re.compile(r"([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)")
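__ALIAS_RE splits each aliases(5) line into three groups: the alias, the target list, and an optional trailing comment. A quick check of that behavior:

import re

ALIAS_RE = re.compile(r"([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)")
m = ALIAS_RE.match("postmaster: root  # required alias")
assert m.group(1).strip() == "postmaster"
assert m.group(2) == "root"
assert m.group(3).lstrip() == "# required alias"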
def __get_aliases_filename():
- '''
+ """
Return the path to the appropriate aliases file
- '''
- return os.path.realpath(__salt__['config.option']('aliases.file'))
+ """
+ return os.path.realpath(__salt__["config.option"]("aliases.file"))
def __parse_aliases():
- '''
+ """
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
- '''
+ """
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
- with salt.utils.files.fopen(afn, 'r') as ifile:
+ with salt.utils.files.fopen(afn, "r") as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
@@ -62,16 +62,16 @@ def __parse_aliases():
def __write_aliases_file(lines):
- '''
+ """
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
- '''
+ """
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
- if not __opts__.get('integration.test', False):
+ if not __opts__.get("integration.test", False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
@@ -82,15 +82,13 @@ def __write_aliases_file(lines):
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
- line_target = ', '.join(line_target)
+ line_target = ", ".join(line_target)
if not line_comment:
- line_comment = ''
+ line_comment = ""
if line_alias and line_target:
- write_line = '{0}: {1}{2}\n'.format(
- line_alias, line_target, line_comment
- )
+ write_line = "{0}: {1}{2}\n".format(line_alias, line_target, line_comment)
else:
- write_line = '{0}\n'.format(line_comment)
+ write_line = "{0}\n".format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
@@ -99,15 +97,15 @@ def __write_aliases_file(lines):
os.rename(out.name, afn)
# Search $PATH for the newaliases command
- newaliases = salt.utils.path.which('newaliases')
+ newaliases = salt.utils.path.which("newaliases")
if newaliases is not None:
- __salt__['cmd.run'](newaliases)
+ __salt__["cmd.run"](newaliases)
return True
def list_aliases():
- '''
+ """
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
@@ -117,13 +115,13 @@ def list_aliases():
.. code-block:: bash
salt '*' aliases.list_aliases
- '''
+ """
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
- '''
+ """
Return the target associated with an alias
CLI Example:
@@ -131,15 +129,15 @@ def get_target(alias):
.. code-block:: bash
salt '*' aliases.get_target alias
- '''
+ """
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
- return ''
+ return ""
def has_target(alias, target):
- '''
+ """
Return true if the alias/target is set
CLI Example:
@@ -147,19 +145,19 @@ def has_target(alias, target):
.. code-block:: bash
salt '*' aliases.has_target alias target
- '''
- if target == '':
- raise SaltInvocationError('target can not be an empty string')
+ """
+ if target == "":
+ raise SaltInvocationError("target can not be an empty string")
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
- target = ', '.join(target)
+ target = ", ".join(target)
return target == aliases[alias]
def set_target(alias, target):
- '''
+ """
Set the entry in the aliases file for the given alias; this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
@@ -169,13 +167,13 @@ def set_target(alias, target):
.. code-block:: bash
salt '*' aliases.set_target alias target
- '''
+ """
- if alias == '':
- raise SaltInvocationError('alias can not be an empty string')
+ if alias == "":
+ raise SaltInvocationError("alias can not be an empty string")
- if target == '':
- raise SaltInvocationError('target can not be an empty string')
+ if target == "":
+ raise SaltInvocationError("target can not be an empty string")
if get_target(alias) == target:
return True
@@ -191,14 +189,14 @@ def set_target(alias, target):
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
- out.append((alias, target, ''))
+ out.append((alias, target, ""))
__write_aliases_file(out)
return True
def rm_alias(alias):
- '''
+ """
Remove an entry from the aliases file
CLI Example:
@@ -206,7 +204,7 @@ def rm_alias(alias):
.. code-block:: bash
salt '*' aliases.rm_alias alias
- '''
+ """
if not get_target(alias):
return True
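The `__ALIAS_RE` pattern above does the parsing work for `/etc/aliases`-style lines, splitting each line into alias, target, and trailing comment. A standalone check of the same pattern (the sample line is illustrative):

    import re

    # Same pattern as __ALIAS_RE above; the sample aliases line is made up.
    ALIAS_RE = re.compile(r"([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)")

    match = ALIAS_RE.match("postmaster: root  # required by RFC 5321")
    print(match.groups())
    # -> ('postmaster', 'root', '  # required by RFC 5321')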
diff --git a/salt/modules/alternatives.py b/salt/modules/alternatives.py
index 606d493efc5..ed66a3b6e42 100644
--- a/salt/modules/alternatives.py
+++ b/salt/modules/alternatives.py
@@ -1,14 +1,15 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for Alternatives system
:codeauthor: Radek Rada
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
# Import python libs
import os
-import logging
# Import Salt libs
import salt.utils.files
@@ -17,41 +18,38 @@ import salt.utils.path
# Import 3rd-party libs
from salt.ext import six
-
__outputter__ = {
- 'display': 'txt',
- 'install': 'txt',
- 'remove': 'txt',
+ "display": "txt",
+ "install": "txt",
+ "remove": "txt",
}
log = logging.getLogger(__name__)
# Don't shadow built-in's.
-__func_alias__ = {
- 'set_': 'set'
-}
+__func_alias__ = {"set_": "set"}
def __virtual__():
- '''
+ """
Only if alternatives dir is available
- '''
- if os.path.isdir('/etc/alternatives'):
+ """
+ if os.path.isdir("/etc/alternatives"):
return True
- return (False, 'Cannot load alternatives module: /etc/alternatives dir not found')
+ return (False, "Cannot load alternatives module: /etc/alternatives dir not found")
def _get_cmd():
- '''
+ """
Alternatives commands differ across distributions
- '''
- if __grains__['os_family'] == 'RedHat':
- return 'alternatives'
- return 'update-alternatives'
+ """
+ if __grains__["os_family"] == "RedHat":
+ return "alternatives"
+ return "update-alternatives"
def display(name):
- '''
+ """
Display alternatives settings for defined command name
CLI Example:
@@ -59,16 +57,16 @@ def display(name):
.. code-block:: bash
salt '*' alternatives.display editor
- '''
- cmd = [_get_cmd(), '--display', name]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
- if out['retcode'] > 0 and out['stderr'] != '':
- return out['stderr']
- return out['stdout']
+ """
+ cmd = [_get_cmd(), "--display", name]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
+ if out["retcode"] > 0 and out["stderr"] != "":
+ return out["stderr"]
+ return out["stdout"]
def show_link(name):
- '''
+ """
Display master link for the alternative
.. versionadded:: 2015.8.13,2016.3.4,2016.11.0
@@ -78,34 +76,35 @@ def show_link(name):
.. code-block:: bash
salt '*' alternatives.show_link editor
- '''
+ """
- if __grains__['os_family'] == 'RedHat':
- path = '/var/lib/'
- elif __grains__['os_family'] == 'Suse':
- path = '/var/lib/rpm/'
+ if __grains__["os_family"] == "RedHat":
+ path = "/var/lib/"
+ elif __grains__["os_family"] == "Suse":
+ path = "/var/lib/rpm/"
else:
- path = '/var/lib/dpkg/'
+ path = "/var/lib/dpkg/"
- path += 'alternatives/{0}'.format(name)
+ path += "alternatives/{0}".format(name)
try:
- with salt.utils.files.fopen(path, 'rb') as r_file:
+ with salt.utils.files.fopen(path, "rb") as r_file:
contents = salt.utils.stringutils.to_unicode(r_file.read())
- return contents.splitlines(True)[1].rstrip('\n')
+ return contents.splitlines(True)[1].rstrip("\n")
except OSError:
- log.error('alternatives: %s does not exist', name)
+ log.error("alternatives: %s does not exist", name)
except (IOError, IndexError) as exc: # pylint: disable=duplicate-except
log.error(
- 'alternatives: unable to get master link for %s. '
- 'Exception: %s', name, exc
+ "alternatives: unable to get master link for %s. " "Exception: %s",
+ name,
+ exc,
)
return False
def show_current(name):
- '''
+ """
Display the current highest-priority alternative for a given alternatives
link
@@ -114,16 +113,16 @@ def show_current(name):
.. code-block:: bash
salt '*' alternatives.show_current editor
- '''
+ """
try:
return _read_link(name)
except OSError:
- log.error('alternative: %s does not exist', name)
+ log.error("alternative: %s does not exist", name)
return False
def check_exists(name, path):
- '''
+ """
Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
@@ -133,18 +132,18 @@ def check_exists(name, path):
.. code-block:: bash
salt '*' alternatives.check_exists name path
- '''
- cmd = [_get_cmd(), '--display', name]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
+ """
+ cmd = [_get_cmd(), "--display", name]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
- if out['retcode'] > 0 and out['stderr'] != '':
+ if out["retcode"] > 0 and out["stderr"] != "":
return False
- return any((line.startswith(path) for line in out['stdout'].splitlines()))
+ return any((line.startswith(path) for line in out["stdout"].splitlines()))
def check_installed(name, path):
- '''
+ """
Check if the current highest-priority match for a given alternatives link
is set to the desired path
@@ -153,7 +152,7 @@ def check_installed(name, path):
.. code-block:: bash
salt '*' alternatives.check_installed name path
- '''
+ """
try:
return _read_link(name) == path
except OSError:
@@ -161,7 +160,7 @@ def check_installed(name, path):
def install(name, link, path, priority):
- '''
+ """
Install symbolic links determining default commands
CLI Example:
@@ -169,16 +168,16 @@ def install(name, link, path, priority):
.. code-block:: bash
salt '*' alternatives.install editor /usr/bin/editor /usr/bin/emacs23 50
- '''
- cmd = [_get_cmd(), '--install', link, name, path, six.text_type(priority)]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
- if out['retcode'] > 0 and out['stderr'] != '':
- return out['stderr']
- return out['stdout']
+ """
+ cmd = [_get_cmd(), "--install", link, name, path, six.text_type(priority)]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if out["retcode"] > 0 and out["stderr"] != "":
+ return out["stderr"]
+ return out["stdout"]
def remove(name, path):
- '''
+ """
Remove symbolic links determining the default commands.
CLI Example:
@@ -186,16 +185,16 @@ def remove(name, path):
.. code-block:: bash
salt '*' alternatives.remove name path
- '''
- cmd = [_get_cmd(), '--remove', name, path]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
- if out['retcode'] > 0:
- return out['stderr']
- return out['stdout']
+ """
+ cmd = [_get_cmd(), "--remove", name, path]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if out["retcode"] > 0:
+ return out["stderr"]
+ return out["stdout"]
def auto(name):
- '''
+ """
Trigger alternatives to set the path for <name> as
specified by priority.
@@ -204,16 +203,16 @@ def auto(name):
.. code-block:: bash
salt '*' alternatives.auto name
- '''
- cmd = [_get_cmd(), '--auto', name]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
- if out['retcode'] > 0:
- return out['stderr']
- return out['stdout']
+ """
+ cmd = [_get_cmd(), "--auto", name]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if out["retcode"] > 0:
+ return out["stderr"]
+ return out["stdout"]
def set_(name, path):
- '''
+ """
Manually set the alternative for <name>.
CLI Example:
@@ -221,19 +220,19 @@ def set_(name, path):
.. code-block:: bash
salt '*' alternatives.set name path
- '''
- cmd = [_get_cmd(), '--set', name, path]
- out = __salt__['cmd.run_all'](cmd, python_shell=False)
- if out['retcode'] > 0:
- return out['stderr']
- return out['stdout']
+ """
+ cmd = [_get_cmd(), "--set", name, path]
+ out = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if out["retcode"] > 0:
+ return out["stderr"]
+ return out["stdout"]
def _read_link(name):
- '''
+ """
Read the link from /etc/alternatives
Throws an OSError if the link does not exist
- '''
- alt_link_path = '/etc/alternatives/{0}'.format(name)
+ """
+ alt_link_path = "/etc/alternatives/{0}".format(name)
return salt.utils.path.readlink(alt_link_path)
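Besides the quote normalization, note that `display` and `check_exists` now pass `ignore_retcode=True`, so a missing alternative no longer logs a spurious error while the functions still inspect `retcode` themselves. For context, `_read_link` simply resolves the `/etc/alternatives/<name>` symlink, roughly equivalent to this standalone sketch (the example name and output are illustrative):

    import os

    # Rough standalone equivalent of _read_link above; salt.utils.path.readlink
    # adds platform handling that plain os.readlink lacks.
    def read_link(name):
        return os.readlink("/etc/alternatives/{0}".format(name))

    # On a Debian-style system this might return "/usr/bin/vim.basic":
    # print(read_link("editor"))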
diff --git a/salt/modules/ansiblegate.py b/salt/modules/ansiblegate.py
index 6b903c2b941..44a07f58f38 100644
--- a/salt/modules/ansiblegate.py
+++ b/salt/modules/ansiblegate.py
@@ -15,7 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
+"""
Ansible Support
===============
@@ -26,25 +26,26 @@ configuration in /etc/salt/minion.d/ as follows:
The timeout is how many seconds Salt should wait for
any Ansible module to respond.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-import json
-import os
-import sys
-import logging
-import importlib
-import fnmatch
-import subprocess
-import salt.utils.json
-from salt.exceptions import LoaderError, CommandExecutionError
-from salt.utils.decorators import depends
+import fnmatch
+import importlib
+import json
+import logging
+import os
+import subprocess
+import sys
+
import salt.utils.decorators.path
+import salt.utils.json
import salt.utils.platform
import salt.utils.timed_subprocess
import salt.utils.yaml
+from salt.exceptions import CommandExecutionError, LoaderError
from salt.ext import six
+from salt.utils.decorators import depends
try:
import ansible
@@ -53,23 +54,24 @@ try:
except ImportError:
ansible = None
-__virtualname__ = 'ansible'
+__virtualname__ = "ansible"
log = logging.getLogger(__name__)
class AnsibleModuleResolver(object):
- '''
+ """
This class resolves all available modules in Ansible.
- '''
+ """
+
def __init__(self, opts):
self.opts = opts
self._modules_map = {}
def _get_modules_map(self, path=None):
- '''
+ """
Get installed Ansible modules
:return:
- '''
+ """
paths = {}
root = ansible.modules.__path__[0]
if not path:
@@ -81,104 +83,127 @@ class AnsibleModuleResolver(object):
if os.path.isdir(p_el_path):
paths.update(self._get_modules_map(p_el_path))
else:
- if (any(p_el.startswith(elm) for elm in ['__', '.']) or
- not p_el.endswith('.py') or
- p_el in ansible.constants.IGNORE_FILES):
+ if (
+ any(p_el.startswith(elm) for elm in ["__", "."])
+ or not p_el.endswith(".py")
+ or p_el in ansible.constants.IGNORE_FILES
+ ):
continue
- p_el_path = p_el_path.replace(root, '').split('.')[0]
- als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.')
+ p_el_path = p_el_path.replace(root, "").split(".")[0]
+ als_name = (
+ p_el_path.replace(".", "").replace("/", "", 1).replace("/", ".")
+ )
paths[als_name] = p_el_path
return paths
def load_module(self, module):
- '''
+ """
Introspect Ansible module.
:param module:
:return:
- '''
+ """
m_ref = self._modules_map.get(module)
if m_ref is None:
raise LoaderError('Module "{0}" was not found'.format(module))
- mod = importlib.import_module('ansible.modules{0}'.format(
- '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)])))
+ mod = importlib.import_module(
+ "ansible.modules{0}".format(
+ ".".join([elm.split(".")[0] for elm in m_ref.split(os.path.sep)])
+ )
+ )
return mod
def get_modules_list(self, pattern=None):
- '''
+ """
Return module map references.
:return:
- '''
- if pattern and '*' not in pattern:
- pattern = '*{0}*'.format(pattern)
+ """
+ if pattern and "*" not in pattern:
+ pattern = "*{0}*".format(pattern)
modules = []
for m_name, m_path in self._modules_map.items():
- m_path = m_path.split('.')[0]
- m_name = '.'.join([elm for elm in m_path.split(os.path.sep) if elm])
+ m_path = m_path.split(".")[0]
+ m_name = ".".join([elm for elm in m_path.split(os.path.sep) if elm])
if pattern and fnmatch.fnmatch(m_name, pattern) or not pattern:
modules.append(m_name)
return sorted(modules)
def resolve(self):
- log.debug('Resolving Ansible modules')
+ log.debug("Resolving Ansible modules")
self._modules_map = self._get_modules_map()
return self
def install(self):
- log.debug('Installing Ansible modules')
+ log.debug("Installing Ansible modules")
return self
class AnsibleModuleCaller(object):
DEFAULT_TIMEOUT = 1200 # seconds (20 minutes)
- OPT_TIMEOUT_KEY = 'ansible_timeout'
+ OPT_TIMEOUT_KEY = "ansible_timeout"
def __init__(self, resolver):
self._resolver = resolver
- self.timeout = self._resolver.opts.get(self.OPT_TIMEOUT_KEY, self.DEFAULT_TIMEOUT)
+ self.timeout = self._resolver.opts.get(
+ self.OPT_TIMEOUT_KEY, self.DEFAULT_TIMEOUT
+ )
def call(self, module, *args, **kwargs):
- '''
+ """
Call an Ansible module by invoking it.
:param module: the name of the module.
:param args: Arguments to the module
:param kwargs: keywords to the module
:return:
- '''
+ """
module = self._resolver.load_module(module)
- if not hasattr(module, 'main'):
- raise CommandExecutionError('This module is not callable '
- '(see "ansible.help {0}")'.format(module.__name__.replace('ansible.modules.',
- '')))
+ if not hasattr(module, "main"):
+ raise CommandExecutionError(
+ "This module is not callable "
+ '(see "ansible.help {0}")'.format(
+ module.__name__.replace("ansible.modules.", "")
+ )
+ )
if args:
- kwargs['_raw_params'] = ' '.join(args)
- js_args = str('{{"ANSIBLE_MODULE_ARGS": {args}}}') # future lint: disable=blacklisted-function
+ kwargs["_raw_params"] = " ".join(args)
+ js_args = str(
+ '{{"ANSIBLE_MODULE_ARGS": {args}}}'
+ ) # future lint: disable=blacklisted-function
js_args = js_args.format(args=salt.utils.json.dumps(kwargs))
proc_out = salt.utils.timed_subprocess.TimedProc(
["echo", "{0}".format(js_args)],
- stdout=subprocess.PIPE, timeout=self.timeout)
+ stdout=subprocess.PIPE,
+ timeout=self.timeout,
+ )
proc_out.run()
proc_exc = salt.utils.timed_subprocess.TimedProc(
- ['python', module.__file__],
- stdin=proc_out.stdout, stdout=subprocess.PIPE, timeout=self.timeout)
+ ["python", module.__file__],
+ stdin=proc_out.stdout,
+ stdout=subprocess.PIPE,
+ timeout=self.timeout,
+ )
proc_exc.run()
try:
out = salt.utils.json.loads(proc_exc.stdout)
except ValueError as ex:
- out = {'Error': (proc_exc.stderr and (proc_exc.stderr + '.') or six.text_type(ex))}
+ out = {
+ "Error": (
+ proc_exc.stderr and (proc_exc.stderr + ".") or six.text_type(ex)
+ )
+ }
if proc_exc.stdout:
- out['Given JSON output'] = proc_exc.stdout
+ out["Given JSON output"] = proc_exc.stdout
return out
- if 'invocation' in out:
- del out['invocation']
+ if "invocation" in out:
+ del out["invocation"]
- out['timeout'] = self.timeout
+ out["timeout"] = self.timeout
return out
@@ -188,38 +213,41 @@ _caller = None
def _set_callables(modules):
- '''
+ """
Set all Ansible modules callables
:return:
- '''
+ """
+
def _set_function(cmd_name, doc):
- '''
+ """
Create a Salt function for the Ansible module.
- '''
+ """
+
def _cmd(*args, **kw):
- '''
+ """
Call an Ansible module as a function from Salt.
- '''
+ """
kwargs = {}
- if kw.get('__pub_arg'):
- for _kw in kw.get('__pub_arg', []):
+ if kw.get("__pub_arg"):
+ for _kw in kw.get("__pub_arg", []):
if isinstance(_kw, dict):
kwargs = _kw
break
return _caller.call(cmd_name, *args, **kwargs)
+
_cmd.__doc__ = doc
return _cmd
for mod in modules:
- setattr(sys.modules[__name__], mod, _set_function(mod, 'Available'))
+ setattr(sys.modules[__name__], mod, _set_function(mod, "Available"))
def __virtual__():
- '''
+ """
Ansible module caller.
:return:
- '''
+ """
if salt.utils.platform.is_windows():
return False, "The ansiblegate module isn't supported on Windows"
ret = ansible is not None
@@ -233,25 +261,29 @@ def __virtual__():
return __virtualname__
-@depends('ansible')
+@depends("ansible")
def help(module=None, *args):
- '''
+ """
Display help on an Ansible standard module.
:param module:
:return:
- '''
+ """
if not module:
- raise CommandExecutionError('Please tell me what module you want to have helped with. '
- 'Or call "ansible.list" to know what is available.')
+ raise CommandExecutionError(
+ "Please tell me what module you want to have helped with. "
+ 'Or call "ansible.list" to know what is available.'
+ )
try:
module = _resolver.load_module(module)
except (ImportError, LoaderError) as err:
- raise CommandExecutionError('Module "{0}" is currently not functional on your system.'.format(module))
+ raise CommandExecutionError(
+ 'Module "{0}" is currently not functional on your system.'.format(module)
+ )
doc = {}
ret = {}
- for docset in module.DOCUMENTATION.split('---'):
+ for docset in module.DOCUMENTATION.split("---"):
try:
docset = salt.utils.yaml.safe_load(docset)
if docset:
@@ -259,11 +291,15 @@ def help(module=None, *args):
except Exception as err: # pylint: disable=broad-except
log.error("Error parsing doc section: %s", err)
if not args:
- if 'description' in doc:
- description = doc.get('description') or ''
- del doc['description']
- ret['Description'] = description
- ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys()
+ if "description" in doc:
+ description = doc.get("description") or ""
+ del doc["description"]
+ ret["Description"] = description
+ ret[
+ 'Available sections on module "{}"'.format(
+ module.__name__.replace("ansible.modules.", "")
+ )
+ ] = doc.keys()
else:
for arg in args:
info = doc.get(arg)
@@ -273,22 +309,37 @@ def help(module=None, *args):
return ret
-@depends('ansible')
+@depends("ansible")
def list(pattern=None):
- '''
+ """
Lists available modules.
:return:
- '''
+ """
return _resolver.get_modules_list(pattern=pattern)
-@salt.utils.decorators.path.which('ansible-playbook')
-def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None,
- flush_cache=False, forks=5, inventory=None, limit=None,
- list_hosts=False, list_tags=False, list_tasks=False,
- module_path=None, skip_tags=None, start_at_task=None,
- syntax_check=False, tags=None, playbook_kwargs=None):
- '''
+@salt.utils.decorators.path.which("ansible-playbook")
+def playbooks(
+ playbook,
+ rundir=None,
+ check=False,
+ diff=False,
+ extra_vars=None,
+ flush_cache=False,
+ forks=5,
+ inventory=None,
+ limit=None,
+ list_hosts=False,
+ list_tags=False,
+ list_tasks=False,
+ module_path=None,
+ skip_tags=None,
+ start_at_task=None,
+ syntax_check=False,
+ tags=None,
+ playbook_kwargs=None,
+):
+ """
Run Ansible Playbooks
:param playbook: Which playbook to run.
@@ -326,56 +377,56 @@ def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None,
.. code-block:: bash
salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml
- '''
- command = ['ansible-playbook', playbook]
+ """
+ command = ["ansible-playbook", playbook]
if check:
- command.append('--check')
+ command.append("--check")
if diff:
- command.append('--diff')
+ command.append("--diff")
if isinstance(extra_vars, dict):
command.append("--extra-vars='{0}'".format(json.dumps(extra_vars)))
- elif isinstance(extra_vars, six.text_type) and extra_vars.startswith('@'):
- command.append('--extra-vars={0}'.format(extra_vars))
+ elif isinstance(extra_vars, six.text_type) and extra_vars.startswith("@"):
+ command.append("--extra-vars={0}".format(extra_vars))
if flush_cache:
- command.append('--flush-cache')
+ command.append("--flush-cache")
if inventory:
- command.append('--inventory={0}'.format(inventory))
+ command.append("--inventory={0}".format(inventory))
if limit:
- command.append('--limit={0}'.format(limit))
+ command.append("--limit={0}".format(limit))
if list_hosts:
- command.append('--list-hosts')
+ command.append("--list-hosts")
if list_tags:
- command.append('--list-tags')
+ command.append("--list-tags")
if list_tasks:
- command.append('--list-tasks')
+ command.append("--list-tasks")
if module_path:
- command.append('--module-path={0}'.format(module_path))
+ command.append("--module-path={0}".format(module_path))
if skip_tags:
- command.append('--skip-tags={0}'.format(skip_tags))
+ command.append("--skip-tags={0}".format(skip_tags))
if start_at_task:
- command.append('--start-at-task={0}'.format(start_at_task))
+ command.append("--start-at-task={0}".format(start_at_task))
if syntax_check:
- command.append('--syntax-check')
+ command.append("--syntax-check")
if tags:
- command.append('--tags={0}'.format(tags))
+ command.append("--tags={0}".format(tags))
if playbook_kwargs:
for key, value in six.iteritems(playbook_kwargs):
- key = key.replace('_', '-')
+ key = key.replace("_", "-")
if value is True:
- command.append('--{0}'.format(key))
+ command.append("--{0}".format(key))
elif isinstance(value, six.text_type):
- command.append('--{0}={1}'.format(key, value))
+ command.append("--{0}={1}".format(key, value))
elif isinstance(value, dict):
- command.append('--{0}={1}'.format(key, json.dumps(value)))
- command.append('--forks={0}'.format(forks))
+ command.append("--{0}={1}".format(key, json.dumps(value)))
+ command.append("--forks={0}".format(forks))
cmd_kwargs = {
- 'env': {'ANSIBLE_STDOUT_CALLBACK': 'json', 'ANSIBLE_RETRY_FILES_ENABLED': '0'},
- 'cwd': rundir,
- 'cmd': ' '.join(command)
+ "env": {"ANSIBLE_STDOUT_CALLBACK": "json", "ANSIBLE_RETRY_FILES_ENABLED": "0"},
+ "cwd": rundir,
+ "cmd": " ".join(command),
}
- ret = __salt__['cmd.run_all'](**cmd_kwargs)
- log.debug('Ansible Playbook Return: %s', ret)
- retdata = json.loads(ret['stdout'])
- if ret['retcode']:
- __context__['retcode'] = ret['retcode']
+ ret = __salt__["cmd.run_all"](**cmd_kwargs)
+ log.debug("Ansible Playbook Return: %s", ret)
+ retdata = json.loads(ret["stdout"])
+ if ret["retcode"]:
+ __context__["retcode"] = ret["retcode"]
return retdata
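`AnsibleModuleCaller.call` above hands arguments to an Ansible module on stdin, wrapped in an `ANSIBLE_MODULE_ARGS` JSON envelope before being piped to `python <module-file>`. The envelope is easy to reproduce standalone (the kwargs are illustrative):

    import json

    # The same envelope AnsibleModuleCaller builds above; the kwargs are made up.
    kwargs = {"_raw_params": "echo hello"}
    js_args = '{{"ANSIBLE_MODULE_ARGS": {args}}}'.format(args=json.dumps(kwargs))
    print(js_args)
    # -> {"ANSIBLE_MODULE_ARGS": {"_raw_params": "echo hello"}}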
diff --git a/salt/modules/apache.py b/salt/modules/apache.py
index 83df8a09483..a8d716fe6e9 100644
--- a/salt/modules/apache.py
+++ b/salt/modules/apache.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for Apache
.. note::
@@ -7,26 +7,19 @@ Support for Apache
all implementations of Apache. Debian-specific functions have been moved into
deb_apache.py, but will still load under the ``apache`` namespace when a
Debian-based system is detected.
-'''
+"""
# Import python libs
-from __future__ import absolute_import, generators, print_function, with_statement, unicode_literals
-import re
-import logging
-
-# Import 3rd-party libs
-# pylint: disable=import-error,no-name-in-module
-from salt.ext import six
-from salt.ext.six.moves import cStringIO
-from salt.ext.six.moves.urllib.error import URLError
-from salt.ext.six.moves.urllib.request import (
- HTTPBasicAuthHandler as _HTTPBasicAuthHandler,
- HTTPDigestAuthHandler as _HTTPDigestAuthHandler,
- urlopen as _urlopen,
- build_opener as _build_opener,
- install_opener as _install_opener
+from __future__ import (
+ absolute_import,
+ generators,
+ print_function,
+ unicode_literals,
+ with_statement,
)
-# pylint: enable=import-error,no-name-in-module
+
+import logging
+import re
# Import salt libs
import salt.utils.data
@@ -35,35 +28,56 @@ import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltException
+# Import 3rd-party libs
+# pylint: disable=import-error,no-name-in-module
+from salt.ext import six
+from salt.ext.six.moves import cStringIO
+from salt.ext.six.moves.urllib.error import URLError
+from salt.ext.six.moves.urllib.request import (
+ HTTPBasicAuthHandler as _HTTPBasicAuthHandler,
+)
+from salt.ext.six.moves.urllib.request import (
+ HTTPDigestAuthHandler as _HTTPDigestAuthHandler,
+)
+from salt.ext.six.moves.urllib.request import build_opener as _build_opener
+from salt.ext.six.moves.urllib.request import install_opener as _install_opener
+from salt.ext.six.moves.urllib.request import urlopen as _urlopen
+
+# pylint: enable=import-error,no-name-in-module
+
+
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load the module if apache is installed
- '''
+ """
cmd = _detect_os()
if salt.utils.path.which(cmd):
- return 'apache'
- return (False, 'The apache execution module cannot be loaded: apache is not installed.')
+ return "apache"
+ return (
+ False,
+ "The apache execution module cannot be loaded: apache is not installed.",
+ )
def _detect_os():
- '''
+ """
Apache commands and paths differ depending on packaging
- '''
+ """
# TODO: Add pillar support for the apachectl location
- os_family = __grains__['os_family']
- if os_family == 'RedHat':
- return 'apachectl'
- elif os_family == 'Debian' or os_family == 'Suse':
- return 'apache2ctl'
+ os_family = __grains__["os_family"]
+ if os_family == "RedHat":
+ return "apachectl"
+ elif os_family == "Debian" or os_family == "Suse":
+ return "apache2ctl"
else:
- return 'apachectl'
+ return "apachectl"
def version():
- '''
+ """
Return server version (``apachectl -v``)
CLI Example:
@@ -71,15 +85,15 @@ def version():
.. code-block:: bash
salt '*' apache.version
- '''
- cmd = '{0} -v'.format(_detect_os())
- out = __salt__['cmd.run'](cmd).splitlines()
- ret = out[0].split(': ')
+ """
+ cmd = "{0} -v".format(_detect_os())
+ out = __salt__["cmd.run"](cmd).splitlines()
+ ret = out[0].split(": ")
return ret[1]
def fullversion():
- '''
+ """
Return server version (``apachectl -V``)
CLI Example:
@@ -87,28 +101,28 @@ def fullversion():
.. code-block:: bash
salt '*' apache.fullversion
- '''
- cmd = '{0} -V'.format(_detect_os())
+ """
+ cmd = "{0} -V".format(_detect_os())
ret = {}
- ret['compiled_with'] = []
- out = __salt__['cmd.run'](cmd).splitlines()
+ ret["compiled_with"] = []
+ out = __salt__["cmd.run"](cmd).splitlines()
# Example
# -D APR_HAS_MMAP
- define_re = re.compile(r'^\s+-D\s+')
+ define_re = re.compile(r"^\s+-D\s+")
for line in out:
- if ': ' in line:
- comps = line.split(': ')
+ if ": " in line:
+ comps = line.split(": ")
if not comps:
continue
- ret[comps[0].strip().lower().replace(' ', '_')] = comps[1].strip()
- elif ' -D' in line:
- cwith = define_re.sub('', line)
- ret['compiled_with'].append(cwith)
+ ret[comps[0].strip().lower().replace(" ", "_")] = comps[1].strip()
+ elif " -D" in line:
+ cwith = define_re.sub("", line)
+ ret["compiled_with"].append(cwith)
return ret
def modules():
- '''
+ """
Return list of static and shared modules (``apachectl -M``)
CLI Example:
@@ -116,25 +130,25 @@ def modules():
.. code-block:: bash
salt '*' apache.modules
- '''
- cmd = '{0} -M'.format(_detect_os())
+ """
+ cmd = "{0} -M".format(_detect_os())
ret = {}
- ret['static'] = []
- ret['shared'] = []
- out = __salt__['cmd.run'](cmd).splitlines()
+ ret["static"] = []
+ ret["shared"] = []
+ out = __salt__["cmd.run"](cmd).splitlines()
for line in out:
comps = line.split()
if not comps:
continue
- if '(static)' in line:
- ret['static'].append(comps[0])
- if '(shared)' in line:
- ret['shared'].append(comps[0])
+ if "(static)" in line:
+ ret["static"].append(comps[0])
+ if "(shared)" in line:
+ ret["shared"].append(comps[0])
return ret
def servermods():
- '''
+ """
Return list of modules compiled into the server (``apachectl -l``)
CLI Example:
@@ -142,20 +156,20 @@ def servermods():
.. code-block:: bash
salt '*' apache.servermods
- '''
- cmd = '{0} -l'.format(_detect_os())
+ """
+ cmd = "{0} -l".format(_detect_os())
ret = []
- out = __salt__['cmd.run'](cmd).splitlines()
+ out = __salt__["cmd.run"](cmd).splitlines()
for line in out:
if not line:
continue
- if '.c' in line:
+ if ".c" in line:
ret.append(line.strip())
return ret
def directives():
- '''
+ """
Return list of directives together with expected arguments
and places where the directive is valid (``apachectl -L``)
@@ -164,22 +178,22 @@ def directives():
.. code-block:: bash
salt '*' apache.directives
- '''
- cmd = '{0} -L'.format(_detect_os())
+ """
+ cmd = "{0} -L".format(_detect_os())
ret = {}
- out = __salt__['cmd.run'](cmd)
- out = out.replace('\n\t', '\t')
+ out = __salt__["cmd.run"](cmd)
+ out = out.replace("\n\t", "\t")
for line in out.splitlines():
if not line:
continue
- comps = line.split('\t')
- desc = '\n'.join(comps[1:])
+ comps = line.split("\t")
+ desc = "\n".join(comps[1:])
ret[comps[0]] = desc
return ret
def vhosts():
- '''
+ """
Show the settings as parsed from the config file (currently
only shows the virtualhost settings) (``apachectl -S``).
Because each additional virtual host adds to the execution
@@ -191,41 +205,33 @@ def vhosts():
.. code-block:: bash
salt -t 10 '*' apache.vhosts
- '''
- cmd = '{0} -S'.format(_detect_os())
+ """
+ cmd = "{0} -S".format(_detect_os())
ret = {}
- namevhost = ''
- out = __salt__['cmd.run'](cmd)
+ namevhost = ""
+ out = __salt__["cmd.run"](cmd)
for line in out.splitlines():
if not line:
continue
comps = line.split()
- if 'is a NameVirtualHost' in line:
+ if "is a NameVirtualHost" in line:
namevhost = comps[0]
ret[namevhost] = {}
else:
- if comps[0] == 'default':
- ret[namevhost]['default'] = {}
- ret[namevhost]['default']['vhost'] = comps[2]
- ret[namevhost]['default']['conf'] = re.sub(
- r'\(|\)',
- '',
- comps[3]
- )
- if comps[0] == 'port':
+ if comps[0] == "default":
+ ret[namevhost]["default"] = {}
+ ret[namevhost]["default"]["vhost"] = comps[2]
+ ret[namevhost]["default"]["conf"] = re.sub(r"\(|\)", "", comps[3])
+ if comps[0] == "port":
ret[namevhost][comps[3]] = {}
- ret[namevhost][comps[3]]['vhost'] = comps[3]
- ret[namevhost][comps[3]]['conf'] = re.sub(
- r'\(|\)',
- '',
- comps[4]
- )
- ret[namevhost][comps[3]]['port'] = comps[1]
+ ret[namevhost][comps[3]]["vhost"] = comps[3]
+ ret[namevhost][comps[3]]["conf"] = re.sub(r"\(|\)", "", comps[4])
+ ret[namevhost][comps[3]]["port"] = comps[1]
return ret
def signal(signal=None):
- '''
+ """
Signals httpd to start, restart, or stop.
CLI Example:
@@ -233,36 +239,36 @@ def signal(signal=None):
.. code-block:: bash
salt '*' apache.signal restart
- '''
- no_extra_args = ('configtest', 'status', 'fullstatus')
- valid_signals = ('start', 'stop', 'restart', 'graceful', 'graceful-stop')
+ """
+ no_extra_args = ("configtest", "status", "fullstatus")
+ valid_signals = ("start", "stop", "restart", "graceful", "graceful-stop")
if signal not in valid_signals and signal not in no_extra_args:
return
# Make sure you use the right arguments
if signal in valid_signals:
- arguments = ' -k {0}'.format(signal)
+ arguments = " -k {0}".format(signal)
else:
- arguments = ' {0}'.format(signal)
+ arguments = " {0}".format(signal)
cmd = _detect_os() + arguments
- out = __salt__['cmd.run_all'](cmd)
+ out = __salt__["cmd.run_all"](cmd)
# A non-zero return code means fail
- if out['retcode'] and out['stderr']:
- ret = out['stderr'].strip()
+ if out["retcode"] and out["stderr"]:
+ ret = out["stderr"].strip()
# 'apachectl configtest' returns 'Syntax OK' to stderr
- elif out['stderr']:
- ret = out['stderr'].strip()
- elif out['stdout']:
- ret = out['stdout'].strip()
+ elif out["stderr"]:
+ ret = out["stderr"].strip()
+ elif out["stdout"]:
+ ret = out["stdout"].strip()
# No output for something like: apachectl graceful
else:
ret = 'Command: "{0}" completed successfully!'.format(cmd)
return ret
-def useradd(pwfile, user, password, opts=''):
- '''
+def useradd(pwfile, user, password, opts=""):
+ """
Add HTTP user using the ``htpasswd`` command. If the ``htpasswd`` file does not
exist, it will be created. Valid options that can be passed are:
@@ -280,12 +286,12 @@ def useradd(pwfile, user, password, opts=''):
salt '*' apache.useradd /etc/httpd/htpasswd larry badpassword
salt '*' apache.useradd /etc/httpd/htpasswd larry badpass opts=ns
- '''
- return __salt__['webutil.useradd'](pwfile, user, password, opts)
+ """
+ return __salt__["webutil.useradd"](pwfile, user, password, opts)
def userdel(pwfile, user):
- '''
+ """
Delete HTTP user from the specified ``htpasswd`` file.
CLI Example:
@@ -293,12 +299,12 @@ def userdel(pwfile, user):
.. code-block:: bash
salt '*' apache.userdel /etc/httpd/htpasswd larry
- '''
- return __salt__['webutil.userdel'](pwfile, user)
+ """
+ return __salt__["webutil.userdel"](pwfile, user)
-def server_status(profile='default'):
- '''
+def server_status(profile="default"):
+ """
Get Information from the Apache server-status handler
.. note::
@@ -327,43 +333,32 @@ def server_status(profile='default'):
salt '*' apache.server_status
salt '*' apache.server_status other-profile
- '''
+ """
ret = {
- 'Scoreboard': {
- '_': 0,
- 'S': 0,
- 'R': 0,
- 'W': 0,
- 'K': 0,
- 'D': 0,
- 'C': 0,
- 'L': 0,
- 'G': 0,
- 'I': 0,
- '.': 0,
+ "Scoreboard": {
+ "_": 0,
+ "S": 0,
+ "R": 0,
+ "W": 0,
+ "K": 0,
+ "D": 0,
+ "C": 0,
+ "L": 0,
+ "G": 0,
+ "I": 0,
+ ".": 0,
},
}
# Get configuration from pillar
- url = __salt__['config.get'](
- 'apache.server-status:{0}:url'.format(profile),
- 'http://localhost/server-status'
+ url = __salt__["config.get"](
+ "apache.server-status:{0}:url".format(profile), "http://localhost/server-status"
)
- user = __salt__['config.get'](
- 'apache.server-status:{0}:user'.format(profile),
- ''
- )
- passwd = __salt__['config.get'](
- 'apache.server-status:{0}:pass'.format(profile),
- ''
- )
- realm = __salt__['config.get'](
- 'apache.server-status:{0}:realm'.format(profile),
- ''
- )
- timeout = __salt__['config.get'](
- 'apache.server-status:{0}:timeout'.format(profile),
- 5
+ user = __salt__["config.get"]("apache.server-status:{0}:user".format(profile), "")
+ passwd = __salt__["config.get"]("apache.server-status:{0}:pass".format(profile), "")
+ realm = __salt__["config.get"]("apache.server-status:{0}:realm".format(profile), "")
+ timeout = __salt__["config.get"](
+ "apache.server-status:{0}:timeout".format(profile), 5
)
# create authentication handler if configuration exists
@@ -375,21 +370,21 @@ def server_status(profile='default'):
_install_opener(_build_opener(basic, digest))
# get http data
- url += '?auto'
+ url += "?auto"
try:
response = _urlopen(url, timeout=timeout).read().splitlines()
except URLError:
- return 'error'
+ return "error"
# parse the data
for line in response:
- splt = line.split(':', 1)
+ splt = line.split(":", 1)
splt[0] = splt[0].strip()
splt[1] = splt[1].strip()
- if splt[0] == 'Scoreboard':
+ if splt[0] == "Scoreboard":
for c in splt[1]:
- ret['Scoreboard'][c] += 1
+ ret["Scoreboard"][c] += 1
else:
if splt[1].isdigit():
ret[splt[0]] = int(splt[1])
@@ -401,56 +396,58 @@ def server_status(profile='default'):
def _parse_config(conf, slot=None):
- '''
+ """
Recursively goes through config structure and builds final Apache configuration
:param conf: defined config structure
:param slot: name of section container if needed
- '''
+ """
ret = cStringIO()
if isinstance(conf, six.string_types):
if slot:
- print('{0} {1}'.format(slot, conf), file=ret, end='')
+ print("{0} {1}".format(slot, conf), file=ret, end="")
else:
- print('{0}'.format(conf), file=ret, end='')
+ print("{0}".format(conf), file=ret, end="")
elif isinstance(conf, list):
is_section = False
for item in conf:
- if 'this' in item:
+ if "this" in item:
is_section = True
- slot_this = six.text_type(item['this'])
+ slot_this = six.text_type(item["this"])
if is_section:
- print('<{0} {1}>'.format(slot, slot_this), file=ret)
+ print("<{0} {1}>".format(slot, slot_this), file=ret)
for item in conf:
for key, val in item.items():
- if key != 'this':
+ if key != "this":
print(_parse_config(val, six.text_type(key)), file=ret)
- print('</{0}>'.format(slot), file=ret)
+ print("</{0}>".format(slot), file=ret)
else:
for value in conf:
print(_parse_config(value, six.text_type(slot)), file=ret)
elif isinstance(conf, dict):
try:
- print('<{0} {1}>'.format(slot, conf['this']), file=ret)
+ print("<{0} {1}>".format(slot, conf["this"]), file=ret)
except KeyError:
- raise SaltException('Apache section container "<{0}>" expects attribute. '
- 'Specify it using key "this".'.format(slot))
+ raise SaltException(
+ 'Apache section container "<{0}>" expects attribute. '
+ 'Specify it using key "this".'.format(slot)
+ )
for key, value in six.iteritems(conf):
- if key != 'this':
+ if key != "this":
if isinstance(value, six.string_types):
- print('{0} {1}'.format(key, value), file=ret)
+ print("{0} {1}".format(key, value), file=ret)
elif isinstance(value, list):
print(_parse_config(value, key), file=ret)
elif isinstance(value, dict):
print(_parse_config(value, key), file=ret)
- print('</{0}>'.format(slot), file=ret)
+ print("</{0}>".format(slot), file=ret)
ret.seek(0)
return ret.read()
def config(name, config, edit=True):
- '''
+ """
Create VirtualHost configuration files
name
@@ -468,7 +465,7 @@ def config(name, config, edit=True):
.. code-block:: bash
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
- '''
+ """
configs = []
for entry in config:
@@ -476,9 +473,9 @@ def config(name, config, edit=True):
configs.append(_parse_config(entry[key], key))
# Python auto-correct line endings
- configstext = '\n'.join(salt.utils.data.decode(configs))
+ configstext = "\n".join(salt.utils.data.decode(configs))
if edit:
- with salt.utils.files.fopen(name, 'w') as configfile:
- configfile.write('# This file is managed by Salt.\n')
+ with salt.utils.files.fopen(name, "w") as configfile:
+ configfile.write("# This file is managed by Salt.\n")
configfile.write(salt.utils.stringutils.to_str(configstext))
return configstext
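`_parse_config` above renders nested dicts and lists into Apache configuration syntax, with the reserved key `"this"` supplying a section container's attribute. A simplified standalone renderer showing the convention (strings and dicts only; the VirtualHost input is illustrative):

    # Simplified sketch of the "this" convention used by _parse_config above
    # (handles strings and dicts only; lists are omitted for brevity).
    def render(conf, slot=None):
        if isinstance(conf, str):
            return "{0} {1}".format(slot, conf) if slot else conf
        lines = ["<{0} {1}>".format(slot, conf["this"])]
        lines += [render(val, key) for key, val in conf.items() if key != "this"]
        lines.append("</{0}>".format(slot))
        return "\n".join(lines)

    print(render({"this": "*:80", "ServerName": "example.com"}, "VirtualHost"))
    # <VirtualHost *:80>
    # ServerName example.com
    # </VirtualHost>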
diff --git a/salt/modules/apcups.py b/salt/modules/apcups.py
index b6b972add20..2792604d895 100644
--- a/salt/modules/apcups.py
+++ b/salt/modules/apcups.py
@@ -1,46 +1,47 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module for apcupsd
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
+import salt.utils.decorators as decorators
+
# Import Salt libs
import salt.utils.path
-import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'apcups'
+__virtualname__ = "apcups"
@decorators.memoize
def _check_apcaccess():
- '''
+ """
Looks to see if apcaccess is present on the system
- '''
- return salt.utils.path.which('apcaccess')
+ """
+ return salt.utils.path.which("apcaccess")
def __virtual__():
- '''
+ """
Provides apcupsd only if apcaccess is present
- '''
+ """
if _check_apcaccess():
return __virtualname__
return (
False,
- '{0} module can only be loaded on when apcupsd is installed'.format(
+ "{0} module can only be loaded on when apcupsd is installed".format(
__virtualname__
- )
+ ),
)
def status():
- '''
+ """
Return apcaccess output
CLI Example:
@@ -48,24 +49,24 @@ def status():
.. code-block:: bash
salt '*' apcups.status
- '''
+ """
ret = {}
apcaccess = _check_apcaccess()
- res = __salt__['cmd.run_all'](apcaccess)
- retcode = res['retcode']
+ res = __salt__["cmd.run_all"](apcaccess)
+ retcode = res["retcode"]
if retcode != 0:
- ret['Error'] = 'Something with wrong executing apcaccess, is apcupsd running?'
+ ret["Error"] = "Something with wrong executing apcaccess, is apcupsd running?"
return ret
- for line in res['stdout'].splitlines():
- line = line.split(':')
+ for line in res["stdout"].splitlines():
+ line = line.split(":")
ret[line[0].strip()] = line[1].strip()
return ret
def status_load():
- '''
+ """
Return load
CLI Example:
@@ -73,18 +74,18 @@ def status_load():
.. code-block:: bash
salt '*' apcups.status_load
- '''
+ """
data = status()
- if 'LOADPCT' in data:
- load = data['LOADPCT'].split()
- if load[1].lower() == 'percent':
+ if "LOADPCT" in data:
+ load = data["LOADPCT"].split()
+ if load[1].lower() == "percent":
return float(load[0])
- return {'Error': 'Load not available.'}
+ return {"Error": "Load not available."}
def status_charge():
- '''
+ """
Return battery charge
CLI Example:
@@ -92,18 +93,18 @@ def status_charge():
.. code-block:: bash
salt '*' apcups.status_charge
- '''
+ """
data = status()
- if 'BCHARGE' in data:
- charge = data['BCHARGE'].split()
- if charge[1].lower() == 'percent':
+ if "BCHARGE" in data:
+ charge = data["BCHARGE"].split()
+ if charge[1].lower() == "percent":
return float(charge[0])
- return {'Error': 'Load not available.'}
+ return {"Error": "Load not available."}
def status_battery():
- '''
+ """
Return true if running on battery power
CLI Example:
@@ -111,12 +112,12 @@ def status_battery():
.. code-block:: bash
salt '*' apcups.status_battery
- '''
+ """
data = status()
- if 'TONBATT' in data:
- return not data['TONBATT'] == '0 Seconds'
+ if "TONBATT" in data:
+ return not data["TONBATT"] == "0 Seconds"
- return {'Error': 'Battery status not available.'}
+ return {"Error": "Battery status not available."}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
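`status` above parses `apcaccess` output by splitting each line on `":"` and keeping the first two fields; `status_load` and `status_charge` then consume those values. A standalone sketch against sample output (the sample lines are illustrative; note that a value containing its own colon, such as a timestamp, would be truncated by this split):

    # Sketch of the parsing in status(); the sample apcaccess lines are made up.
    sample = "LOADPCT  :  13.0 Percent\nBCHARGE  : 100.0 Percent"

    ret = {}
    for line in sample.splitlines():
        parts = line.split(":")
        ret[parts[0].strip()] = parts[1].strip()

    load = ret["LOADPCT"].split()
    if load[1].lower() == "percent":
        print(float(load[0]))  # -> 13.0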
diff --git a/salt/modules/apf.py b/salt/modules/apf.py
index 8fa574bece1..9a5c1e4707d 100644
--- a/salt/modules/apf.py
+++ b/salt/modules/apf.py
@@ -1,166 +1,166 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for Advanced Policy Firewall (APF)
==========================================
:maintainer: Mostafa Hussein
:maturity: new
:depends: python-iptables
:platform: Linux
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
-try:
- import iptc
- IPTC_IMPORTED = True
-except ImportError:
- IPTC_IMPORTED = False
-
# Import Salt Libs
import salt.utils.path
from salt.exceptions import CommandExecutionError
+try:
+ import iptc
+
+ IPTC_IMPORTED = True
+except ImportError:
+ IPTC_IMPORTED = False
+
def __virtual__():
- '''
+ """
Only load if apf exists on the system
- '''
- if salt.utils.path.which('apf') is None:
- return (False,
- 'The apf execution module cannot be loaded: apf unavailable.')
+ """
+ if salt.utils.path.which("apf") is None:
+ return (False, "The apf execution module cannot be loaded: apf unavailable.")
elif not IPTC_IMPORTED:
- return (False,
- 'The apf execution module cannot be loaded: python-iptables is missing.')
+ return (
+ False,
+ "The apf execution module cannot be loaded: python-iptables is missing.",
+ )
else:
return True
def __apf_cmd(cmd):
- '''
+ """
Return the apf location
- '''
- apf_cmd = '{0} {1}'.format(salt.utils.path.which('apf'), cmd)
- out = __salt__['cmd.run_all'](apf_cmd)
+ """
+ apf_cmd = "{0} {1}".format(salt.utils.path.which("apf"), cmd)
+ out = __salt__["cmd.run_all"](apf_cmd)
- if out['retcode'] != 0:
- if not out['stderr']:
- msg = out['stdout']
+ if out["retcode"] != 0:
+ if not out["stderr"]:
+ msg = out["stdout"]
else:
- msg = out['stderr']
- raise CommandExecutionError(
- 'apf failed: {0}'.format(msg)
- )
- return out['stdout']
+ msg = out["stderr"]
+ raise CommandExecutionError("apf failed: {0}".format(msg))
+ return out["stdout"]
def _status_apf():
- '''
+ """
Return True if apf is running otherwise return False
- '''
+ """
status = 0
table = iptc.Table(iptc.Table.FILTER)
for chain in table.chains:
- if 'sanity' in chain.name.lower():
+ if "sanity" in chain.name.lower():
status = 1
return True if status else False
def running():
- '''
+ """
Check apf status
CLI Example:
.. code-block:: bash
salt '*' apf.running
- '''
+ """
return True if _status_apf() else False
def disable():
- '''
+ """
Stop (flush) all firewall rules
CLI Example:
.. code-block:: bash
salt '*' apf.disable
- '''
+ """
if _status_apf():
- return __apf_cmd('-f')
+ return __apf_cmd("-f")
def enable():
- '''
+ """
Load all firewall rules
CLI Example:
.. code-block:: bash
salt '*' apf.enable
- '''
+ """
if not _status_apf():
- return __apf_cmd('-s')
+ return __apf_cmd("-s")
def reload():
- '''
+ """
Stop (flush) & reload firewall rules
CLI Example:
.. code-block:: bash
salt '*' apf.reload
- '''
+ """
if not _status_apf():
- return __apf_cmd('-r')
+ return __apf_cmd("-r")
def refresh():
- '''
+ """
Refresh & resolve dns names in trust rules
CLI Example:
.. code-block:: bash
salt '*' apf.refresh
- '''
- return __apf_cmd('-e')
+ """
+ return __apf_cmd("-e")
def allow(ip, port=None):
- '''
+ """
Add host (IP/FQDN) to allow_hosts.rules and immediately load new rule into firewall
CLI Example:
.. code-block:: bash
salt '*' apf.allow 127.0.0.1
- '''
+ """
if port is None:
- return __apf_cmd('-a {0}'.format(ip))
+ return __apf_cmd("-a {0}".format(ip))
def deny(ip):
- '''
+ """
Add host (IP/FQDN) to deny_hosts.rules and immediately load new rule into firewall
CLI Example:
.. code-block:: bash
salt '*' apf.deny 1.2.3.4
- '''
- return __apf_cmd('-d {0}'.format(ip))
+ """
+ return __apf_cmd("-d {0}".format(ip))
def remove(ip):
- '''
+ """
Remove host from [glob]*_hosts.rules and immediately remove rule from firewall
CLI Example:
.. code-block:: bash
salt '*' apf.remove 1.2.3.4
- '''
- return __apf_cmd('-u {0}'.format(ip))
+ """
+ return __apf_cmd("-u {0}".format(ip))
diff --git a/salt/modules/apkpkg.py b/salt/modules/apkpkg.py
index 2e9a2a952e7..42720674e6b 100644
--- a/salt/modules/apkpkg.py
+++ b/salt/modules/apkpkg.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for apk
.. important::
@@ -10,7 +10,7 @@ Support for apk
.. versionadded:: 2017.7.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -20,55 +20,55 @@ import logging
# Import salt libs
import salt.utils.data
import salt.utils.itertools
-
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'pkg'
+__virtualname__ = "pkg"
def __virtual__():
- '''
+ """
Confirm this module is running on an Alpine Linux distribution
- '''
- if __grains__.get('os_family', False) == 'Alpine':
+ """
+ if __grains__.get("os_family", False) == "Alpine":
return __virtualname__
return (False, "Module apk only works on Alpine Linux based systems")
-#def autoremove(list_only=False, purge=False):
+
+# def autoremove(list_only=False, purge=False):
# return 'Not available'
-#def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
+# def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
# return 'Not available'
-#def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
+# def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
# return 'Not available'
-#def upgrade_available(name):
+# def upgrade_available(name):
# return 'Not available'
-#def version_cmp(pkg1, pkg2, ignore_epoch=False):
+# def version_cmp(pkg1, pkg2, ignore_epoch=False):
# return 'Not available'
-#def list_repos():
+# def list_repos():
# return 'Not available'
-#def get_repo(repo, **kwargs):
+# def get_repo(repo, **kwargs):
# return 'Not available'
-#def del_repo(repo, **kwargs):
+# def del_repo(repo, **kwargs):
# return 'Not available'
-#def del_repo_key(name=None, **kwargs):
+# def del_repo_key(name=None, **kwargs):
# return 'Not available'
-#def mod_repo(repo, saltenv='base', **kwargs):
+# def mod_repo(repo, saltenv='base', **kwargs):
# return 'Not available'
-#def expand_repo_def(**kwargs):
+# def expand_repo_def(**kwargs):
# return 'Not available'
-#def get_selections(pattern=None, state=None):
+# def get_selections(pattern=None, state=None):
# return 'Not available'
-#def set_selections(path=None, selection=None, clear=False, saltenv='base'):
+# def set_selections(path=None, selection=None, clear=False, saltenv='base'):
# return 'Not available'
-#def info_installed(*names):
+# def info_installed(*names):
# return 'Not available'
def version(*names, **kwargs):
- '''
+ """
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
@@ -79,12 +79,12 @@ def version(*names, **kwargs):
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
- '''
- return __salt__['pkg_resource.version'](*names, **kwargs)
+ """
+ return __salt__["pkg_resource.version"](*names, **kwargs)
def refresh_db():
- '''
+ """
Updates the package list
- ``True``: Database updated successfully
@@ -95,30 +95,28 @@ def refresh_db():
.. code-block:: bash
salt '*' pkg.refresh_db
- '''
+ """
ret = {}
- cmd = ['apk', 'update']
- call = __salt__['cmd.run_all'](cmd,
- output_loglevel='trace',
- python_shell=False)
- if call['retcode'] == 0:
+ cmd = ["apk", "update"]
+ call = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
+ if call["retcode"] == 0:
errors = []
ret = True
else:
- errors = [call['stdout']]
+ errors = [call["stdout"]]
ret = False
if errors:
raise CommandExecutionError(
- 'Problem encountered installing package(s)',
- info={'errors': errors, 'changes': ret}
+ "Problem encountered installing package(s)",
+ info={"errors": errors, "changes": ret},
)
return ret
def list_pkgs(versions_as_list=False, **kwargs):
- '''
+ """
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
@@ -129,38 +127,39 @@ def list_pkgs(versions_as_list=False, **kwargs):
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
- '''
+ """
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
- if any([salt.utils.data.is_true(kwargs.get(x))
- for x in ('removed', 'purge_desired')]):
+ if any(
+ [salt.utils.data.is_true(kwargs.get(x)) for x in ("removed", "purge_desired")]
+ ):
return {}
- if 'pkg.list_pkgs' in __context__:
+ if "pkg.list_pkgs" in __context__:
if versions_as_list:
- return __context__['pkg.list_pkgs']
+ return __context__["pkg.list_pkgs"]
else:
- ret = copy.deepcopy(__context__['pkg.list_pkgs'])
- __salt__['pkg_resource.stringify'](ret)
+ ret = copy.deepcopy(__context__["pkg.list_pkgs"])
+ __salt__["pkg_resource.stringify"](ret)
return ret
- cmd = ['apk', 'info', '-v']
+ cmd = ["apk", "info", "-v"]
ret = {}
- out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
- for line in salt.utils.itertools.split(out, '\n'):
- pkg_version = '-'.join(line.split('-')[-2:])
- pkg_name = '-'.join(line.split('-')[:-2])
- __salt__['pkg_resource.add_pkg'](ret, pkg_name, pkg_version)
+ out = __salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
+ for line in salt.utils.itertools.split(out, "\n"):
+ pkg_version = "-".join(line.split("-")[-2:])
+ pkg_name = "-".join(line.split("-")[:-2])
+ __salt__["pkg_resource.add_pkg"](ret, pkg_name, pkg_version)
- __salt__['pkg_resource.sort_pkglist'](ret)
- __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
+ __salt__["pkg_resource.sort_pkglist"](ret)
+ __context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if not versions_as_list:
- __salt__['pkg_resource.stringify'](ret)
+ __salt__["pkg_resource.stringify"](ret)
return ret
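`list_pkgs` above derives name and version from each `apk info -v` line by splitting on `"-"` and treating the last two fields as `version-release`. The split is easy to verify standalone (the sample line is illustrative):

    # How list_pkgs splits an `apk info -v` line; the sample line is made up.
    line = "musl-1.1.24-r2"
    pkg_version = "-".join(line.split("-")[-2:])  # "1.1.24-r2"
    pkg_name = "-".join(line.split("-")[:-2])     # "musl"
    print(pkg_name, pkg_version)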
def latest_version(*names, **kwargs):
- '''
+ """
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
@@ -175,15 +174,15 @@ def latest_version(*names, **kwargs):
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
- '''
- refresh = salt.utils.data.is_true(kwargs.pop('refresh', True))
+ """
+ refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
if len(names) == 0:
- return ''
+ return ""
ret = {}
for name in names:
- ret[name] = ''
+ ret[name] = ""
pkgs = list_pkgs()
# Refresh before looking for the latest version available
@@ -191,15 +190,13 @@ def latest_version(*names, **kwargs):
refresh_db()
# Upgrade check
- cmd = ['apk', 'upgrade', '-s']
- out = __salt__['cmd.run_stdout'](cmd,
- output_loglevel='trace',
- python_shell=False)
- for line in salt.utils.itertools.split(out, '\n'):
+ cmd = ["apk", "upgrade", "-s"]
+ out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
+ for line in salt.utils.itertools.split(out, "\n"):
try:
- name = line.split(' ')[2]
- _oldversion = line.split(' ')[3].strip('(')
- newversion = line.split(' ')[5].strip(')')
+ name = line.split(" ")[2]
+ _oldversion = line.split(" ")[3].strip("(")
+ newversion = line.split(" ")[5].strip(")")
if name in names:
ret[name] = newversion
except (ValueError, IndexError):
@@ -209,17 +206,17 @@ def latest_version(*names, **kwargs):
for pkg in ret:
if not ret[pkg]:
installed = pkgs.get(pkg)
- cmd = ['apk', 'search', pkg]
- out = __salt__['cmd.run_stdout'](cmd,
- output_loglevel='trace',
- python_shell=False)
- for line in salt.utils.itertools.split(out, '\n'):
+ cmd = ["apk", "search", pkg]
+ out = __salt__["cmd.run_stdout"](
+ cmd, output_loglevel="trace", python_shell=False
+ )
+ for line in salt.utils.itertools.split(out, "\n"):
try:
- pkg_version = '-'.join(line.split('-')[-2:])
- pkg_name = '-'.join(line.split('-')[:-2])
+ pkg_version = "-".join(line.split("-")[-2:])
+ pkg_name = "-".join(line.split("-")[:-2])
if pkg == pkg_name:
if installed == pkg_version:
- ret[pkg] = ''
+ ret[pkg] = ""
else:
ret[pkg] = pkg_version
except ValueError:
@@ -232,12 +229,8 @@ def latest_version(*names, **kwargs):
# TODO: Support specific version installation
-def install(name=None,
- refresh=False,
- pkgs=None,
- sources=None,
- **kwargs):
- '''
+def install(name=None, refresh=False, pkgs=None, sources=None, **kwargs):
+ """
Install the passed package, add refresh=True to update the apk database.
name
@@ -288,25 +281,22 @@ def install(name=None,
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
- '''
+ """
refreshdb = salt.utils.data.is_true(refresh)
pkg_to_install = []
old = list_pkgs()
if name and not (pkgs or sources):
- if ',' in name:
- pkg_to_install = name.split(',')
+ if "," in name:
+ pkg_to_install = name.split(",")
else:
pkg_to_install = [name]
if pkgs:
# We don't support installing specific version for now
# so transform the dict in list ignoring version provided
- pkgs = [
- next(iter(p)) for p in pkgs
- if isinstance(p, dict)
- ]
+ pkgs = [next(iter(p)) for p in pkgs if isinstance(p, dict)]
pkg_to_install.extend(pkgs)
if not pkg_to_install:
@@ -315,49 +305,47 @@ def install(name=None,
if refreshdb:
refresh_db()
- cmd = ['apk', 'add']
+ cmd = ["apk", "add"]
# Switch in update mode if a package is already installed
for _pkg in pkg_to_install:
if old.get(_pkg):
- cmd.append('-u')
+ cmd.append("-u")
break
cmd.extend(pkg_to_install)
- out = __salt__['cmd.run_all'](
- cmd,
- output_loglevel='trace',
- python_shell=False
- )
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
- if out['retcode'] != 0 and out['stderr']:
- errors = [out['stderr']]
+ if out["retcode"] != 0 and out["stderr"]:
+ errors = [out["stderr"]]
else:
errors = []
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
- 'Problem encountered installing package(s)',
- info={'errors': errors, 'changes': ret}
+ "Problem encountered installing package(s)",
+ info={"errors": errors, "changes": ret},
)
return ret
def purge(name=None, pkgs=None, **kwargs):
- '''
+ """
Alias to remove
- '''
+ """
return remove(name=name, pkgs=pkgs, purge=True)
-def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unused-argument
- '''
+def remove(
+ name=None, pkgs=None, purge=False, **kwargs
+): # pylint: disable=unused-argument
+ """
Remove packages using ``apk del``.
name
@@ -379,13 +367,13 @@ def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unus
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
- '''
+ """
old = list_pkgs()
pkg_to_remove = []
if name:
- if ',' in name:
- pkg_to_remove = name.split(',')
+ if "," in name:
+ pkg_to_remove = name.split(",")
else:
pkg_to_remove = [name]
@@ -396,37 +384,33 @@ def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unus
return {}
if purge:
- cmd = ['apk', 'del', '--purge']
+ cmd = ["apk", "del", "--purge"]
else:
- cmd = ['apk', 'del']
+ cmd = ["apk", "del"]
cmd.extend(pkg_to_remove)
- out = __salt__['cmd.run_all'](
- cmd,
- output_loglevel='trace',
- python_shell=False
- )
- if out['retcode'] != 0 and out['stderr']:
- errors = [out['stderr']]
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
+ if out["retcode"] != 0 and out["stderr"]:
+ errors = [out["stderr"]]
else:
errors = []
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
- 'Problem encountered removing package(s)',
- info={'errors': errors, 'changes': ret}
+ "Problem encountered removing package(s)",
+ info={"errors": errors, "changes": ret},
)
return ret
def upgrade(name=None, pkgs=None, refresh=True):
- '''
+ """
Upgrades all packages via ``apk upgrade`` or a specific package if name or
pkgs is specified. Name is ignored if pkgs is specified
@@ -440,11 +424,12 @@ def upgrade(name=None, pkgs=None, refresh=True):
.. code-block:: bash
salt '*' pkg.upgrade
- '''
- ret = {'changes': {},
- 'result': True,
- 'comment': '',
- }
+ """
+ ret = {
+ "changes": {},
+ "result": True,
+ "comment": "",
+ }
if salt.utils.data.is_true(refresh):
refresh_db()
@@ -454,8 +439,8 @@ def upgrade(name=None, pkgs=None, refresh=True):
pkg_to_upgrade = []
if name and not pkgs:
- if ',' in name:
- pkg_to_upgrade = name.split(',')
+ if "," in name:
+ pkg_to_upgrade = name.split(",")
else:
pkg_to_upgrade = [name]
@@ -463,30 +448,29 @@ def upgrade(name=None, pkgs=None, refresh=True):
pkg_to_upgrade.extend(pkgs)
if pkg_to_upgrade:
- cmd = ['apk', 'add', '-u']
+ cmd = ["apk", "add", "-u"]
cmd.extend(pkg_to_upgrade)
else:
- cmd = ['apk', 'upgrade']
+ cmd = ["apk", "upgrade"]
- call = __salt__['cmd.run_all'](cmd,
- output_loglevel='trace',
- python_shell=False,
- redirect_stderr=True)
+ call = __salt__["cmd.run_all"](
+ cmd, output_loglevel="trace", python_shell=False, redirect_stderr=True
+ )
- if call['retcode'] != 0:
- ret['result'] = False
- if call['stdout']:
- ret['comment'] = call['stdout']
+ if call["retcode"] != 0:
+ ret["result"] = False
+ if call["stdout"]:
+ ret["comment"] = call["stdout"]
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
- ret['changes'] = salt.utils.data.compare_dicts(old, new)
+ ret["changes"] = salt.utils.data.compare_dicts(old, new)
return ret
def list_upgrades(refresh=True):
- '''
+ """
List all available package upgrades.
CLI Example:
@@ -494,38 +478,36 @@ def list_upgrades(refresh=True):
.. code-block:: bash
salt '*' pkg.list_upgrades
- '''
+ """
ret = {}
if salt.utils.data.is_true(refresh):
refresh_db()
- cmd = ['apk', 'upgrade', '-s']
- call = __salt__['cmd.run_all'](cmd,
- output_loglevel='trace',
- python_shell=False)
+ cmd = ["apk", "upgrade", "-s"]
+ call = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
- if call['retcode'] != 0:
- comment = ''
- if 'stderr' in call:
- comment += call['stderr']
- if 'stdout' in call:
- comment += call['stdout']
+ if call["retcode"] != 0:
+ comment = ""
+ if "stderr" in call:
+ comment += call["stderr"]
+ if "stdout" in call:
+ comment += call["stdout"]
raise CommandExecutionError(comment)
else:
- out = call['stdout']
+ out = call["stdout"]
for line in out.splitlines():
- if 'Upgrading' in line:
- name = line.split(' ')[2]
- _oldversion = line.split(' ')[3].strip('(')
- newversion = line.split(' ')[5].strip(')')
+ if "Upgrading" in line:
+ name = line.split(" ")[2]
+ _oldversion = line.split(" ")[3].strip("(")
+ newversion = line.split(" ")[5].strip(")")
ret[name] = newversion
return ret
def file_list(*packages):
- '''
+ """
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
@@ -537,12 +519,12 @@ def file_list(*packages):
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
- '''
+ """
return file_dict(*packages)
def file_dict(*packages):
- '''
+ """
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
@@ -554,34 +536,32 @@ def file_dict(*packages):
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
- '''
+ """
errors = []
ret = {}
- cmd_files = ['apk', 'info', '-L']
+ cmd_files = ["apk", "info", "-L"]
if not packages:
- return 'Package name should be provided'
+ return "Package name should be provided"
for package in packages:
files = []
cmd = cmd_files[:]
cmd.append(package)
- out = __salt__['cmd.run_all'](cmd,
- output_loglevel='trace',
- python_shell=False)
- for line in out['stdout'].splitlines():
- if line.endswith('contains:'):
+ out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
+ for line in out["stdout"].splitlines():
+ if line.endswith("contains:"):
continue
else:
files.append(line)
if files:
ret[package] = files
- return {'errors': errors, 'packages': ret}
+ return {"errors": errors, "packages": ret}
def owner(*paths):
- '''
+ """
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.apk.version>`, if a single
path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
- type_pattern = r'^List\s+[\w\s]+(?P<package_type>(file|key)s)[\w\s]+:$'
- list_pattern = r'^\s+-\s+(?P<package>.*)$'
+ type_pattern = r"^List\s+[\w\s]+(?P<package_type>(file|key)s)[\w\s]+:$"
+ list_pattern = r"^\s+-\s+(?P<package>.*)$"
current_block = None
for line in cmd_ret.splitlines():
if current_block:
match = re.search(list_pattern, line)
if match:
- package_type = 'deleted_{}'.format(current_block)
- ret[package_type].append(match.group('package'))
+ package_type = "deleted_{}".format(current_block)
+ ret[package_type].append(match.group("package"))
else:
current_block = None
# Intentionally not using an else here, in case of a situation where
@@ -500,8 +548,8 @@ def cleanup_db(config_path=_DEFAULT_CONFIG_PATH, dry_run=False):
if not current_block:
match = re.search(type_pattern, line)
if match:
- current_block = match.group('package_type')
+ current_block = match.group("package_type")
- log.debug('Package keys identified for deletion: %s', len(ret['deleted_keys']))
- log.debug('Package files identified for deletion: %s', len(ret['deleted_files']))
+ log.debug("Package keys identified for deletion: %s", len(ret["deleted_keys"]))
+ log.debug("Package files identified for deletion: %s", len(ret["deleted_files"]))
return ret
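
Note: the apk.py hunks above parse ``apk upgrade -s`` output positionally. A
minimal sketch of that parsing, assuming apk's usual "(n/m) Upgrading name
(old -> new)" line format (the sample line below is hypothetical, and the
sketch itself is not part of the patch):

# Illustrative sketch only; mirrors the split()/strip() logic used in the
# apk.py latest_version and list_upgrades hunks above.
sample = "(1/6) Upgrading musl (1.1.24-r2 -> 1.1.24-r3)"
fields = sample.split(" ")
name = fields[2]                    # "musl"
old_version = fields[3].strip("(")  # "1.1.24-r2"
new_version = fields[5].strip(")")  # "1.1.24-r3"
print(name, old_version, new_version)  # musl 1.1.24-r2 1.1.24-r3
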
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index b5503f0b10d..7cf4130cce4 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for APT (Advanced Packaging Tool)
.. important::
@@ -9,32 +9,24 @@ Support for APT (Advanced Packaging Tool)
<module-provider-override>`.
For repository management, the ``python-apt`` package must be installed.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
+import datetime
+import fnmatch
+import logging
import os
import re
-import logging
import time
-import fnmatch
-import datetime
-
-
-# Import third party libs
-# pylint: disable=no-name-in-module,import-error,redefined-builtin
-from salt.ext import six
-from salt.ext.six.moves.urllib.error import HTTPError
-from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen
-# pylint: enable=no-name-in-module,import-error,redefined-builtin
# Import salt libs
import salt.config
import salt.syspaths
-from salt.modules.cmdmod import _parse_env
import salt.utils.args
import salt.utils.data
+import salt.utils.environment
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
@@ -46,10 +38,18 @@ import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
import salt.utils.yaml
-import salt.utils.environment
-from salt.exceptions import (
- CommandExecutionError, MinionError, SaltInvocationError
-)
+from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
+
+# Import third party libs
+# pylint: disable=no-name-in-module,import-error,redefined-builtin
+from salt.ext import six
+from salt.ext.six.moves.urllib.error import HTTPError
+from salt.ext.six.moves.urllib.request import Request as _Request
+from salt.ext.six.moves.urllib.request import urlopen as _urlopen
+from salt.modules.cmdmod import _parse_env
+
+# pylint: enable=no-name-in-module,import-error,redefined-builtin
+
log = logging.getLogger(__name__)
@@ -58,38 +58,41 @@ try:
import apt.cache
import apt.debfile
from aptsources import sourceslist
+
HAS_APT = True
except ImportError:
HAS_APT = False
try:
import apt_pkg
+
HAS_APTPKG = True
except ImportError:
HAS_APTPKG = False
try:
import softwareproperties.ppa
+
HAS_SOFTWAREPROPERTIES = True
except ImportError:
HAS_SOFTWAREPROPERTIES = False
# pylint: enable=import-error
APT_LISTS_PATH = "/var/lib/apt/lists"
-PKG_ARCH_SEPARATOR = ':'
+PKG_ARCH_SEPARATOR = ":"
# Source format for urllib fallback on PPA handling
-LP_SRC_FORMAT = 'deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main'
-LP_PVT_SRC_FORMAT = 'deb https://{0}private-ppa.launchpad.net/{1}/{2}/ubuntu' \
- ' {3} main'
+LP_SRC_FORMAT = "deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main"
+LP_PVT_SRC_FORMAT = (
+ "deb https://{0}private-ppa.launchpad.net/{1}/{2}/ubuntu" " {3} main"
+)
-_MODIFY_OK = frozenset(['uri', 'comps', 'architectures', 'disabled',
- 'file', 'dist'])
+_MODIFY_OK = frozenset(["uri", "comps", "architectures", "disabled", "file", "dist"])
DPKG_ENV_VARS = {
- 'APT_LISTBUGS_FRONTEND': 'none',
- 'APT_LISTCHANGES_FRONTEND': 'none',
- 'DEBIAN_FRONTEND': 'noninteractive',
- 'UCF_FORCE_CONFFOLD': '1',
+ "APT_LISTBUGS_FRONTEND": "none",
+ "APT_LISTCHANGES_FRONTEND": "none",
+ "DEBIAN_FRONTEND": "noninteractive",
+ "UCF_FORCE_CONFFOLD": "1",
}
if six.PY2:
# Ensure no unicode in env vars on PY2, as it causes problems with
@@ -97,37 +100,37 @@ if six.PY2:
DPKG_ENV_VARS = salt.utils.data.encode(DPKG_ENV_VARS)
# Define the module's virtual name
-__virtualname__ = 'pkg'
+__virtualname__ = "pkg"
def __virtual__():
- '''
+ """
Confirm this module is on a Debian-based system
- '''
+ """
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
- if __grains__.get('os_family') == 'Debian':
+ if __grains__.get("os_family") == "Debian":
return __virtualname__
- return False, 'The pkg module could not be loaded: unsupported OS family'
+ return False, "The pkg module could not be loaded: unsupported OS family"
def __init__(opts):
- '''
+ """
For Debian and derivative systems, set up
a few env variables to keep apt happy and
non-interactive.
- '''
+ """
if __virtual__() == __virtualname__:
# Export these puppies so they persist
os.environ.update(DPKG_ENV_VARS)
def _get_ppa_info_from_launchpad(owner_name, ppa_name):
- '''
+ """
Idea from softwareproperties.ppa.
Uses urllib2 which sacrifices server cert verification.
@@ -136,61 +139,70 @@ def _get_ppa_info_from_launchpad(owner_name, ppa_name):
:param owner_name:
:param ppa_name:
:return:
- '''
+ """
- lp_url = 'https://launchpad.net/api/1.0/~{0}/+archive/{1}'.format(
- owner_name, ppa_name)
- request = _Request(lp_url, headers={'Accept': 'application/json'})
+ lp_url = "https://launchpad.net/api/1.0/~{0}/+archive/{1}".format(
+ owner_name, ppa_name
+ )
+ request = _Request(lp_url, headers={"Accept": "application/json"})
lp_page = _urlopen(request)
return salt.utils.json.load(lp_page)
def _reconstruct_ppa_name(owner_name, ppa_name):
- '''
+ """
Stringify PPA name from args.
- '''
- return 'ppa:{0}/{1}'.format(owner_name, ppa_name)
+ """
+ return "ppa:{0}/{1}".format(owner_name, ppa_name)
def _check_apt():
- '''
+ """
Abort if python-apt is not installed
- '''
+ """
if not HAS_APT:
- raise CommandExecutionError(
- 'Error: \'python-apt\' package not installed'
- )
+ raise CommandExecutionError("Error: 'python-apt' package not installed")
def _call_apt(args, scope=True, **kwargs):
- '''
+ """
Call apt* utilities.
- '''
+ """
cmd = []
- if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
- cmd.extend(['systemd-run', '--scope'])
+ if (
+ scope
+ and salt.utils.systemd.has_scope(__context__)
+ and __salt__["config.get"]("systemd.scope", True)
+ ):
+ cmd.extend(
+ ["systemd-run", "--scope", "--description", '"{0}"'.format(__name__)]
+ )
cmd.extend(args)
- params = {'output_loglevel': 'trace',
- 'python_shell': False,
- 'env': salt.utils.environment.get_module_environment(globals())}
+ params = {
+ "output_loglevel": "trace",
+ "python_shell": False,
+ "env": salt.utils.environment.get_module_environment(globals()),
+ }
params.update(kwargs)
- return __salt__['cmd.run_all'](cmd, **params)
+ return __salt__["cmd.run_all"](cmd, **params)
def _warn_software_properties(repo):
- '''
+ """
Warn of missing python-software-properties package.
- '''
- log.warning('The \'python-software-properties\' package is not installed. '
- 'For more accurate support of PPA repositories, you should '
- 'install this package.')
- log.warning('Best guess at ppa format: %s', repo)
+ """
+ log.warning(
+ "The 'python-software-properties' package is not installed. "
+ "For more accurate support of PPA repositories, you should "
+ "install this package."
+ )
+ log.warning("Best guess at ppa format: %s", repo)
def normalize_name(name):
- '''
+ """
Strips the architecture from the specified package name, if necessary.
CLI Example:
@@ -198,7 +210,7 @@ def normalize_name(name):
.. code-block:: bash
salt '*' pkg.normalize_name zsh:amd64
- '''
+ """
try:
name, arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
@@ -207,7 +219,7 @@ def normalize_name(name):
def parse_arch(name):
- '''
+ """
Parse name and architecture from the specified package name.
CLI Example:
@@ -215,19 +227,16 @@ def parse_arch(name):
.. code-block:: bash
salt '*' pkg.parse_arch zsh:amd64
- '''
+ """
try:
_name, _arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
_name, _arch = name, None
- return {
- 'name': _name,
- 'arch': _arch
- }
+ return {"name": _name, "arch": _arch}
def latest_version(*names, **kwargs):
- '''
+ """
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
@@ -251,44 +260,43 @@ def latest_version(*names, **kwargs):
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package name> fromrepo=unstable
salt '*' pkg.latest_version <package1> <package2> <package3> ...
- '''
- refresh = salt.utils.data.is_true(kwargs.pop('refresh', True))
- show_installed = salt.utils.data.is_true(kwargs.pop('show_installed', False))
- if 'repo' in kwargs:
+ """
+ refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
+ show_installed = salt.utils.data.is_true(kwargs.pop("show_installed", False))
+ if "repo" in kwargs:
raise SaltInvocationError(
- 'The \'repo\' argument is invalid, use \'fromrepo\' instead'
+ "The 'repo' argument is invalid, use 'fromrepo' instead"
)
- fromrepo = kwargs.pop('fromrepo', None)
- cache_valid_time = kwargs.pop('cache_valid_time', 0)
+ fromrepo = kwargs.pop("fromrepo", None)
+ cache_valid_time = kwargs.pop("cache_valid_time", 0)
if len(names) == 0:
- return ''
+ return ""
ret = {}
# Initialize the dict with empty strings
for name in names:
- ret[name] = ''
+ ret[name] = ""
pkgs = list_pkgs(versions_as_list=True)
- repo = ['-o', 'APT::Default-Release={0}'.format(fromrepo)] \
- if fromrepo else None
+ repo = ["-o", "APT::Default-Release={0}".format(fromrepo)] if fromrepo else None
# Refresh before looking for the latest version available
if refresh:
refresh_db(cache_valid_time)
for name in names:
- cmd = ['apt-cache', '-q', 'policy', name]
+ cmd = ["apt-cache", "-q", "policy", name]
if repo is not None:
cmd.extend(repo)
out = _call_apt(cmd, scope=False)
- candidate = ''
- for line in salt.utils.itertools.split(out['stdout'], '\n'):
- if 'Candidate' in line:
+ candidate = ""
+ for line in salt.utils.itertools.split(out["stdout"], "\n"):
+ if "Candidate" in line:
comps = line.split()
if len(comps) >= 2:
candidate = comps[-1]
- if candidate.lower() == '(none)':
- candidate = ''
+ if candidate.lower() == "(none)":
+ candidate = ""
break
installed = pkgs.get(name, [])
@@ -301,11 +309,12 @@ def latest_version(*names, **kwargs):
# to the install candidate, then the candidate is an upgrade, so
# add it to the return dict
if not any(
- (salt.utils.versions.compare(ver1=x,
- oper='>=',
- ver2=candidate,
- cmp_func=version_cmp)
- for x in installed)
+ (
+ salt.utils.versions.compare(
+ ver1=x, oper=">=", ver2=candidate, cmp_func=version_cmp
+ )
+ for x in installed
+ )
):
ret[name] = candidate
@@ -316,11 +325,13 @@ def latest_version(*names, **kwargs):
# available_version is being deprecated
-available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
+available_version = salt.utils.functools.alias_function(
+ latest_version, "available_version"
+)
def version(*names, **kwargs):
- '''
+ """
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
@@ -331,12 +342,12 @@ def version(*names, **kwargs):
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
- '''
- return __salt__['pkg_resource.version'](*names, **kwargs)
+ """
+ return __salt__["pkg_resource.version"](*names, **kwargs)
def refresh_db(cache_valid_time=0, failhard=False):
- '''
+ """
Updates the APT database to latest packages based upon repositories
Returns a dict, with the keys being package databases and the values being
@@ -365,7 +376,7 @@ def refresh_db(cache_valid_time=0, failhard=False):
.. code-block:: bash
salt '*' pkg.refresh_db
- '''
+ """
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
failhard = salt.utils.data.is_true(failhard)
@@ -376,59 +387,70 @@ def refresh_db(cache_valid_time=0, failhard=False):
try:
latest_update = os.stat(APT_LISTS_PATH).st_mtime
now = time.time()
- log.debug("now: %s, last update time: %s, expire after: %s seconds", now, latest_update, cache_valid_time)
+ log.debug(
+ "now: %s, last update time: %s, expire after: %s seconds",
+ now,
+ latest_update,
+ cache_valid_time,
+ )
if latest_update + cache_valid_time > now:
return ret
except TypeError as exp:
- log.warning("expected integer for cache_valid_time parameter, failed with: %s", exp)
+ log.warning(
+ "expected integer for cache_valid_time parameter, failed with: %s", exp
+ )
except IOError as exp:
log.warning("could not stat cache directory due to: %s", exp)
- call = _call_apt(['apt-get', '-q', 'update'], scope=False)
- if call['retcode'] != 0:
- comment = ''
- if 'stderr' in call:
- comment += call['stderr']
+ call = _call_apt(["apt-get", "-q", "update"], scope=False)
+ if call["retcode"] != 0:
+ comment = ""
+ if "stderr" in call:
+ comment += call["stderr"]
raise CommandExecutionError(comment)
else:
- out = call['stdout']
+ out = call["stdout"]
for line in out.splitlines():
cols = line.split()
if not cols:
continue
- ident = ' '.join(cols[1:])
- if 'Get' in cols[0]:
+ ident = " ".join(cols[1:])
+ if "Get" in cols[0]:
# Strip filesize from end of line
- ident = re.sub(r' \[.+B\]$', '', ident)
+ ident = re.sub(r" \[.+B\]$", "", ident)
ret[ident] = True
- elif 'Ign' in cols[0]:
+ elif "Ign" in cols[0]:
ret[ident] = False
- elif 'Hit' in cols[0]:
+ elif "Hit" in cols[0]:
ret[ident] = None
- elif 'Err' in cols[0]:
+ elif "Err" in cols[0]:
ret[ident] = False
error_repos.append(ident)
if failhard and error_repos:
- raise CommandExecutionError('Error getting repos: {0}'.format(', '.join(error_repos)))
+ raise CommandExecutionError(
+ "Error getting repos: {0}".format(", ".join(error_repos))
+ )
return ret
-def install(name=None,
- refresh=False,
- fromrepo=None,
- skip_verify=False,
- debconf=None,
- pkgs=None,
- sources=None,
- reinstall=False,
- downloadonly=False,
- ignore_epoch=False,
- **kwargs):
- '''
+def install(
+ name=None,
+ refresh=False,
+ fromrepo=None,
+ skip_verify=False,
+ debconf=None,
+ pkgs=None,
+ sources=None,
+ reinstall=False,
+ downloadonly=False,
+ ignore_epoch=False,
+ **kwargs
+):
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
@@ -567,16 +589,14 @@ def install(name=None,
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
- '''
+ """
_refresh_db = False
if salt.utils.data.is_true(refresh):
_refresh_db = True
- if 'version' in kwargs and kwargs['version']:
+ if "version" in kwargs and kwargs["version"]:
_refresh_db = False
- _latest_version = latest_version(name,
- refresh=False,
- show_installed=True)
- _version = kwargs.get('version')
+ _latest_version = latest_version(name, refresh=False, show_installed=True)
+ _version = kwargs.get("version")
# If the versions don't match, refresh is True, otherwise no need
# to refresh
if not _latest_version == _version:
@@ -587,9 +607,9 @@ def install(name=None,
for pkg in pkgs:
if isinstance(pkg, dict):
_name = next(six.iterkeys(pkg))
- _latest_version = latest_version(_name,
- refresh=False,
- show_installed=True)
+ _latest_version = latest_version(
+ _name, refresh=False, show_installed=True
+ )
_version = pkg[_name]
# If the versions don't match, refresh is True, otherwise
# no need to refresh
@@ -600,17 +620,17 @@ def install(name=None,
_refresh_db = True
if debconf:
- __salt__['debconf.set_file'](debconf)
+ __salt__["debconf.set_file"](debconf)
try:
- pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
+ pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
# Support old "repo" argument
- repo = kwargs.get('repo', '')
+ repo = kwargs.get("repo", "")
if not fromrepo and repo:
fromrepo = repo
@@ -624,71 +644,74 @@ def install(name=None,
downgrade = []
to_reinstall = {}
errors = []
- if pkg_type == 'repository':
+ if pkg_type == "repository":
pkg_params_items = list(six.iteritems(pkg_params))
- has_comparison = [x for x, y in pkg_params_items
- if y is not None
- and (y.startswith('<') or y.startswith('>'))]
- _available = list_repo_pkgs(*has_comparison, byrepo=False, **kwargs) \
- if has_comparison else {}
+ has_comparison = [
+ x
+ for x, y in pkg_params_items
+ if y is not None and (y.startswith("<") or y.startswith(">"))
+ ]
+ _available = (
+ list_repo_pkgs(*has_comparison, byrepo=False, **kwargs)
+ if has_comparison
+ else {}
+ )
# Build command prefix
- cmd_prefix.extend(['apt-get', '-q', '-y'])
- if kwargs.get('force_yes', False):
- cmd_prefix.append('--force-yes')
- if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
- cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confnew'])
+ cmd_prefix.extend(["apt-get", "-q", "-y"])
+ if kwargs.get("force_yes", False):
+ cmd_prefix.append("--force-yes")
+ if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
+ cmd_prefix.extend(["-o", "DPkg::Options::=--force-confnew"])
else:
- cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confold'])
- cmd_prefix += ['-o', 'DPkg::Options::=--force-confdef']
- if 'install_recommends' in kwargs:
- if not kwargs['install_recommends']:
- cmd_prefix.append('--no-install-recommends')
+ cmd_prefix.extend(["-o", "DPkg::Options::=--force-confold"])
+ cmd_prefix += ["-o", "DPkg::Options::=--force-confdef"]
+ if "install_recommends" in kwargs:
+ if not kwargs["install_recommends"]:
+ cmd_prefix.append("--no-install-recommends")
else:
- cmd_prefix.append('--install-recommends')
- if 'only_upgrade' in kwargs and kwargs['only_upgrade']:
- cmd_prefix.append('--only-upgrade')
+ cmd_prefix.append("--install-recommends")
+ if "only_upgrade" in kwargs and kwargs["only_upgrade"]:
+ cmd_prefix.append("--only-upgrade")
if skip_verify:
- cmd_prefix.append('--allow-unauthenticated')
+ cmd_prefix.append("--allow-unauthenticated")
if fromrepo:
- cmd_prefix.extend(['-t', fromrepo])
- cmd_prefix.append('install')
+ cmd_prefix.extend(["-t", fromrepo])
+ cmd_prefix.append("install")
else:
pkg_params_items = []
for pkg_source in pkg_params:
- if 'lowpkg.bin_pkg_info' in __salt__:
- deb_info = __salt__['lowpkg.bin_pkg_info'](pkg_source)
+ if "lowpkg.bin_pkg_info" in __salt__:
+ deb_info = __salt__["lowpkg.bin_pkg_info"](pkg_source)
else:
deb_info = None
if deb_info is None:
log.error(
- 'pkg.install: Unable to get deb information for %s. '
- 'Version comparisons will be unavailable.', pkg_source
+ "pkg.install: Unable to get deb information for %s. "
+ "Version comparisons will be unavailable.",
+ pkg_source,
)
pkg_params_items.append([pkg_source])
else:
pkg_params_items.append(
- [deb_info['name'], pkg_source, deb_info['version']]
+ [deb_info["name"], pkg_source, deb_info["version"]]
)
# Build command prefix
- if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
- cmd_prefix.extend(['dpkg', '-i', '--force-confnew'])
+ if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
+ cmd_prefix.extend(["dpkg", "-i", "--force-confnew"])
else:
- cmd_prefix.extend(['dpkg', '-i', '--force-confold'])
+ cmd_prefix.extend(["dpkg", "-i", "--force-confold"])
if skip_verify:
- cmd_prefix.append('--force-bad-verify')
+ cmd_prefix.append("--force-bad-verify")
if HAS_APT:
_resolve_deps(name, pkg_params, **kwargs)
for pkg_item_list in pkg_params_items:
- if pkg_type == 'repository':
+ if pkg_type == "repository":
pkgname, version_num = pkg_item_list
- if name \
- and pkgs is None \
- and kwargs.get('version') \
- and len(pkg_params) == 1:
+ if name and pkgs is None and kwargs.get("version") and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
- version_num = kwargs['version']
+ version_num = kwargs["version"]
else:
try:
pkgname, pkgpath, version_num = pkg_item_list
@@ -698,7 +721,7 @@ def install(name=None,
version_num = None
if version_num is None:
- if pkg_type == 'repository':
+ if pkg_type == "repository":
if reinstall and pkgname in old:
to_reinstall[pkgname] = pkgname
else:
@@ -710,10 +733,10 @@ def install(name=None,
# and version_num is not None, then we can assume that pkgname is
# not None, since the only way version_num is not None is if DEB
# metadata parsing was successful.
- if pkg_type == 'repository':
+ if pkg_type == "repository":
# Remove leading equals sign(s) to keep from building a pkgstr
# with multiple equals (which would be invalid)
- version_num = version_num.lstrip('=')
+ version_num = version_num.lstrip("=")
if pkgname in has_comparison:
candidates = _available.get(pkgname, [])
target = salt.utils.pkg.match_version(
@@ -724,37 +747,38 @@ def install(name=None,
)
if target is None:
errors.append(
- 'No version matching \'{0}{1}\' could be found '
- '(available: {2})'.format(
+ "No version matching '{0}{1}' could be found "
+ "(available: {2})".format(
pkgname,
version_num,
- ', '.join(candidates) if candidates else None
+ ", ".join(candidates) if candidates else None,
)
)
continue
else:
version_num = target
- pkgstr = '{0}={1}'.format(pkgname, version_num)
+ pkgstr = "{0}={1}".format(pkgname, version_num)
else:
pkgstr = pkgpath
- cver = old.get(pkgname, '')
- if reinstall and cver \
- and salt.utils.versions.compare(ver1=version_num,
- oper='==',
- ver2=cver,
- cmp_func=version_cmp):
+ cver = old.get(pkgname, "")
+ if (
+ reinstall
+ and cver
+ and salt.utils.versions.compare(
+ ver1=version_num, oper="==", ver2=cver, cmp_func=version_cmp
+ )
+ ):
to_reinstall[pkgname] = pkgstr
- elif not cver or salt.utils.versions.compare(ver1=version_num,
- oper='>=',
- ver2=cver,
- cmp_func=version_cmp):
+ elif not cver or salt.utils.versions.compare(
+ ver1=version_num, oper=">=", ver2=cver, cmp_func=version_cmp
+ ):
targets.append(pkgstr)
else:
downgrade.append(pkgstr)
if fromrepo and not sources:
- log.info('Targeting repo \'%s\'', fromrepo)
+ log.info("Targeting repo '%s'", fromrepo)
cmds = []
all_pkgs = []
@@ -766,9 +790,9 @@ def install(name=None,
if downgrade:
cmd = copy.deepcopy(cmd_prefix)
- if pkg_type == 'repository' and '--force-yes' not in cmd:
+ if pkg_type == "repository" and "--force-yes" not in cmd:
# Downgrading requires --force-yes. Insert this before 'install'
- cmd.insert(-1, '--force-yes')
+ cmd.insert(-1, "--force-yes")
cmd.extend(downgrade)
cmds.append(cmd)
@@ -779,26 +803,26 @@ def install(name=None,
all_pkgs.extend(to_reinstall)
cmd = copy.deepcopy(cmd_prefix)
if not sources:
- cmd.append('--reinstall')
+ cmd.append("--reinstall")
cmd.extend([x for x in six.itervalues(to_reinstall)])
cmds.append(cmd)
if not cmds:
ret = {}
else:
- cache_valid_time = kwargs.pop('cache_valid_time', 0)
+ cache_valid_time = kwargs.pop("cache_valid_time", 0)
if _refresh_db:
refresh_db(cache_valid_time)
- env = _parse_env(kwargs.get('env'))
+ env = _parse_env(kwargs.get("env"))
env.update(DPKG_ENV_VARS.copy())
- hold_pkgs = get_selections(state='hold').get('hold', [])
+ hold_pkgs = get_selections(state="hold").get("hold", [])
# all_pkgs contains the argument to be passed to apt-get install, which
# when a specific version is requested will be in the format
# name=version. Strip off the '=' if present so we can compare the
# held package names against the packages we are trying to install.
- targeted_names = [x.split('=')[0] for x in all_pkgs]
+ targeted_names = [x.split("=")[0] for x in all_pkgs]
to_unhold = [x for x in hold_pkgs if x in targeted_names]
if to_unhold:
@@ -806,81 +830,87 @@ def install(name=None,
for cmd in cmds:
out = _call_apt(cmd)
- if out['retcode'] != 0 and out['stderr']:
- errors.append(out['stderr'])
+ if out["retcode"] != 0 and out["stderr"]:
+ errors.append(out["stderr"])
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
for pkgname in to_reinstall:
if pkgname not in ret or pkgname in old:
- ret.update({pkgname: {'old': old.get(pkgname, ''),
- 'new': new.get(pkgname, '')}})
+ ret.update(
+ {
+ pkgname: {
+ "old": old.get(pkgname, ""),
+ "new": new.get(pkgname, ""),
+ }
+ }
+ )
if to_unhold:
hold(pkgs=to_unhold)
if errors:
raise CommandExecutionError(
- 'Problem encountered installing package(s)',
- info={'errors': errors, 'changes': ret}
+ "Problem encountered installing package(s)",
+ info={"errors": errors, "changes": ret},
)
return ret
-def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
- '''
+def _uninstall(action="remove", name=None, pkgs=None, **kwargs):
+ """
remove and purge do identical things but with different apt-get commands,
this function performs the common logic.
- '''
+ """
try:
- pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
+ pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
old_removed = list_pkgs(removed=True)
targets = [x for x in pkg_params if x in old]
- if action == 'purge':
+ if action == "purge":
targets.extend([x for x in pkg_params if x in old_removed])
if not targets:
return {}
- cmd = ['apt-get', '-q', '-y', action]
+ cmd = ["apt-get", "-q", "-y", action]
cmd.extend(targets)
- env = _parse_env(kwargs.get('env'))
+ env = _parse_env(kwargs.get("env"))
env.update(DPKG_ENV_VARS.copy())
out = _call_apt(cmd, env=env)
- if out['retcode'] != 0 and out['stderr']:
- errors = [out['stderr']]
+ if out["retcode"] != 0 and out["stderr"]:
+ errors = [out["stderr"]]
else:
errors = []
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
new_removed = list_pkgs(removed=True)
changes = salt.utils.data.compare_dicts(old, new)
- if action == 'purge':
+ if action == "purge":
ret = {
- 'removed': salt.utils.data.compare_dicts(old_removed, new_removed),
- 'installed': changes
+ "removed": salt.utils.data.compare_dicts(old_removed, new_removed),
+ "installed": changes,
}
else:
ret = changes
if errors:
raise CommandExecutionError(
- 'Problem encountered removing package(s)',
- info={'errors': errors, 'changes': ret}
+ "Problem encountered removing package(s)",
+ info={"errors": errors, "changes": ret},
)
return ret
def autoremove(list_only=False, purge=False):
- '''
+ """
.. versionadded:: 2015.5.0
Remove packages not required by another package using ``apt-get
@@ -902,40 +932,40 @@ def autoremove(list_only=False, purge=False):
salt '*' pkg.autoremove
salt '*' pkg.autoremove list_only=True
salt '*' pkg.autoremove purge=True
- '''
+ """
cmd = []
if list_only:
ret = []
- cmd.extend(['apt-get', '--assume-no'])
+ cmd.extend(["apt-get", "--assume-no"])
if purge:
- cmd.append('--purge')
- cmd.append('autoremove')
- out = _call_apt(cmd, ignore_retcode=True)['stdout']
+ cmd.append("--purge")
+ cmd.append("autoremove")
+ out = _call_apt(cmd, ignore_retcode=True)["stdout"]
found = False
for line in out.splitlines():
if found is True:
- if line.startswith(' '):
+ if line.startswith(" "):
ret.extend(line.split())
else:
found = False
- elif 'The following packages will be REMOVED:' in line:
+ elif "The following packages will be REMOVED:" in line:
found = True
ret.sort()
return ret
else:
old = list_pkgs()
- cmd.extend(['apt-get', '--assume-yes'])
+ cmd.extend(["apt-get", "--assume-yes"])
if purge:
- cmd.append('--purge')
- cmd.append('autoremove')
+ cmd.append("--purge")
+ cmd.append("autoremove")
_call_apt(cmd, ignore_retcode=True)
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
def remove(name=None, pkgs=None, **kwargs):
- '''
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
@@ -974,12 +1004,12 @@ def remove(name=None, pkgs=None, **kwargs):
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
- '''
- return _uninstall(action='remove', name=name, pkgs=pkgs, **kwargs)
+ """
+ return _uninstall(action="remove", name=name, pkgs=pkgs, **kwargs)
def purge(name=None, pkgs=None, **kwargs):
- '''
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
@@ -1018,12 +1048,12 @@ def purge(name=None, pkgs=None, **kwargs):
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
- '''
- return _uninstall(action='purge', name=name, pkgs=pkgs, **kwargs)
+ """
+ return _uninstall(action="purge", name=name, pkgs=pkgs, **kwargs)
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
- '''
+ """
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
@@ -1077,43 +1107,50 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
.. code-block:: bash
salt '*' pkg.upgrade
- '''
- cache_valid_time = kwargs.pop('cache_valid_time', 0)
+ """
+ cache_valid_time = kwargs.pop("cache_valid_time", 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
- if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
- force_conf = '--force-confnew'
+ if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
+ force_conf = "--force-confnew"
else:
- force_conf = '--force-confold'
- cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
- '-o', 'DPkg::Options::=--force-confdef']
+ force_conf = "--force-confold"
+ cmd = [
+ "apt-get",
+ "-q",
+ "-y",
+ "-o",
+ "DPkg::Options::={0}".format(force_conf),
+ "-o",
+ "DPkg::Options::=--force-confdef",
+ ]
- if kwargs.get('force_yes', False):
- cmd.append('--force-yes')
- if kwargs.get('skip_verify', False):
- cmd.append('--allow-unauthenticated')
- if kwargs.get('download_only', False) or kwargs.get('downloadonly', False):
- cmd.append('--download-only')
+ if kwargs.get("force_yes", False):
+ cmd.append("--force-yes")
+ if kwargs.get("skip_verify", False):
+ cmd.append("--allow-unauthenticated")
+ if kwargs.get("download_only", False) or kwargs.get("downloadonly", False):
+ cmd.append("--download-only")
- cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
+ cmd.append("dist-upgrade" if dist_upgrade else "upgrade")
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
- __context__.pop('pkg.list_pkgs', None)
+ __context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
- if result['retcode'] != 0:
+ if result["retcode"] != 0:
raise CommandExecutionError(
- 'Problem encountered upgrading packages',
- info={'changes': ret, 'result': result}
+ "Problem encountered upgrading packages",
+ info={"changes": ret, "result": result},
)
return ret
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
- '''
+ """
.. versionadded:: 2014.7.0
Set package in 'hold' state, meaning it will not be upgraded.
@@ -1135,15 +1172,11 @@ def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
- '''
+ """
if not name and not pkgs and not sources:
- raise SaltInvocationError(
- 'One of name, pkgs, or sources must be specified.'
- )
+ raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
if pkgs and sources:
- raise SaltInvocationError(
- 'Only one of pkgs or sources can be specified.'
- )
+ raise SaltInvocationError("Only one of pkgs or sources can be specified.")
targets = []
if pkgs:
@@ -1159,34 +1192,29 @@ def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
if isinstance(target, dict):
target = next(iter(target))
- ret[target] = {'name': target,
- 'changes': {},
- 'result': False,
- 'comment': ''}
+ ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""}
- state = get_selections(pattern=target, state='hold')
+ state = get_selections(pattern=target, state="hold")
if not state:
- ret[target]['comment'] = ('Package {0} not currently held.'
- .format(target))
- elif not salt.utils.data.is_true(state.get('hold', False)):
- if 'test' in __opts__ and __opts__['test']:
+ ret[target]["comment"] = "Package {0} not currently held.".format(target)
+ elif not salt.utils.data.is_true(state.get("hold", False)):
+ if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
- ret[target]['comment'] = ('Package {0} is set to be held.'
- .format(target))
+ ret[target]["comment"] = "Package {0} is set to be held.".format(target)
else:
- result = set_selections(selection={'hold': [target]})
+ result = set_selections(selection={"hold": [target]})
ret[target].update(changes=result[target], result=True)
- ret[target]['comment'] = ('Package {0} is now being held.'
- .format(target))
+ ret[target]["comment"] = "Package {0} is now being held.".format(target)
else:
ret[target].update(result=True)
- ret[target]['comment'] = ('Package {0} is already set to be held.'
- .format(target))
+ ret[target]["comment"] = "Package {0} is already set to be held.".format(
+ target
+ )
return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
- '''
+ """
.. versionadded:: 2014.7.0
Set package current in 'hold' state to install state,
@@ -1209,15 +1237,11 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
.. code-block:: bash
salt '*' pkg.unhold pkgs='["foo", "bar"]'
- '''
+ """
if not name and not pkgs and not sources:
- raise SaltInvocationError(
- 'One of name, pkgs, or sources must be specified.'
- )
+ raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
if pkgs and sources:
- raise SaltInvocationError(
- 'Only one of pkgs or sources can be specified.'
- )
+ raise SaltInvocationError("Only one of pkgs or sources can be specified.")
targets = []
if pkgs:
@@ -1233,37 +1257,35 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06
if isinstance(target, dict):
target = next(iter(target))
- ret[target] = {'name': target,
- 'changes': {},
- 'result': False,
- 'comment': ''}
+ ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""}
state = get_selections(pattern=target)
if not state:
- ret[target]['comment'] = ('Package {0} does not have a state.'
- .format(target))
- elif salt.utils.data.is_true(state.get('hold', False)):
- if 'test' in __opts__ and __opts__['test']:
+ ret[target]["comment"] = "Package {0} does not have a state.".format(target)
+ elif salt.utils.data.is_true(state.get("hold", False)):
+ if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
- ret[target]['comment'] = ('Package {0} is set not to be '
- 'held.'.format(target))
+ ret[target]["comment"] = "Package {0} is set not to be " "held.".format(
+ target
+ )
else:
- result = set_selections(selection={'install': [target]})
+ result = set_selections(selection={"install": [target]})
ret[target].update(changes=result[target], result=True)
- ret[target]['comment'] = ('Package {0} is no longer being '
- 'held.'.format(target))
+ ret[target][
+ "comment"
+ ] = "Package {0} is no longer being " "held.".format(target)
else:
ret[target].update(result=True)
- ret[target]['comment'] = ('Package {0} is already set not to be '
- 'held.'.format(target))
+ ret[target][
+ "comment"
+ ] = "Package {0} is already set not to be " "held.".format(target)
return ret
-def list_pkgs(versions_as_list=False,
- removed=False,
- purge_desired=False,
- **kwargs): # pylint: disable=W0613
- '''
+def list_pkgs(
+ versions_as_list=False, removed=False, purge_desired=False, **kwargs
+): # pylint: disable=W0613
+ """
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
@@ -1290,125 +1312,120 @@ def list_pkgs(versions_as_list=False,
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
- '''
+ """
versions_as_list = salt.utils.data.is_true(versions_as_list)
removed = salt.utils.data.is_true(removed)
purge_desired = salt.utils.data.is_true(purge_desired)
- if 'pkg.list_pkgs' in __context__:
+ if "pkg.list_pkgs" in __context__:
if removed:
- ret = copy.deepcopy(__context__['pkg.list_pkgs']['removed'])
+ ret = copy.deepcopy(__context__["pkg.list_pkgs"]["removed"])
else:
- ret = copy.deepcopy(__context__['pkg.list_pkgs']['purge_desired'])
+ ret = copy.deepcopy(__context__["pkg.list_pkgs"]["purge_desired"])
if not purge_desired:
- ret.update(__context__['pkg.list_pkgs']['installed'])
+ ret.update(__context__["pkg.list_pkgs"]["installed"])
if not versions_as_list:
- __salt__['pkg_resource.stringify'](ret)
+ __salt__["pkg_resource.stringify"](ret)
return ret
- ret = {'installed': {}, 'removed': {}, 'purge_desired': {}}
- cmd = ['dpkg-query', '--showformat',
- '${Status} ${Package} ${Version} ${Architecture}\n', '-W']
+ ret = {"installed": {}, "removed": {}, "purge_desired": {}}
+ cmd = [
+ "dpkg-query",
+ "--showformat",
+ "${Status} ${Package} ${Version} ${Architecture}\n",
+ "-W",
+ ]
- out = __salt__['cmd.run_stdout'](
- cmd,
- output_loglevel='trace',
- python_shell=False)
+ out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
# Typical lines of output:
# install ok installed zsh 4.3.17-1ubuntu1 amd64
# deinstall ok config-files mc 3:4.8.1-2ubuntu1 amd64
for line in out.splitlines():
cols = line.split()
try:
- linetype, status, name, version_num, arch = \
- [cols[x] for x in (0, 2, 3, 4, 5)]
+ linetype, status, name, version_num, arch = [
+ cols[x] for x in (0, 2, 3, 4, 5)
+ ]
except (ValueError, IndexError):
continue
- if __grains__.get('cpuarch', '') == 'x86_64':
- osarch = __grains__.get('osarch', '')
- if arch != 'all' and osarch == 'amd64' and osarch != arch:
- name += ':{0}'.format(arch)
+ if __grains__.get("cpuarch", "") == "x86_64":
+ osarch = __grains__.get("osarch", "")
+ if arch != "all" and osarch == "amd64" and osarch != arch:
+ name += ":{0}".format(arch)
if cols:
- if ('install' in linetype or 'hold' in linetype) and \
- 'installed' in status:
- __salt__['pkg_resource.add_pkg'](ret['installed'],
- name,
- version_num)
- elif 'deinstall' in linetype:
- __salt__['pkg_resource.add_pkg'](ret['removed'],
- name,
- version_num)
- elif 'purge' in linetype and status == 'installed':
- __salt__['pkg_resource.add_pkg'](ret['purge_desired'],
- name,
- version_num)
+ if ("install" in linetype or "hold" in linetype) and "installed" in status:
+ __salt__["pkg_resource.add_pkg"](ret["installed"], name, version_num)
+ elif "deinstall" in linetype:
+ __salt__["pkg_resource.add_pkg"](ret["removed"], name, version_num)
+ elif "purge" in linetype and status == "installed":
+ __salt__["pkg_resource.add_pkg"](
+ ret["purge_desired"], name, version_num
+ )
- for pkglist_type in ('installed', 'removed', 'purge_desired'):
- __salt__['pkg_resource.sort_pkglist'](ret[pkglist_type])
+ for pkglist_type in ("installed", "removed", "purge_desired"):
+ __salt__["pkg_resource.sort_pkglist"](ret[pkglist_type])
- __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
+ __context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if removed:
- ret = ret['removed']
+ ret = ret["removed"]
else:
- ret = copy.deepcopy(__context__['pkg.list_pkgs']['purge_desired'])
+ ret = copy.deepcopy(__context__["pkg.list_pkgs"]["purge_desired"])
if not purge_desired:
- ret.update(__context__['pkg.list_pkgs']['installed'])
+ ret.update(__context__["pkg.list_pkgs"]["installed"])
if not versions_as_list:
- __salt__['pkg_resource.stringify'](ret)
+ __salt__["pkg_resource.stringify"](ret)
return ret
def _get_upgradable(dist_upgrade=True, **kwargs):
- '''
+ """
Utility function to get upgradable packages
Sample return data:
{ 'pkgname': '1.2.3-45', ... }
- '''
+ """
- cmd = ['apt-get', '--just-print']
+ cmd = ["apt-get", "--just-print"]
if dist_upgrade:
- cmd.append('dist-upgrade')
+ cmd.append("dist-upgrade")
else:
- cmd.append('upgrade')
+ cmd.append("upgrade")
try:
- cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])])
+ cmd.extend(["-o", "APT::Default-Release={0}".format(kwargs["fromrepo"])])
except KeyError:
pass
call = _call_apt(cmd)
- if call['retcode'] != 0:
- msg = 'Failed to get upgrades'
- for key in ('stderr', 'stdout'):
+ if call["retcode"] != 0:
+ msg = "Failed to get upgrades"
+ for key in ("stderr", "stdout"):
if call[key]:
- msg += ': ' + call[key]
+ msg += ": " + call[key]
break
raise CommandExecutionError(msg)
else:
- out = call['stdout']
+ out = call["stdout"]
# rexp parses lines that look like the following:
# Conf libxfont1 (1:1.4.5-1 Debian:testing [i386])
- rexp = re.compile('(?m)^Conf '
- '([^ ]+) ' # Package name
- r'\(([^ ]+)') # Version
- keys = ['name', 'version']
+ rexp = re.compile("(?m)^Conf " "([^ ]+) " r"\(([^ ]+)") # Package name # Version
+ keys = ["name", "version"]
_get = lambda l, k: l[keys.index(k)]
upgrades = rexp.findall(out)
ret = {}
for line in upgrades:
- name = _get(line, 'name')
- version_num = _get(line, 'version')
+ name = _get(line, "name")
+ version_num = _get(line, "version")
ret[name] = version_num
return ret
def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
- '''
+ """
List all available package upgrades.
refresh
@@ -1431,15 +1448,15 @@ def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
.. code-block:: bash
salt '*' pkg.list_upgrades
- '''
- cache_valid_time = kwargs.pop('cache_valid_time', 0)
+ """
+ cache_valid_time = kwargs.pop("cache_valid_time", 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
return _get_upgradable(dist_upgrade, **kwargs)
def upgrade_available(name):
- '''
+ """
Check whether or not an upgrade is available for a given package
CLI Example:
@@ -1447,12 +1464,12 @@ def upgrade_available(name):
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
- '''
- return latest_version(name) != ''
+ """
+ return latest_version(name) != ""
def version_cmp(pkg1, pkg2, ignore_epoch=False):
- '''
+ """
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
@@ -1467,9 +1484,12 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
.. code-block:: bash
salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1'
- '''
- normalize = lambda x: six.text_type(x).split(':', 1)[-1] \
- if ignore_epoch else six.text_type(x)
+ """
+ normalize = (
+ lambda x: six.text_type(x).split(":", 1)[-1]
+ if ignore_epoch
+ else six.text_type(x)
+ )
# both apt_pkg.version_compare and _cmd_quote need string arguments.
pkg1 = normalize(pkg1)
pkg2 = normalize(pkg2)
@@ -1494,12 +1514,11 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
# Try to use shell version in case of errors w/python bindings
pass
try:
- for oper, ret in (('lt', -1), ('eq', 0), ('gt', 1)):
- cmd = ['dpkg', '--compare-versions', pkg1, oper, pkg2]
- retcode = __salt__['cmd.retcode'](cmd,
- output_loglevel='trace',
- python_shell=False,
- ignore_retcode=True)
+ for oper, ret in (("lt", -1), ("eq", 0), ("gt", 1)):
+ cmd = ["dpkg", "--compare-versions", pkg1, oper, pkg2]
+ retcode = __salt__["cmd.retcode"](
+ cmd, output_loglevel="trace", python_shell=False, ignore_retcode=True
+ )
if retcode == 0:
return ret
except Exception as exc: # pylint: disable=broad-except
@@ -1508,43 +1527,49 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
def _split_repo_str(repo):
- '''
+ """
Return APT source entry as a tuple.
- '''
+ """
split = sourceslist.SourceEntry(repo)
return split.type, split.architectures, split.uri, split.dist, split.comps
def _consolidate_repo_sources(sources):
- '''
+ """
Consolidate APT sources.
- '''
+ """
if not isinstance(sources, sourceslist.SourcesList):
raise TypeError(
- '\'{0}\' not a \'{1}\''.format(
- type(sources),
- sourceslist.SourcesList
- )
+ "'{0}' not a '{1}'".format(type(sources), sourceslist.SourcesList)
)
consolidated = {}
delete_files = set()
- base_file = sourceslist.SourceEntry('').file
+ base_file = sourceslist.SourceEntry("").file
repos = [s for s in sources.list if not s.invalid]
for repo in repos:
- repo.uri = repo.uri.rstrip('/')
+ repo.uri = repo.uri.rstrip("/")
# future lint: disable=blacklisted-function
- key = str((getattr(repo, 'architectures', []),
- repo.disabled, repo.type, repo.uri, repo.dist))
+ key = str(
+ (
+ getattr(repo, "architectures", []),
+ repo.disabled,
+ repo.type,
+ repo.uri,
+ repo.dist,
+ )
+ )
# future lint: enable=blacklisted-function
if key in consolidated:
combined = consolidated[key]
combined_comps = set(repo.comps).union(set(combined.comps))
consolidated[key].comps = list(combined_comps)
else:
- consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line))
+ consolidated[key] = sourceslist.SourceEntry(
+ salt.utils.pkg.deb.strip_uri(repo.line)
+ )
if repo.file != base_file:
delete_files.add(repo.file)
@@ -1560,7 +1585,7 @@ def _consolidate_repo_sources(sources):
def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
- '''
+ """
.. versionadded:: 2017.7.0
Returns all available packages. Optionally, package names (and name globs)
@@ -1588,21 +1613,21 @@ def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
- '''
+ """
if args:
# Get only information about packages in args
- cmd = ['apt-cache', 'show'] + [arg for arg in args]
+ cmd = ["apt-cache", "show"] + [arg for arg in args]
else:
# Get information about all available packages
- cmd = ['apt-cache', 'dump']
+ cmd = ["apt-cache", "dump"]
out = _call_apt(cmd, scope=False, ignore_retcode=True)
ret = {}
pkg_name = None
skip_pkg = False
- new_pkg = re.compile('^Package: (.+)')
- for line in salt.utils.itertools.split(out['stdout'], '\n'):
+ new_pkg = re.compile("^Package: (.+)")
+ for line in salt.utils.itertools.split(out["stdout"], "\n"):
if not line.strip():
continue
try:
@@ -1614,14 +1639,14 @@ def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
pkg_name = cur_pkg
continue
comps = line.strip().split(None, 1)
- if comps[0] == 'Version:':
+ if comps[0] == "Version:":
ret.setdefault(pkg_name, []).append(comps[1])
return ret
def list_repos():
- '''
+ """
Lists all repos in the sources.list (and sources.lists.d) files
CLI Example:
@@ -1630,7 +1655,7 @@ def list_repos():
salt '*' pkg.list_repos
salt '*' pkg.list_repos disabled=True
- '''
+ """
_check_apt()
repos = {}
sources = sourceslist.SourcesList()
@@ -1638,20 +1663,20 @@ def list_repos():
if source.invalid:
continue
repo = {}
- repo['file'] = source.file
- repo['comps'] = getattr(source, 'comps', [])
- repo['disabled'] = source.disabled
- repo['dist'] = source.dist
- repo['type'] = source.type
- repo['uri'] = source.uri.rstrip('/')
- repo['line'] = salt.utils.pkg.deb.strip_uri(source.line.strip())
- repo['architectures'] = getattr(source, 'architectures', [])
+ repo["file"] = source.file
+ repo["comps"] = getattr(source, "comps", [])
+ repo["disabled"] = source.disabled
+ repo["dist"] = source.dist
+ repo["type"] = source.type
+ repo["uri"] = source.uri.rstrip("/")
+ repo["line"] = salt.utils.pkg.deb.strip_uri(source.line.strip())
+ repo["architectures"] = getattr(source, "architectures", [])
repos.setdefault(source.uri, []).append(repo)
return repos
def get_repo(repo, **kwargs):
- '''
+ """
Display a repo from the sources.list / sources.list.d
The repo passed in needs to be a complete repo entry.
@@ -1661,33 +1686,31 @@ def get_repo(repo, **kwargs):
.. code-block:: bash
salt '*' pkg.get_repo "myrepo definition"
- '''
+ """
_check_apt()
- ppa_auth = kwargs.get('ppa_auth', None)
+ ppa_auth = kwargs.get("ppa_auth", None)
# we have to be clever about this since the repo definition formats
# are a bit more "loose" than in some other distributions
- if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
+ if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
# This is a PPA definition meaning special handling is needed
# to derive the name.
- dist = __grains__['lsb_distrib_codename']
- owner_name, ppa_name = repo[4:].split('/')
+ dist = __grains__["lsb_distrib_codename"]
+ owner_name, ppa_name = repo[4:].split("/")
if ppa_auth:
- auth_info = '{0}@'.format(ppa_auth)
- repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name,
- ppa_name, dist)
+ auth_info = "{0}@".format(ppa_auth)
+ repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
try:
- if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
- repo = softwareproperties.ppa.PPAShortcutHandler(
- repo).expand(dist)[0]
+ if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
+ repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(
+ dist
+ )[0]
else:
- repo = softwareproperties.ppa.expand_ppa_line(
- repo,
- dist)[0]
+ repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
except NameError as name_error:
raise CommandExecutionError(
- 'Could not find ppa {0}: {1}'.format(repo, name_error)
+ "Could not find ppa {0}: {1}".format(repo, name_error)
)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
@@ -1696,37 +1719,45 @@ def get_repo(repo, **kwargs):
if repos:
try:
- repo_type, repo_architectures, repo_uri, repo_dist, repo_comps = _split_repo_str(repo)
+ (
+ repo_type,
+ repo_architectures,
+ repo_uri,
+ repo_dist,
+ repo_comps,
+ ) = _split_repo_str(repo)
if ppa_auth:
- uri_match = re.search('(http[s]?://)(.+)', repo_uri)
+ uri_match = re.search("(http[s]?://)(.+)", repo_uri)
if uri_match:
if not uri_match.group(2).startswith(ppa_auth):
- repo_uri = '{0}{1}@{2}'.format(uri_match.group(1),
- ppa_auth,
- uri_match.group(2))
+ repo_uri = "{0}{1}@{2}".format(
+ uri_match.group(1), ppa_auth, uri_match.group(2)
+ )
except SyntaxError:
raise CommandExecutionError(
- 'Error: repo \'{0}\' is not a well formatted definition'
- .format(repo)
+ "Error: repo '{0}' is not a well formatted definition".format(repo)
)
for source in six.itervalues(repos):
for sub in source:
- if (sub['type'] == repo_type and
+ if (
+ sub["type"] == repo_type
+ and
# strip trailing '/' from repo_uri, it's valid in definition
# but not valid when compared to persisted source
- sub['uri'].rstrip('/') == repo_uri.rstrip('/') and
- sub['dist'] == repo_dist):
+ sub["uri"].rstrip("/") == repo_uri.rstrip("/")
+ and sub["dist"] == repo_dist
+ ):
if not repo_comps:
return sub
for comp in repo_comps:
- if comp in sub.get('comps', []):
+ if comp in sub.get("comps", []):
return sub
return {}
def del_repo(repo, **kwargs):
- '''
+ """
Delete a repo from the sources.list / sources.list.d
If the .list file is in the sources.list.d directory
@@ -1741,25 +1772,24 @@ def del_repo(repo, **kwargs):
.. code-block:: bash
salt '*' pkg.del_repo "myrepo definition"
- '''
+ """
_check_apt()
is_ppa = False
- if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
+ if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
# This is a PPA definition meaning special handling is needed
# to derive the name.
is_ppa = True
- dist = __grains__['lsb_distrib_codename']
+ dist = __grains__["lsb_distrib_codename"]
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
- owner_name, ppa_name = repo[4:].split('/')
- if 'ppa_auth' in kwargs:
- auth_info = '{0}@'.format(kwargs['ppa_auth'])
- repo = LP_PVT_SRC_FORMAT.format(auth_info, dist, owner_name,
- ppa_name)
+ owner_name, ppa_name = repo[4:].split("/")
+ if "ppa_auth" in kwargs:
+ auth_info = "{0}@".format(kwargs["ppa_auth"])
+ repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
- if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
+ if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
@@ -1769,22 +1799,25 @@ def del_repo(repo, **kwargs):
if repos:
deleted_from = dict()
try:
- repo_type, \
- repo_architectures, \
- repo_uri, \
- repo_dist, \
- repo_comps = _split_repo_str(repo)
+ (
+ repo_type,
+ repo_architectures,
+ repo_uri,
+ repo_dist,
+ repo_comps,
+ ) = _split_repo_str(repo)
except SyntaxError:
raise SaltInvocationError(
- 'Error: repo \'{0}\' not a well formatted definition'
- .format(repo)
+ "Error: repo '{0}' not a well formatted definition".format(repo)
)
for source in repos:
- if (source.type == repo_type
- and source.architectures == repo_architectures
- and source.uri == repo_uri
- and source.dist == repo_dist):
+ if (
+ source.type == repo_type
+ and source.architectures == repo_architectures
+ and source.uri == repo_uri
+ and source.dist == repo_dist
+ ):
s_comps = set(source.comps)
r_comps = set(repo_comps)
@@ -1799,8 +1832,13 @@ def del_repo(repo, **kwargs):
# PPAs are special and can add deb-src where expand_ppa_line
# doesn't always reflect this. Let's just clean up here for good
# measure
- if (is_ppa and repo_type == 'deb' and source.type == 'deb-src' and
- source.uri == repo_uri and source.dist == repo_dist):
+ if (
+ is_ppa
+ and repo_type == "deb"
+ and source.type == "deb-src"
+ and source.uri == repo_uri
+ and source.dist == repo_dist
+ ):
s_comps = set(source.comps)
r_comps = set(repo_comps)
@@ -1814,16 +1852,15 @@ def del_repo(repo, **kwargs):
pass
sources.save()
if deleted_from:
- ret = ''
+ ret = ""
for source in sources:
if source.file in deleted_from:
deleted_from[source.file] += 1
for repo_file, count in six.iteritems(deleted_from):
- msg = 'Repo \'{0}\' has been removed from {1}.\n'
- if count == 0 and 'sources.list.d/' in repo_file:
+ msg = "Repo '{0}' has been removed from {1}.\n"
+ if count == 0 and "sources.list.d/" in repo_file:
if os.path.isfile(repo_file):
- msg = ('File {1} containing repo \'{0}\' has been '
- 'removed.')
+ msg = "File {1} containing repo '{0}' has been " "removed."
try:
os.remove(repo_file)
except OSError:
@@ -1834,12 +1871,12 @@ def del_repo(repo, **kwargs):
return ret
raise CommandExecutionError(
- 'Repo {0} doesn\'t exist in the sources.list(s)'.format(repo)
+ "Repo {0} doesn't exist in the sources.list(s)".format(repo)
)
def _convert_if_int(value):
- '''
+ """
.. versionadded:: 2017.7.0
Convert to an int if necessary.
@@ -1848,7 +1885,7 @@ def _convert_if_int(value):
:return: The converted or passed value.
:rtype: bool|int|str
- '''
+ """
try:
value = int(str(value)) # future lint: disable=blacklisted-function
except ValueError:
@@ -1857,7 +1894,7 @@ def _convert_if_int(value):
def get_repo_keys():
- '''
+ """
.. versionadded:: 2017.7.0
List known repo key details.
@@ -1870,66 +1907,76 @@ def get_repo_keys():
.. code-block:: bash
salt '*' pkg.get_repo_keys
- '''
+ """
ret = dict()
repo_keys = list()
# The double usage of '--with-fingerprint' is necessary in order to
# retrieve the fingerprint of the subkey.
- cmd = ['apt-key', 'adv', '--batch', '--list-public-keys', '--with-fingerprint',
- '--with-fingerprint', '--with-colons', '--fixed-list-mode']
+ cmd = [
+ "apt-key",
+ "adv",
+ "--batch",
+ "--list-public-keys",
+ "--with-fingerprint",
+ "--with-fingerprint",
+ "--with-colons",
+ "--fixed-list-mode",
+ ]
cmd_ret = _call_apt(cmd, scope=False)
- if cmd_ret['retcode'] != 0:
- log.error(cmd_ret['stderr'])
+ if cmd_ret["retcode"] != 0:
+ log.error(cmd_ret["stderr"])
return ret
- lines = [line for line in cmd_ret['stdout'].splitlines() if line.strip()]
+ lines = [line for line in cmd_ret["stdout"].splitlines() if line.strip()]
# Reference for the meaning of each item in the colon-separated
# record can be found here: https://goo.gl/KIZbvp
for line in lines:
- items = [_convert_if_int(item.strip()) if item.strip() else None for item in line.split(':')]
+ items = [
+ _convert_if_int(item.strip()) if item.strip() else None
+ for item in line.split(":")
+ ]
key_props = dict()
if len(items) < 2:
- log.debug('Skipping line: %s', line)
+ log.debug("Skipping line: %s", line)
continue
- if items[0] in ('pub', 'sub'):
- key_props.update({
- 'algorithm': items[3],
- 'bits': items[2],
- 'capability': items[11],
- 'date_creation': items[5],
- 'date_expiration': items[6],
- 'keyid': items[4],
- 'validity': items[1]
- })
+ if items[0] in ("pub", "sub"):
+ key_props.update(
+ {
+ "algorithm": items[3],
+ "bits": items[2],
+ "capability": items[11],
+ "date_creation": items[5],
+ "date_expiration": items[6],
+ "keyid": items[4],
+ "validity": items[1],
+ }
+ )
- if items[0] == 'pub':
+ if items[0] == "pub":
repo_keys.append(key_props)
else:
- repo_keys[-1]['subkey'] = key_props
- elif items[0] == 'fpr':
- if repo_keys[-1].get('subkey', False):
- repo_keys[-1]['subkey'].update({'fingerprint': items[9]})
+ repo_keys[-1]["subkey"] = key_props
+ elif items[0] == "fpr":
+ if repo_keys[-1].get("subkey", False):
+ repo_keys[-1]["subkey"].update({"fingerprint": items[9]})
else:
- repo_keys[-1].update({'fingerprint': items[9]})
- elif items[0] == 'uid':
- repo_keys[-1].update({
- 'uid': items[9],
- 'uid_hash': items[7]
- })
+ repo_keys[-1].update({"fingerprint": items[9]})
+ elif items[0] == "uid":
+ repo_keys[-1].update({"uid": items[9], "uid_hash": items[7]})
for repo_key in repo_keys:
- ret[repo_key['keyid']] = repo_key
+ ret[repo_key["keyid"]] = repo_key
return ret
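
To make the colon-record indices above concrete, here is a sketch of how one
hypothetical ``pub`` record from ``gpg --with-colons`` output lands in
``key_props`` (the sample line is invented, but follows the referenced layout):

    line = "pub:-:4096:1:ABCDEF0123456789:1416463880:::-:::scSC::::::23::0:"
    items = [_convert_if_int(i.strip()) if i.strip() else None for i in line.split(":")]
    # items[1]  -> "-"                 (validity)
    # items[2]  -> 4096                (bits)
    # items[3]  -> 1                   (algorithm, 1 == RSA)
    # items[4]  -> "ABCDEF0123456789"  (keyid)
    # items[5]  -> 1416463880          (date_creation)
    # items[6]  -> None                (date_expiration; empty field)
    # items[11] -> "scSC"              (capability)
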
-def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base'):
- '''
+def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base"):
+ """
.. versionadded:: 2017.7.0
Add a repo key using ``apt-key add``.
@@ -1952,55 +1999,59 @@ def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base
salt '*' pkg.add_repo_key text="'$KEY1'"
salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA'
- '''
- cmd = ['apt-key']
+ """
+ cmd = ["apt-key"]
kwargs = {}
current_repo_keys = get_repo_keys()
if path:
- cached_source_path = __salt__['cp.cache_file'](path, saltenv)
+ cached_source_path = __salt__["cp.cache_file"](path, saltenv)
if not cached_source_path:
- log.error('Unable to get cached copy of file: %s', path)
+ log.error("Unable to get cached copy of file: %s", path)
return False
- cmd.extend(['add', cached_source_path])
+ cmd.extend(["add", cached_source_path])
elif text:
- log.debug('Received value: %s', text)
+ log.debug("Received value: %s", text)
- cmd.extend(['add', '-'])
- kwargs.update({'stdin': text})
+ cmd.extend(["add", "-"])
+ kwargs.update({"stdin": text})
elif keyserver:
if not keyid:
- error_msg = 'No keyid or keyid too short for keyserver: {0}'.format(keyserver)
+ error_msg = "No keyid or keyid too short for keyserver: {0}".format(
+ keyserver
+ )
raise SaltInvocationError(error_msg)
- cmd.extend(['adv', '--batch', '--keyserver', keyserver, '--recv', keyid])
+ cmd.extend(["adv", "--batch", "--keyserver", keyserver, "--recv", keyid])
elif keyid:
- error_msg = 'No keyserver specified for keyid: {0}'.format(keyid)
+ error_msg = "No keyserver specified for keyid: {0}".format(keyid)
raise SaltInvocationError(error_msg)
else:
- raise TypeError('{0}() takes at least 1 argument (0 given)'.format(add_repo_key.__name__))
+ raise TypeError(
+ "{0}() takes at least 1 argument (0 given)".format(add_repo_key.__name__)
+ )
# If the keyid is provided or determined, check it against the existing
# repo key ids to determine whether it needs to be imported.
if keyid:
for current_keyid in current_repo_keys:
- if current_keyid[-(len(keyid)):] == keyid:
+ if current_keyid[-(len(keyid)) :] == keyid:
log.debug("The keyid '%s' already present: %s", keyid, current_keyid)
return True
cmd_ret = _call_apt(cmd, **kwargs)
- if cmd_ret['retcode'] == 0:
+ if cmd_ret["retcode"] == 0:
return True
- log.error('Unable to add repo key: %s', cmd_ret['stderr'])
+ log.error("Unable to add repo key: %s", cmd_ret["stderr"])
return False
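
The suffix slice in the keyid check above is what lets a short keyid match a
full-length fingerprint already present on the minion; for example:

    # An 8-character keyid matches the tail of its 40-character fingerprint.
    keyid = "0000AAAA"
    current_keyid = "DEADBEEFDEADBEEFDEADBEEFDEADBEEF0000AAAA"
    assert current_keyid[-(len(keyid)):] == keyid
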
def del_repo_key(name=None, **kwargs):
- '''
+ """
.. versionadded:: 2015.8.0
Remove a repo key using ``apt-key del``
@@ -2026,36 +2077,31 @@ def del_repo_key(name=None, **kwargs):
salt '*' pkg.del_repo_key keyid=0123ABCD
salt '*' pkg.del_repo_key name='ppa:foo/bar' keyid_ppa=True
- '''
- if kwargs.get('keyid_ppa', False):
- if isinstance(name, six.string_types) and name.startswith('ppa:'):
- owner_name, ppa_name = name[4:].split('/')
- ppa_info = _get_ppa_info_from_launchpad(
- owner_name, ppa_name)
- keyid = ppa_info['signing_key_fingerprint'][-8:]
+ """
+ if kwargs.get("keyid_ppa", False):
+ if isinstance(name, six.string_types) and name.startswith("ppa:"):
+ owner_name, ppa_name = name[4:].split("/")
+ ppa_info = _get_ppa_info_from_launchpad(owner_name, ppa_name)
+ keyid = ppa_info["signing_key_fingerprint"][-8:]
else:
- raise SaltInvocationError(
- 'keyid_ppa requires that a PPA be passed'
- )
+ raise SaltInvocationError("keyid_ppa requires that a PPA be passed")
else:
- if 'keyid' in kwargs:
- keyid = kwargs.get('keyid')
+ if "keyid" in kwargs:
+ keyid = kwargs.get("keyid")
else:
- raise SaltInvocationError(
- 'keyid or keyid_ppa and PPA name must be passed'
- )
+ raise SaltInvocationError("keyid or keyid_ppa and PPA name must be passed")
- result = _call_apt(['apt-key', 'del', keyid], scope=False)
- if result['retcode'] != 0:
- msg = 'Failed to remove keyid {0}'
- if result['stderr']:
- msg += ': {0}'.format(result['stderr'])
+ result = _call_apt(["apt-key", "del", keyid], scope=False)
+ if result["retcode"] != 0:
+ msg = "Failed to remove keyid {0}"
+ if result["stderr"]:
+ msg += ": {0}".format(result["stderr"])
raise CommandExecutionError(msg)
return keyid
-def mod_repo(repo, saltenv='base', **kwargs):
- '''
+def mod_repo(repo, saltenv="base", **kwargs):
+ """
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the definition is well formed. For Ubuntu the
``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be
@@ -2117,22 +2163,21 @@ def mod_repo(repo, saltenv='base', **kwargs):
salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri
salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
- '''
- if 'refresh_db' in kwargs:
- refresh = kwargs['refresh_db']
+ """
+ if "refresh_db" in kwargs:
+ refresh = kwargs["refresh_db"]
else:
- refresh = kwargs.get('refresh', True)
+ refresh = kwargs.get("refresh", True)
_check_apt()
# to ensure no one sets some key values that _shouldn't_ be changed on the
# object itself, this is just a whitelist of properties that are OK to set
- if repo.startswith('ppa:'):
- if __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
+ if repo.startswith("ppa:"):
+ if __grains__["os"] in ("Ubuntu", "Mint", "neon"):
# secure PPAs cannot be supported as of the time of this code
# implementation via apt-add-repository. The code path for
# secure PPAs should be the same as urllib method
- if salt.utils.path.which('apt-add-repository') \
- and 'ppa_auth' not in kwargs:
+ if salt.utils.path.which("apt-add-repository") and "ppa_auth" not in kwargs:
repo_info = get_repo(repo)
if repo_info:
return {repo: repo_info}
@@ -2140,21 +2185,20 @@ def mod_repo(repo, saltenv='base', **kwargs):
env = None
http_proxy_url = _get_http_proxy_url()
if http_proxy_url:
- env = {'http_proxy': http_proxy_url,
- 'https_proxy': http_proxy_url}
- if float(__grains__['osrelease']) < 12.04:
- cmd = ['apt-add-repository', repo]
+ env = {
+ "http_proxy": http_proxy_url,
+ "https_proxy": http_proxy_url,
+ }
+ if float(__grains__["osrelease"]) < 12.04:
+ cmd = ["apt-add-repository", repo]
else:
- cmd = ['apt-add-repository', '-y', repo]
+ cmd = ["apt-add-repository", "-y", repo]
out = _call_apt(cmd, env=env, scope=False, **kwargs)
- if out['retcode']:
+ if out["retcode"]:
raise CommandExecutionError(
- 'Unable to add PPA \'{0}\'. \'{1}\' exited with '
- 'status {2!s}: \'{3}\' '.format(
- repo[4:],
- cmd,
- out['retcode'],
- out['stderr']
+ "Unable to add PPA '{0}'. '{1}' exited with "
+ "status {2!s}: '{3}' ".format(
+ repo[4:], cmd, out["retcode"], out["stderr"]
)
)
# explicit refresh when a repo is modified.
@@ -2165,77 +2209,79 @@ def mod_repo(repo, saltenv='base', **kwargs):
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
else:
- log.info('Falling back to urllib method for private PPA')
+ log.info("Falling back to urllib method for private PPA")
# fall back to urllib style
try:
- owner_name, ppa_name = repo[4:].split('/', 1)
+ owner_name, ppa_name = repo[4:].split("/", 1)
except ValueError:
raise CommandExecutionError(
- 'Unable to get PPA info from argument. '
+ "Unable to get PPA info from argument. "
'Expected format "/" '
- '(e.g. saltstack/salt) not found. Received '
- '\'{0}\' instead.'.format(repo[4:])
+ "(e.g. saltstack/salt) not found. Received "
+ "'{0}' instead.".format(repo[4:])
)
- dist = __grains__['lsb_distrib_codename']
+ dist = __grains__["lsb_distrib_codename"]
# ppa has a lot of implicit arguments. Make them explicit.
# These will defer to any user-defined variants
- kwargs['dist'] = dist
- ppa_auth = ''
- if 'file' not in kwargs:
- filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list'
- kwargs['file'] = filename.format(owner_name, ppa_name,
- dist)
+ kwargs["dist"] = dist
+ ppa_auth = ""
+ if "file" not in kwargs:
+ filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list"
+ kwargs["file"] = filename.format(owner_name, ppa_name, dist)
try:
launchpad_ppa_info = _get_ppa_info_from_launchpad(
- owner_name, ppa_name)
- if 'ppa_auth' not in kwargs:
- kwargs['keyid'] = launchpad_ppa_info[
- 'signing_key_fingerprint']
+ owner_name, ppa_name
+ )
+ if "ppa_auth" not in kwargs:
+ kwargs["keyid"] = launchpad_ppa_info["signing_key_fingerprint"]
else:
- if 'keyid' not in kwargs:
- error_str = 'Private PPAs require a ' \
- 'keyid to be specified: {0}/{1}'
+ if "keyid" not in kwargs:
+ error_str = (
+ "Private PPAs require a "
+ "keyid to be specified: {0}/{1}"
+ )
raise CommandExecutionError(
error_str.format(owner_name, ppa_name)
)
except HTTPError as exc:
raise CommandExecutionError(
- 'Launchpad does not know about {0}/{1}: {2}'.format(
- owner_name, ppa_name, exc)
+ "Launchpad does not know about {0}/{1}: {2}".format(
+ owner_name, ppa_name, exc
+ )
)
except IndexError as exc:
raise CommandExecutionError(
- 'Launchpad knows about {0}/{1} but did not '
- 'return a fingerprint. Please set keyid '
- 'manually: {2}'.format(owner_name, ppa_name, exc)
+ "Launchpad knows about {0}/{1} but did not "
+ "return a fingerprint. Please set keyid "
+ "manually: {2}".format(owner_name, ppa_name, exc)
)
- if 'keyserver' not in kwargs:
- kwargs['keyserver'] = 'keyserver.ubuntu.com'
- if 'ppa_auth' in kwargs:
- if not launchpad_ppa_info['private']:
+ if "keyserver" not in kwargs:
+ kwargs["keyserver"] = "keyserver.ubuntu.com"
+ if "ppa_auth" in kwargs:
+ if not launchpad_ppa_info["private"]:
raise CommandExecutionError(
- 'PPA is not private but auth credentials '
- 'passed: {0}'.format(repo)
+ "PPA is not private but auth credentials "
+ "passed: {0}".format(repo)
)
# assign the new repo format to the "repo" variable
# so we can fall through to the "normal" mechanism
# here.
- if 'ppa_auth' in kwargs:
- ppa_auth = '{0}@'.format(kwargs['ppa_auth'])
- repo = LP_PVT_SRC_FORMAT.format(ppa_auth, owner_name,
- ppa_name, dist)
+ if "ppa_auth" in kwargs:
+ ppa_auth = "{0}@".format(kwargs["ppa_auth"])
+ repo = LP_PVT_SRC_FORMAT.format(
+ ppa_auth, owner_name, ppa_name, dist
+ )
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
raise CommandExecutionError(
- 'cannot parse "ppa:" style repo definitions: {0}'
- .format(repo)
+ 'cannot parse "ppa:" style repo definitions: {0}'.format(repo)
)
sources = sourceslist.SourcesList()
- if kwargs.get('consolidate', False):
+ if kwargs.get("consolidate", False):
# attempt to de-dup and consolidate all sources
# down to entries in sources.list
# this option makes it easier to keep the sources
@@ -2252,122 +2298,148 @@ def mod_repo(repo, saltenv='base', **kwargs):
repos = [s for s in sources if not s.invalid]
mod_source = None
try:
- repo_type, \
- repo_architectures, \
- repo_uri, \
- repo_dist, \
- repo_comps = _split_repo_str(repo)
+ (
+ repo_type,
+ repo_architectures,
+ repo_uri,
+ repo_dist,
+ repo_comps,
+ ) = _split_repo_str(repo)
except SyntaxError:
raise SyntaxError(
- 'Error: repo \'{0}\' not a well formatted definition'.format(repo)
+ "Error: repo '{0}' not a well formatted definition".format(repo)
)
full_comp_list = set(repo_comps)
- no_proxy = __salt__['config.option']('no_proxy')
+ no_proxy = __salt__["config.option"]("no_proxy")
- if 'keyid' in kwargs:
- keyid = kwargs.pop('keyid', None)
- keyserver = kwargs.pop('keyserver', None)
+ if "keyid" in kwargs:
+ keyid = kwargs.pop("keyid", None)
+ keyserver = kwargs.pop("keyserver", None)
if not keyid or not keyserver:
- error_str = 'both keyserver and keyid options required.'
+ error_str = "both keyserver and keyid options required."
raise NameError(error_str)
if not isinstance(keyid, list):
keyid = [keyid]
for key in keyid:
- if isinstance(key, int): # yaml can make this an int, we need the hex version
+ if isinstance(
+ key, int
+ ): # yaml can make this an int, we need the hex version
key = hex(key)
- cmd = ['apt-key', 'export', key]
- output = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs)
- imported = output.startswith('-----BEGIN PGP')
+ cmd = ["apt-key", "export", key]
+ output = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
+ imported = output.startswith("-----BEGIN PGP")
if keyserver:
if not imported:
http_proxy_url = _get_http_proxy_url()
if http_proxy_url and keyserver not in no_proxy:
- cmd = ['apt-key', 'adv', '--batch', '--keyserver-options', 'http-proxy={0}'.format(http_proxy_url),
- '--keyserver', keyserver, '--logger-fd', '1', '--recv-keys', key]
+ cmd = [
+ "apt-key",
+ "adv",
+ "--batch",
+ "--keyserver-options",
+ "http-proxy={0}".format(http_proxy_url),
+ "--keyserver",
+ keyserver,
+ "--logger-fd",
+ "1",
+ "--recv-keys",
+ key,
+ ]
else:
- cmd = ['apt-key', 'adv', '--batch', '--keyserver', keyserver,
- '--logger-fd', '1', '--recv-keys', key]
+ cmd = [
+ "apt-key",
+ "adv",
+ "--batch",
+ "--keyserver",
+ keyserver,
+ "--logger-fd",
+ "1",
+ "--recv-keys",
+ key,
+ ]
ret = _call_apt(cmd, scope=False, **kwargs)
- if ret['retcode'] != 0:
+ if ret["retcode"] != 0:
raise CommandExecutionError(
- 'Error: key retrieval failed: {0}'.format(ret['stdout'])
+ "Error: key retrieval failed: {0}".format(ret["stdout"])
)
- elif 'key_url' in kwargs:
- key_url = kwargs['key_url']
- fn_ = __salt__['cp.cache_file'](key_url, saltenv)
+ elif "key_url" in kwargs:
+ key_url = kwargs["key_url"]
+ fn_ = __salt__["cp.cache_file"](key_url, saltenv)
if not fn_:
+ raise CommandExecutionError("Error: file not found: {0}".format(key_url))
+ cmd = ["apt-key", "add", fn_]
+ out = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
+ if not out.upper().startswith("OK"):
raise CommandExecutionError(
- 'Error: file not found: {0}'.format(key_url)
- )
- cmd = ['apt-key', 'add', fn_]
- out = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs)
- if not out.upper().startswith('OK'):
- raise CommandExecutionError(
- 'Error: failed to add key from {0}'.format(key_url)
+ "Error: failed to add key from {0}".format(key_url)
)
- elif 'key_text' in kwargs:
- key_text = kwargs['key_text']
- cmd = ['apt-key', 'add', '-']
- out = __salt__['cmd.run_stdout'](cmd, stdin=key_text,
- python_shell=False, **kwargs)
- if not out.upper().startswith('OK'):
+ elif "key_text" in kwargs:
+ key_text = kwargs["key_text"]
+ cmd = ["apt-key", "add", "-"]
+ out = __salt__["cmd.run_stdout"](
+ cmd, stdin=key_text, python_shell=False, **kwargs
+ )
+ if not out.upper().startswith("OK"):
raise CommandExecutionError(
- 'Error: failed to add key:\n{0}'.format(key_text)
+ "Error: failed to add key:\n{0}".format(key_text)
)
- if 'comps' in kwargs:
- kwargs['comps'] = kwargs['comps'].split(',')
- full_comp_list |= set(kwargs['comps'])
+ if "comps" in kwargs:
+ kwargs["comps"] = kwargs["comps"].split(",")
+ full_comp_list |= set(kwargs["comps"])
else:
- kwargs['comps'] = list(full_comp_list)
+ kwargs["comps"] = list(full_comp_list)
- if 'architectures' in kwargs:
- kwargs['architectures'] = kwargs['architectures'].split(',')
+ if "architectures" in kwargs:
+ kwargs["architectures"] = kwargs["architectures"].split(",")
else:
- kwargs['architectures'] = repo_architectures
+ kwargs["architectures"] = repo_architectures
- if 'disabled' in kwargs:
- kwargs['disabled'] = salt.utils.data.is_true(kwargs['disabled'])
- elif 'enabled' in kwargs:
- kwargs['disabled'] = not salt.utils.data.is_true(kwargs['enabled'])
+ if "disabled" in kwargs:
+ kwargs["disabled"] = salt.utils.data.is_true(kwargs["disabled"])
+ elif "enabled" in kwargs:
+ kwargs["disabled"] = not salt.utils.data.is_true(kwargs["enabled"])
- kw_type = kwargs.get('type')
- kw_dist = kwargs.get('dist')
+ kw_type = kwargs.get("type")
+ kw_dist = kwargs.get("dist")
for source in repos:
# This series of checks will identify the starting source line
# and the resulting source line. The idea here is to ensure
# we are not returning bogus data because the source line
# has already been modified on a previous run.
- repo_matches = source.type == repo_type and source.uri == repo_uri and source.dist == repo_dist
+ repo_matches = (
+ source.type == repo_type
+ and source.uri == repo_uri
+ and source.dist == repo_dist
+ )
kw_matches = source.dist == kw_dist and source.type == kw_type
if repo_matches or kw_matches:
for comp in full_comp_list:
- if comp in getattr(source, 'comps', []):
+ if comp in getattr(source, "comps", []):
mod_source = source
if not source.comps:
mod_source = source
- if kwargs['architectures'] != source.architectures:
+ if kwargs["architectures"] != source.architectures:
mod_source = source
if mod_source:
break
- if 'comments' in kwargs:
- kwargs['comments'] = \
- salt.utils.pkg.deb.combine_comments(kwargs['comments'])
+ if "comments" in kwargs:
+ kwargs["comments"] = salt.utils.pkg.deb.combine_comments(kwargs["comments"])
if not mod_source:
mod_source = sourceslist.SourceEntry(repo)
- if 'comments' in kwargs:
- mod_source.comment = kwargs['comments']
+ if "comments" in kwargs:
+ mod_source.comment = kwargs["comments"]
sources.list.append(mod_source)
- elif 'comments' in kwargs:
- mod_source.comment = kwargs['comments']
+ elif "comments" in kwargs:
+ mod_source.comment = kwargs["comments"]
for key in kwargs:
if key in _MODIFY_OK and hasattr(mod_source, key):
@@ -2378,19 +2450,19 @@ def mod_repo(repo, saltenv='base', **kwargs):
refresh_db()
return {
repo: {
- 'architectures': getattr(mod_source, 'architectures', []),
- 'comps': mod_source.comps,
- 'disabled': mod_source.disabled,
- 'file': mod_source.file,
- 'type': mod_source.type,
- 'uri': mod_source.uri,
- 'line': mod_source.line
+ "architectures": getattr(mod_source, "architectures", []),
+ "comps": mod_source.comps,
+ "disabled": mod_source.disabled,
+ "file": mod_source.file,
+ "type": mod_source.type,
+ "uri": mod_source.uri,
+ "line": mod_source.line,
}
}
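
``get_repo``, ``del_repo``, and ``mod_repo`` all unpack ``_split_repo_str``
(defined earlier in this module, outside this diff) into the same five fields.
Inferred from how the tuple is consumed above, a sources.list line splits
roughly as follows:

    # Assumed behaviour, based on the call sites in this diff:
    # "deb [arch=amd64] http://example.com/ubuntu bionic main universe"
    #   repo_type          -> "deb"
    #   repo_architectures -> ["amd64"]
    #   repo_uri           -> "http://example.com/ubuntu"
    #   repo_dist          -> "bionic"
    #   repo_comps         -> ["main", "universe"]
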
def file_list(*packages):
- '''
+ """
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file in the system's package database (not
generally recommended).
@@ -2402,12 +2474,12 @@ def file_list(*packages):
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
- '''
- return __salt__['lowpkg.file_list'](*packages)
+ """
+ return __salt__["lowpkg.file_list"](*packages)
def file_dict(*packages):
- '''
+ """
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file in the system's
package database (not generally recommended).
@@ -2419,74 +2491,75 @@ def file_dict(*packages):
salt '*' pkg.file_dict httpd
salt '*' pkg.file_dict httpd postfix
salt '*' pkg.file_dict
- '''
- return __salt__['lowpkg.file_dict'](*packages)
+ """
+ return __salt__["lowpkg.file_dict"](*packages)
def expand_repo_def(**kwargs):
- '''
+ """
Take a repository definition and expand it to the full pkg repository dict
that can be used for comparison. This is a helper function to make
the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states.
This is designed to be called from pkgrepo states and will have little use
being called on the CLI.
- '''
- if 'repo' not in kwargs:
- raise SaltInvocationError('missing \'repo\' argument')
+ """
+ if "repo" not in kwargs:
+ raise SaltInvocationError("missing 'repo' argument")
_check_apt()
sanitized = {}
- repo = salt.utils.pkg.deb.strip_uri(kwargs['repo'])
- if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
- dist = __grains__['lsb_distrib_codename']
- owner_name, ppa_name = repo[4:].split('/', 1)
- if 'ppa_auth' in kwargs:
- auth_info = '{0}@'.format(kwargs['ppa_auth'])
- repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name,
- dist)
+ repo = salt.utils.pkg.deb.strip_uri(kwargs["repo"])
+ if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
+ dist = __grains__["lsb_distrib_codename"]
+ owner_name, ppa_name = repo[4:].split("/", 1)
+ if "ppa_auth" in kwargs:
+ auth_info = "{0}@".format(kwargs["ppa_auth"])
+ repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
- if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
- repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
+ if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
+ repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[
+ 0
+ ]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
- if 'file' not in kwargs:
- filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list'
- kwargs['file'] = filename.format(owner_name, ppa_name, dist)
+ if "file" not in kwargs:
+ filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list"
+ kwargs["file"] = filename.format(owner_name, ppa_name, dist)
source_entry = sourceslist.SourceEntry(repo)
- for list_args in ('architectures', 'comps'):
+ for list_args in ("architectures", "comps"):
if list_args in kwargs:
- kwargs[list_args] = kwargs[list_args].split(',')
+ kwargs[list_args] = kwargs[list_args].split(",")
for kwarg in _MODIFY_OK:
if kwarg in kwargs:
setattr(source_entry, kwarg, kwargs[kwarg])
- sanitized['file'] = source_entry.file
- sanitized['comps'] = getattr(source_entry, 'comps', [])
- sanitized['disabled'] = source_entry.disabled
- sanitized['dist'] = source_entry.dist
- sanitized['type'] = source_entry.type
- sanitized['uri'] = source_entry.uri.rstrip('/')
- sanitized['line'] = source_entry.line.strip()
- sanitized['architectures'] = getattr(source_entry, 'architectures', [])
+ sanitized["file"] = source_entry.file
+ sanitized["comps"] = getattr(source_entry, "comps", [])
+ sanitized["disabled"] = source_entry.disabled
+ sanitized["dist"] = source_entry.dist
+ sanitized["type"] = source_entry.type
+ sanitized["uri"] = source_entry.uri.rstrip("/")
+ sanitized["line"] = source_entry.line.strip()
+ sanitized["architectures"] = getattr(source_entry, "architectures", [])
return sanitized
def _parse_selections(dpkgselection):
- '''
+ """
Parses the format from ``dpkg --get-selections`` and return a format that
pkg.get_selections and pkg.set_selections work with.
- '''
+ """
ret = {}
if isinstance(dpkgselection, six.string_types):
- dpkgselection = dpkgselection.split('\n')
+ dpkgselection = dpkgselection.split("\n")
for line in dpkgselection:
if line:
_pkg, _state = line.split()
@@ -2498,7 +2571,7 @@ def _parse_selections(dpkgselection):
def get_selections(pattern=None, state=None):
- '''
+ """
View package state from the dpkg database.
Returns a dict of dicts containing the state and package names:
@@ -2521,13 +2594,13 @@ def get_selections(pattern=None, state=None):
salt '*' pkg.get_selections 'python-*'
salt '*' pkg.get_selections state=hold
salt '*' pkg.get_selections 'openssh*' state=hold
- '''
+ """
ret = {}
- cmd = ['dpkg', '--get-selections']
- cmd.append(pattern if pattern else '*')
- stdout = __salt__['cmd.run_stdout'](cmd,
- output_loglevel='trace',
- python_shell=False)
+ cmd = ["dpkg", "--get-selections"]
+ cmd.append(pattern if pattern else "*")
+ stdout = __salt__["cmd.run_stdout"](
+ cmd, output_loglevel="trace", python_shell=False
+ )
ret = _parse_selections(stdout)
if state:
return {state: ret.get(state, [])}
@@ -2540,8 +2613,8 @@ def get_selections(pattern=None, state=None):
# above, but override that if explicitly specified
# TODO: handle path to selection file from local fs as well as from salt file
# server
-def set_selections(path=None, selection=None, clear=False, saltenv='base'):
- '''
+def set_selections(path=None, selection=None, clear=False, saltenv="base"):
+ """
Change package state in the dpkg database.
The state can be any one of the following, as documented in ``dpkg(1)``:
@@ -2579,46 +2652,49 @@ def set_selections(path=None, selection=None, clear=False, saltenv='base'):
salt '*' pkg.set_selections selection='{"hold": ["openssh-server", "openssh-client"]}'
salt '*' pkg.set_selections salt://path/to/file
salt '*' pkg.set_selections salt://path/to/file clear=True
- '''
+ """
ret = {}
if not path and not selection:
return ret
if path and selection:
- err = ('The \'selection\' and \'path\' arguments to '
- 'pkg.set_selections are mutually exclusive, and cannot be '
- 'specified together')
+ err = (
+ "The 'selection' and 'path' arguments to "
+ "pkg.set_selections are mutually exclusive, and cannot be "
+ "specified together"
+ )
raise SaltInvocationError(err)
if isinstance(selection, six.string_types):
try:
selection = salt.utils.yaml.safe_load(selection)
- except (salt.utils.yaml.parser.ParserError, salt.utils.yaml.scanner.ScannerError) as exc:
- raise SaltInvocationError(
- 'Improperly-formatted selection: {0}'.format(exc)
- )
+ except (
+ salt.utils.yaml.parser.ParserError,
+ salt.utils.yaml.scanner.ScannerError,
+ ) as exc:
+ raise SaltInvocationError("Improperly-formatted selection: {0}".format(exc))
if path:
- path = __salt__['cp.cache_file'](path, saltenv)
- with salt.utils.files.fopen(path, 'r') as ifile:
- content = [salt.utils.stringutils.to_unicode(x)
- for x in ifile.readlines()]
+ path = __salt__["cp.cache_file"](path, saltenv)
+ with salt.utils.files.fopen(path, "r") as ifile:
+ content = [salt.utils.stringutils.to_unicode(x) for x in ifile.readlines()]
selection = _parse_selections(content)
if selection:
- valid_states = ('install', 'hold', 'deinstall', 'purge')
+ valid_states = ("install", "hold", "deinstall", "purge")
bad_states = [x for x in selection if x not in valid_states]
if bad_states:
raise SaltInvocationError(
- 'Invalid state(s): {0}'.format(', '.join(bad_states))
+ "Invalid state(s): {0}".format(", ".join(bad_states))
)
if clear:
- cmd = ['dpkg', '--clear-selections']
- if not __opts__['test']:
+ cmd = ["dpkg", "--clear-selections"]
+ if not __opts__["test"]:
result = _call_apt(cmd, scope=False)
- if result['retcode'] != 0:
- err = ('Running dpkg --clear-selections failed: '
- '{0}'.format(result['stderr']))
+ if result["retcode"] != 0:
+ err = "Running dpkg --clear-selections failed: " "{0}".format(
+ result["stderr"]
+ )
log.error(err)
raise CommandExecutionError(err)
@@ -2630,30 +2706,26 @@ def set_selections(path=None, selection=None, clear=False, saltenv='base'):
for _pkg in _pkgs:
if _state == sel_revmap.get(_pkg):
continue
- cmd = ['dpkg', '--set-selections']
- cmd_in = '{0} {1}'.format(_pkg, _state)
- if not __opts__['test']:
+ cmd = ["dpkg", "--set-selections"]
+ cmd_in = "{0} {1}".format(_pkg, _state)
+ if not __opts__["test"]:
result = _call_apt(cmd, scope=False, stdin=cmd_in)
- if result['retcode'] != 0:
- log.error(
- 'failed to set state %s for package %s',
- _state, _pkg
- )
+ if result["retcode"] != 0:
+ log.error("failed to set state %s for package %s", _state, _pkg)
else:
- ret[_pkg] = {'old': sel_revmap.get(_pkg),
- 'new': _state}
+ ret[_pkg] = {"old": sel_revmap.get(_pkg), "new": _state}
return ret
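
For context, ``_parse_selections`` inverts the two-column
``dpkg --get-selections`` output into the state-keyed dict that
``get_selections`` and ``set_selections`` work with; a minimal sketch:

    # dpkg prints "package<TAB>state" pairs; the parser groups packages by state.
    raw = "openssh-server\t\t\thold\nvim\t\t\t\tinstall"
    parsed = _parse_selections(raw)
    # parsed == {"hold": ["openssh-server"], "install": ["vim"]}
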
def _resolve_deps(name, pkgs, **kwargs):
- '''
+ """
Installs missing dependencies and marks them as auto installed so they
are removed when no more manually installed packages depend on them.
.. versionadded:: 2014.7.0
:depends: - python-apt module
- '''
+ """
missing_deps = []
for pkg_file in pkgs:
deb = apt.debfile.DebPackage(filename=pkg_file, cache=apt.Cache())
@@ -2661,37 +2733,29 @@ def _resolve_deps(name, pkgs, **kwargs):
missing_deps.extend(deb.missing_deps)
if missing_deps:
- cmd = ['apt-get', '-q', '-y']
- cmd = cmd + ['-o', 'DPkg::Options::=--force-confold']
- cmd = cmd + ['-o', 'DPkg::Options::=--force-confdef']
- cmd.append('install')
+ cmd = ["apt-get", "-q", "-y"]
+ cmd = cmd + ["-o", "DPkg::Options::=--force-confold"]
+ cmd = cmd + ["-o", "DPkg::Options::=--force-confdef"]
+ cmd.append("install")
cmd.extend(missing_deps)
- ret = __salt__['cmd.retcode'](
- cmd,
- env=kwargs.get('env'),
- python_shell=False
- )
+ ret = __salt__["cmd.retcode"](cmd, env=kwargs.get("env"), python_shell=False)
if ret != 0:
raise CommandExecutionError(
- 'Error: unable to resolve dependencies for: {0}'.format(name)
+ "Error: unable to resolve dependencies for: {0}".format(name)
)
else:
try:
- cmd = ['apt-mark', 'auto'] + missing_deps
- __salt__['cmd.run'](
- cmd,
- env=kwargs.get('env'),
- python_shell=False
- )
+ cmd = ["apt-mark", "auto"] + missing_deps
+ __salt__["cmd.run"](cmd, env=kwargs.get("env"), python_shell=False)
except MinionError as exc:
raise CommandExecutionError(exc)
return
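
The dependency handling above reduces to two apt invocations; an equivalent
manual sequence, for illustration only:

    # apt-get -q -y -o DPkg::Options::=--force-confold \
    #               -o DPkg::Options::=--force-confdef install <missing deps>
    # apt-mark auto <missing deps>   # flagged so autoremove can reap them later
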
def owner(*paths):
- '''
+ """
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
@@ -2708,25 +2772,25 @@ def owner(*paths):
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
- '''
+ """
if not paths:
- return ''
+ return ""
ret = {}
for path in paths:
- cmd = ['dpkg', '-S', path]
- output = __salt__['cmd.run_stdout'](cmd,
- output_loglevel='trace',
- python_shell=False)
- ret[path] = output.split(':')[0]
- if 'no path found' in ret[path].lower():
- ret[path] = ''
+ cmd = ["dpkg", "-S", path]
+ output = __salt__["cmd.run_stdout"](
+ cmd, output_loglevel="trace", python_shell=False
+ )
+ ret[path] = output.split(":")[0]
+ if "no path found" in ret[path].lower():
+ ret[path] = ""
if len(ret) == 1:
return next(six.itervalues(ret))
return ret
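
``dpkg -S`` prints ``package: path``, so the split above keeps only the owning
package name:

    # e.g. "openssh-client: /usr/bin/ssh".split(":")[0] -> "openssh-client"
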
def show(*names, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Runs an ``apt-cache show`` on the passed package names, and returns the
@@ -2752,14 +2816,14 @@ def show(*names, **kwargs):
salt myminion pkg.show gawk
salt myminion pkg.show 'nginx-*'
salt myminion pkg.show 'nginx-*' filter=description,provides
- '''
+ """
kwargs = salt.utils.args.clean_kwargs(**kwargs)
- refresh = kwargs.pop('refresh', False)
+ refresh = kwargs.pop("refresh", False)
filter_ = salt.utils.args.split_input(
- kwargs.pop('filter', []),
+ kwargs.pop("filter", []),
lambda x: six.text_type(x)
- if not isinstance(x, six.string_types)
- else x.lower()
+ if not isinstance(x, six.string_types)
+ else x.lower(),
)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
@@ -2770,26 +2834,25 @@ def show(*names, **kwargs):
if not names:
return {}
- result = _call_apt(['apt-cache', 'show'] + list(names), scope=False)
+ result = _call_apt(["apt-cache", "show"] + list(names), scope=False)
def _add(ret, pkginfo):
- name = pkginfo.pop('Package', None)
- version = pkginfo.pop('Version', None)
+ name = pkginfo.pop("Package", None)
+ version = pkginfo.pop("Version", None)
if name is not None and version is not None:
ret.setdefault(name, {}).setdefault(version, {}).update(pkginfo)
def _check_filter(key):
key = key.lower()
- return True if key in ('package', 'version') or not filter_ \
- else key in filter_
+ return True if key in ("package", "version") or not filter_ else key in filter_
ret = {}
pkginfo = {}
- for line in salt.utils.itertools.split(result['stdout'], '\n'):
+ for line in salt.utils.itertools.split(result["stdout"], "\n"):
line = line.strip()
if line:
try:
- key, val = [x.strip() for x in line.split(':', 1)]
+ key, val = [x.strip() for x in line.split(":", 1)]
except ValueError:
pass
else:
@@ -2810,7 +2873,7 @@ def show(*names, **kwargs):
def info_installed(*names, **kwargs):
- '''
+ """
Return the information of the named package(s) installed on the system.
.. versionadded:: 2015.8.1
@@ -2831,31 +2894,31 @@ def info_installed(*names, **kwargs):
salt '*' pkg.info_installed
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> failhard=false
- '''
+ """
kwargs = salt.utils.args.clean_kwargs(**kwargs)
- failhard = kwargs.pop('failhard', True)
+ failhard = kwargs.pop("failhard", True)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
- for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard).items():
+ for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items():
t_nfo = dict()
- if pkg_nfo.get('status', 'ii')[1] != 'i':
- continue # return only packages that are really installed
+ if pkg_nfo.get("status", "ii")[1] != "i":
+ continue # return only packages that are really installed
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
- if key == 'package':
- t_nfo['name'] = value
- elif key == 'origin':
- t_nfo['vendor'] = value
- elif key == 'section':
- t_nfo['group'] = value
- elif key == 'maintainer':
- t_nfo['packager'] = value
- elif key == 'homepage':
- t_nfo['url'] = value
- elif key == 'status':
- continue # only installed pkgs are returned, no need for status
+ if key == "package":
+ t_nfo["name"] = value
+ elif key == "origin":
+ t_nfo["vendor"] = value
+ elif key == "section":
+ t_nfo["group"] = value
+ elif key == "maintainer":
+ t_nfo["packager"] = value
+ elif key == "homepage":
+ t_nfo["url"] = value
+ elif key == "status":
+ continue # only installed pkgs are returned, no need for status
else:
t_nfo[key] = value
@@ -2865,38 +2928,32 @@ def info_installed(*names, **kwargs):
def _get_http_proxy_url():
- '''
+ """
Returns the http_proxy_url if proxy_username, proxy_password, proxy_host, and proxy_port
config values are set.
Returns a string.
- '''
- http_proxy_url = ''
- host = __salt__['config.option']('proxy_host')
- port = __salt__['config.option']('proxy_port')
- username = __salt__['config.option']('proxy_username')
- password = __salt__['config.option']('proxy_password')
+ """
+ http_proxy_url = ""
+ host = __salt__["config.option"]("proxy_host")
+ port = __salt__["config.option"]("proxy_port")
+ username = __salt__["config.option"]("proxy_username")
+ password = __salt__["config.option"]("proxy_password")
# Set http_proxy_url for use in various internet-facing actions, e.g. apt-key adv
if host and port:
if username and password:
- http_proxy_url = 'http://{0}:{1}@{2}:{3}'.format(
- username,
- password,
- host,
- port
+ http_proxy_url = "http://{0}:{1}@{2}:{3}".format(
+ username, password, host, port
)
else:
- http_proxy_url = 'http://{0}:{1}'.format(
- host,
- port
- )
+ http_proxy_url = "http://{0}:{1}".format(host, port)
return http_proxy_url
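
With illustrative config values, the helper above yields URLs of this form:

    # proxy_host=proxy.example.com, proxy_port=3128, plus credentials:
    #   http://user:pass@proxy.example.com:3128
    # without credentials:
    #   http://proxy.example.com:3128
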
def list_downloaded(root=None, **kwargs):
- '''
+ """
.. versionadded:: 3000
List prefetched packages downloaded by apt to the local disk.
@@ -2909,21 +2966,23 @@ def list_downloaded(root=None, **kwargs):
.. code-block:: bash
salt '*' pkg.list_downloaded
- '''
- CACHE_DIR = '/var/cache/apt'
+ """
+ CACHE_DIR = "/var/cache/apt"
if root:
CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
ret = {}
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
- for filename in fnmatch.filter(filenames, '*.deb'):
+ for filename in fnmatch.filter(filenames, "*.deb"):
package_path = os.path.join(root, filename)
- pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
+ pkg_info = __salt__["lowpkg.bin_pkg_info"](package_path)
pkg_timestamp = int(os.path.getctime(package_path))
- ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = {
- 'path': package_path,
- 'size': os.path.getsize(package_path),
- 'creation_date_time_t': pkg_timestamp,
- 'creation_date_time': datetime.datetime.utcfromtimestamp(pkg_timestamp).isoformat(),
+ ret.setdefault(pkg_info["name"], {})[pkg_info["version"]] = {
+ "path": package_path,
+ "size": os.path.getsize(package_path),
+ "creation_date_time_t": pkg_timestamp,
+ "creation_date_time": datetime.datetime.utcfromtimestamp(
+ pkg_timestamp
+ ).isoformat(),
}
return ret
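
The return shape of ``list_downloaded`` nests version under package name; a
hypothetical entry (the package, path, size, and timestamps are invented):

    # {"nginx": {"1.14.0-0ubuntu1": {
    #     "path": "/var/cache/apt/archives/nginx_1.14.0-0ubuntu1_amd64.deb",
    #     "size": 550724,
    #     "creation_date_time_t": 1577836800,
    #     "creation_date_time": "2020-01-01T00:00:00"}}}
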
diff --git a/salt/modules/archive.py b/salt/modules/archive.py
index 483aadb6ac0..86a112c109a 100644
--- a/salt/modules/archive.py
+++ b/salt/modules/archive.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
-'''
+"""
A module to wrap (non-Windows) archive calls
.. versionadded:: 2014.1.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+
import contextlib # For < 2.7 compat
import copy
import errno
@@ -17,22 +18,7 @@ import stat
import subprocess
import tarfile
import zipfile
-try:
- from shlex import quote as _quote # pylint: disable=E0611
-except ImportError:
- from pipes import quote as _quote
-# Import third party libs
-from salt.ext import six
-from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module
-try:
- import rarfile
- HAS_RARFILE = True
-except ImportError:
- HAS_RARFILE = False
-
-# Import salt libs
-from salt.exceptions import SaltInvocationError, CommandExecutionError
import salt.utils.decorators
import salt.utils.decorators.path
import salt.utils.files
@@ -41,29 +27,49 @@ import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
+# Import salt libs
+from salt.exceptions import CommandExecutionError, SaltInvocationError
+
+# Import third party libs
+from salt.ext import six
+from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
+
+try:
+ from shlex import quote as _quote # pylint: disable=E0611
+except ImportError:
+ from pipes import quote as _quote
+
+
+try:
+ import rarfile
+
+ HAS_RARFILE = True
+except ImportError:
+ HAS_RARFILE = False
+
+
if salt.utils.platform.is_windows():
import win32file
# TODO: Check that the passed arguments are correct
# Don't shadow built-ins.
-__func_alias__ = {
- 'zip_': 'zip',
- 'list_': 'list'
-}
+__func_alias__ = {"zip_": "zip", "list_": "list"}
log = logging.getLogger(__name__)
-def list_(name,
- archive_format=None,
- options=None,
- strip_components=None,
- clean=False,
- verbose=False,
- saltenv='base',
- source_hash=None):
- '''
+def list_(
+ name,
+ archive_format=None,
+ options=None,
+ strip_components=None,
+ clean=False,
+ verbose=False,
+ saltenv="base",
+ source_hash=None,
+):
+ """
.. versionadded:: 2016.11.0
.. versionchanged:: 2016.11.2
The rarfile_ Python module is now supported for listing the contents of
@@ -172,25 +178,28 @@ def list_(name,
salt '*' archive.list https://domain.tld/myfile.zip
salt '*' archive.list https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
salt '*' archive.list ftp://10.1.2.3/foo.rar
- '''
+ """
+
def _list_tar(name, cached, decompress_cmd, failhard=False):
- '''
+ """
List the contents of a tar archive.
- '''
+ """
dirs = []
files = []
links = []
try:
- open_kwargs = {'name': cached} \
- if not isinstance(cached, subprocess.Popen) \
- else {'fileobj': cached.stdout, 'mode': 'r|'}
+ open_kwargs = (
+ {"name": cached}
+ if not isinstance(cached, subprocess.Popen)
+ else {"fileobj": cached.stdout, "mode": "r|"}
+ )
with contextlib.closing(tarfile.open(**open_kwargs)) as tar_archive:
for member in tar_archive.getmembers():
_member = salt.utils.data.decode(member.name)
if member.issym():
links.append(_member)
elif member.isdir():
- dirs.append(_member + '/')
+ dirs.append(_member + "/")
else:
files.append(_member)
return dirs, files, links
@@ -201,49 +210,55 @@ def list_(name,
stderr = cached.communicate()[1]
if cached.returncode != 0:
raise CommandExecutionError(
- 'Failed to decompress {0}'.format(name),
- info={'error': stderr}
+ "Failed to decompress {0}".format(name),
+ info={"error": stderr},
)
else:
- if not salt.utils.path.which('tar'):
- raise CommandExecutionError('\'tar\' command not available')
+ if not salt.utils.path.which("tar"):
+ raise CommandExecutionError("'tar' command not available")
if decompress_cmd is not None:
# Guard against shell injection
try:
- decompress_cmd = ' '.join(
+ decompress_cmd = " ".join(
[_quote(x) for x in shlex.split(decompress_cmd)]
)
except AttributeError:
- raise CommandExecutionError('Invalid CLI options')
+ raise CommandExecutionError("Invalid CLI options")
else:
- if salt.utils.path.which('xz') \
- and __salt__['cmd.retcode'](['xz', '-t', cached],
- python_shell=False,
- ignore_retcode=True) == 0:
- decompress_cmd = 'xz --decompress --stdout'
+ if (
+ salt.utils.path.which("xz")
+ and __salt__["cmd.retcode"](
+ ["xz", "-t", cached],
+ python_shell=False,
+ ignore_retcode=True,
+ )
+ == 0
+ ):
+ decompress_cmd = "xz --decompress --stdout"
if decompress_cmd:
decompressed = subprocess.Popen(
- '{0} {1}'.format(decompress_cmd, _quote(cached)),
+ "{0} {1}".format(decompress_cmd, _quote(cached)),
shell=True,
stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ stderr=subprocess.PIPE,
+ )
return _list_tar(name, decompressed, None, True)
raise CommandExecutionError(
- 'Unable to list contents of {0}. If this is an XZ-compressed tar '
- 'archive, install XZ Utils to enable listing its contents. If it '
- 'is compressed using something other than XZ, it may be necessary '
- 'to specify CLI options to decompress the archive. See the '
- 'documentation for details.'.format(name)
+ "Unable to list contents of {0}. If this is an XZ-compressed tar "
+ "archive, install XZ Utils to enable listing its contents. If it "
+ "is compressed using something other than XZ, it may be necessary "
+ "to specify CLI options to decompress the archive. See the "
+ "documentation for details.".format(name)
)
def _list_zip(name, cached):
- '''
+ """
List the contents of a zip archive.
Password-protected ZIP archives can still be listed by zipfile, so
there is no reason to invoke the unzip command.
- '''
+ """
dirs = set()
files = []
links = []
@@ -252,7 +267,7 @@ def list_(name,
for member in zip_archive.infolist():
path = member.filename
if salt.utils.platform.is_windows():
- if path.endswith('/'):
+ if path.endswith("/"):
# zipfile.ZipInfo objects on windows use forward
# slash at end of the directory name.
dirs.add(path)
@@ -272,58 +287,57 @@ def list_(name,
# ZIP files created on Windows do not add entries
# to the archive for directories. So, we'll need to
# manually add them.
- dirname = ''.join(path.rpartition('/')[:2])
+ dirname = "".join(path.rpartition("/")[:2])
if dirname:
dirs.add(dirname)
if dirname in files:
files.remove(dirname)
return list(dirs), files, links
except zipfile.BadZipfile:
- raise CommandExecutionError('{0} is not a ZIP file'.format(name))
+ raise CommandExecutionError("{0} is not a ZIP file".format(name))
def _list_rar(name, cached):
- '''
+ """
List the contents of a rar archive.
- '''
+ """
dirs = []
files = []
if HAS_RARFILE:
with rarfile.RarFile(cached) as rf:
for member in rf.infolist():
- path = member.filename.replace('\\', '/')
+ path = member.filename.replace("\\", "/")
if member.isdir():
- dirs.append(path + '/')
+ dirs.append(path + "/")
else:
files.append(path)
else:
- if not salt.utils.path.which('rar'):
+ if not salt.utils.path.which("rar"):
raise CommandExecutionError(
- 'rar command not available, is it installed?'
+ "rar command not available, is it installed?"
)
- output = __salt__['cmd.run'](
- ['rar', 'lt', name],
- python_shell=False,
- ignore_retcode=False)
- matches = re.findall(r'Name:\s*([^\n]+)\s*Type:\s*([^\n]+)', output)
+ output = __salt__["cmd.run"](
+ ["rar", "lt", name], python_shell=False, ignore_retcode=False
+ )
+ matches = re.findall(r"Name:\s*([^\n]+)\s*Type:\s*([^\n]+)", output)
for path, type_ in matches:
- if type_ == 'Directory':
- dirs.append(path + '/')
+ if type_ == "Directory":
+ dirs.append(path + "/")
else:
files.append(path)
if not dirs and not files:
raise CommandExecutionError(
- 'Failed to list {0}, is it a rar file? If so, the '
- 'installed version of rar may be too old to list data in '
- 'a parsable format. Installing the rarfile Python module '
- 'may be an easier workaround if newer rar is not readily '
- 'available.'.format(name),
- info={'error': output}
+ "Failed to list {0}, is it a rar file? If so, the "
+ "installed version of rar may be too old to list data in "
+ "a parsable format. Installing the rarfile Python module "
+ "may be an easier workaround if newer rar is not readily "
+ "available.".format(name),
+ info={"error": output},
)
return dirs, files, []
- cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
+ cached = __salt__["cp.cache_file"](name, saltenv, source_hash=source_hash)
if not cached:
- raise CommandExecutionError('Failed to cache {0}'.format(name))
+ raise CommandExecutionError("Failed to cache {0}".format(name))
try:
if strip_components:
@@ -334,23 +348,23 @@ def list_(name,
if strip_components <= 0:
raise CommandExecutionError(
- '\'strip_components\' must be a positive integer'
+ "'strip_components' must be a positive integer"
)
parsed = _urlparse(name)
path = parsed.path or parsed.netloc
def _unsupported_format(archive_format):
- '''
+ """
Raise the proper exception message for the given archive format.
- '''
+ """
if archive_format is None:
raise CommandExecutionError(
- 'Unable to guess archive format, please pass an '
- '\'archive_format\' argument.'
+ "Unable to guess archive format, please pass an "
+ "'archive_format' argument."
)
raise CommandExecutionError(
- 'Unsupported archive format \'{0}\''.format(archive_format)
+ "Unsupported archive format '{0}'".format(archive_format)
)
if not archive_format:
@@ -359,36 +373,34 @@ def list_(name,
_unsupported_format(archive_format)
archive_format = guessed_format
- func = locals().get('_list_' + archive_format)
- if not hasattr(func, '__call__'):
+ func = locals().get("_list_" + archive_format)
+ if not hasattr(func, "__call__"):
_unsupported_format(archive_format)
- args = (options,) if archive_format == 'tar' else ()
+ args = (options,) if archive_format == "tar" else ()
try:
dirs, files, links = func(name, cached, *args)
except (IOError, OSError) as exc:
raise CommandExecutionError(
- 'Failed to list contents of {0}: {1}'.format(
- name, exc.__str__()
- )
+ "Failed to list contents of {0}: {1}".format(name, exc.__str__())
)
except CommandExecutionError as exc:
raise
except Exception as exc: # pylint: disable=broad-except
raise CommandExecutionError(
- 'Uncaught exception \'{0}\' when listing contents of {1}'
- .format(exc, name)
+ "Uncaught exception '{0}' when listing contents of {1}".format(
+ exc, name
+ )
)
if clean:
try:
os.remove(cached)
- log.debug('Cleaned cached archive %s', cached)
+ log.debug("Cleaned cached archive %s", cached)
except OSError as exc:
if exc.errno != errno.ENOENT:
log.warning(
- 'Failed to clean cached archive %s: %s',
- cached, exc.__str__()
+ "Failed to clean cached archive %s: %s", cached, exc.__str__()
)
if strip_components:
@@ -398,28 +410,28 @@ def list_(name,
# Strip off the specified number of directory
# boundaries, and grab what comes after the last
# stripped path separator.
- item[index] = item[index].split(
- os.sep, strip_components)[strip_components]
+ item[index] = item[index].split(os.sep, strip_components)[
+ strip_components
+ ]
except IndexError:
# Path is excluded by strip_components because it is not
# deep enough. Set this to an empty string so it can
# be removed in the generator expression below.
- item[index] = ''
+ item[index] = ""
# Remove all paths which were excluded
item[:] = (x for x in item if x)
item.sort()
if verbose:
- ret = {'dirs': sorted(salt.utils.data.decode_list(dirs)),
- 'files': sorted(salt.utils.data.decode_list(files)),
- 'links': sorted(salt.utils.data.decode_list(links))}
- ret['top_level_dirs'] = [x for x in ret['dirs']
- if x.count('/') == 1]
- ret['top_level_files'] = [x for x in ret['files']
- if x.count('/') == 0]
- ret['top_level_links'] = [x for x in ret['links']
- if x.count('/') == 0]
+ ret = {
+ "dirs": sorted(salt.utils.data.decode_list(dirs)),
+ "files": sorted(salt.utils.data.decode_list(files)),
+ "links": sorted(salt.utils.data.decode_list(links)),
+ }
+ ret["top_level_dirs"] = [x for x in ret["dirs"] if x.count("/") == 1]
+ ret["top_level_files"] = [x for x in ret["files"] if x.count("/") == 0]
+ ret["top_level_links"] = [x for x in ret["links"] if x.count("/") == 0]
else:
ret = sorted(dirs + files + links)
return ret
@@ -428,19 +440,19 @@ def list_(name,
# Reraise with cache path in the error so that the user can examine the
# cached archive for troubleshooting purposes.
info = exc.info or {}
- info['archive location'] = cached
+ info["archive location"] = cached
raise CommandExecutionError(exc.error, info=info)
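
The ``strip_components`` arithmetic above mirrors GNU tar's
``--strip-components``; concretely:

    # With strip_components=2:
    # "a/b/c/file.txt".split(os.sep, 2)[2] -> "c/file.txt"
    # "a/file.txt" raises IndexError and is emptied out, as handled above.
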
-_glob_wildcards = re.compile('[*?[]')
+_glob_wildcards = re.compile("[*?[]")
def _glob(pathname):
- '''
+ """
In case ``pathname`` contains glob wildcards, performs expansion and returns
the possibly empty list of matching pathnames. Otherwise returns a list that
contains only ``pathname`` itself.
- '''
+ """
if _glob_wildcards.search(pathname) is None:
return [pathname]
else:
@@ -448,24 +460,21 @@ def _glob(pathname):
def _expand_sources(sources):
- '''
+ """
Expands a user-provided specification of source files into a list of paths.
- '''
+ """
if sources is None:
return []
if isinstance(sources, six.string_types):
- sources = [x.strip() for x in sources.split(',')]
+ sources = [x.strip() for x in sources.split(",")]
elif isinstance(sources, (float, six.integer_types)):
sources = [six.text_type(sources)]
- return [path
- for source in sources
- for path in _glob(source)]
+ return [path for source in sources for path in _glob(source)]
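
Putting ``_glob`` and ``_expand_sources`` together, a comma-separated source
string expands like this (hypothetical paths):

    # Assuming /tmp/logs contains app.log and err.log:
    # _expand_sources("/tmp/a.txt,/tmp/logs/*.log")
    #   -> ["/tmp/a.txt", "/tmp/logs/app.log", "/tmp/logs/err.log"]
    # Non-wildcard paths pass through untouched; a wildcard with no matches
    # expands to nothing.
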
-@salt.utils.decorators.path.which('tar')
-def tar(options, tarfile, sources=None, dest=None,
- cwd=None, template=None, runas=None):
- '''
+@salt.utils.decorators.path.which("tar")
+def tar(options, tarfile, sources=None, dest=None, cwd=None, template=None, runas=None):
+ """
.. note::
This function has changed for version 0.17.0. In prior versions, the
@@ -527,32 +536,30 @@ def tar(options, tarfile, sources=None, dest=None,
salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 '/tmp/file_*'
# Unpack a tarfile
salt '*' archive.tar xf foo.tar dest=/target/directory
- '''
+ """
if not options:
# Catch instances where people pass an empty string for the "options"
# argument. Someone would have to be really silly to do this, but we
# should at least let them know of their silliness.
- raise SaltInvocationError('Tar options can not be empty')
+ raise SaltInvocationError("Tar options can not be empty")
- cmd = ['tar']
+ cmd = ["tar"]
if options:
cmd.extend(options.split())
- cmd.extend(['{0}'.format(tarfile)])
+ cmd.extend(["{0}".format(tarfile)])
cmd.extend(_expand_sources(sources))
if dest:
- cmd.extend(['-C', '{0}'.format(dest)])
+ cmd.extend(["-C", "{0}".format(dest)])
- return __salt__['cmd.run'](cmd,
- cwd=cwd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ return __salt__["cmd.run"](
+ cmd, cwd=cwd, template=template, runas=runas, python_shell=False
+ ).splitlines()
-@salt.utils.decorators.path.which('gzip')
+@salt.utils.decorators.path.which("gzip")
def gzip(sourcefile, template=None, runas=None, options=None):
- '''
+ """
Uses the gzip command to create gzip files
template : None
@@ -578,21 +585,20 @@ def gzip(sourcefile, template=None, runas=None, options=None):
# Create /tmp/sourcefile.txt.gz
salt '*' archive.gzip /tmp/sourcefile.txt
salt '*' archive.gzip /tmp/sourcefile.txt options='-9 --verbose'
- '''
- cmd = ['gzip']
+ """
+ cmd = ["gzip"]
if options:
cmd.append(options)
- cmd.append('{0}'.format(sourcefile))
+ cmd.append("{0}".format(sourcefile))
- return __salt__['cmd.run'](cmd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ return __salt__["cmd.run"](
+ cmd, template=template, runas=runas, python_shell=False
+ ).splitlines()
-@salt.utils.decorators.path.which('gunzip')
+@salt.utils.decorators.path.which("gunzip")
def gunzip(gzipfile, template=None, runas=None, options=None):
- '''
+ """
Uses the gunzip command to unpack gzip files
template : None
@@ -618,21 +624,20 @@ def gunzip(gzipfile, template=None, runas=None, options=None):
# Create /tmp/sourcefile.txt
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
salt '*' archive.gunzip /tmp/sourcefile.txt options='--verbose'
- '''
- cmd = ['gunzip']
+ """
+ cmd = ["gunzip"]
if options:
cmd.append(options)
- cmd.append('{0}'.format(gzipfile))
+ cmd.append("{0}".format(gzipfile))
- return __salt__['cmd.run'](cmd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ return __salt__["cmd.run"](
+ cmd, template=template, runas=runas, python_shell=False
+ ).splitlines()
-@salt.utils.decorators.path.which('zip')
+@salt.utils.decorators.path.which("zip")
def cmd_zip(zip_file, sources, template=None, cwd=None, runas=None):
- '''
+ """
.. versionadded:: 2015.5.0
In versions 2014.7.x and earlier, this function was known as
``archive.zip``.
@@ -688,20 +693,18 @@ def cmd_zip(zip_file, sources, template=None, cwd=None, runas=None):
salt '*' archive.cmd_zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
# Globbing for sources (2017.7.0 and later)
salt '*' archive.cmd_zip /tmp/zipfile.zip '/tmp/sourcefile*'
- '''
- cmd = ['zip', '-r']
- cmd.append('{0}'.format(zip_file))
+ """
+ cmd = ["zip", "-r"]
+ cmd.append("{0}".format(zip_file))
cmd.extend(_expand_sources(sources))
- return __salt__['cmd.run'](cmd,
- cwd=cwd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ return __salt__["cmd.run"](
+ cmd, cwd=cwd, template=template, runas=runas, python_shell=False
+ ).splitlines()
-@salt.utils.decorators.depends('zipfile', fallback_function=cmd_zip)
+@salt.utils.decorators.depends("zipfile", fallback_function=cmd_zip)
def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
- '''
+ """
Uses the ``zipfile`` Python module to create zip files
.. versionchanged:: 2015.5.0
@@ -761,15 +764,13 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
salt '*' archive.zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
# Globbing for sources (2017.7.0 and later)
salt '*' archive.zip /tmp/zipfile.zip '/tmp/sourcefile*'
- '''
+ """
if runas:
euid = os.geteuid()
egid = os.getegid()
- uinfo = __salt__['user.info'](runas)
+ uinfo = __salt__["user.info"](runas)
if not uinfo:
- raise SaltInvocationError(
- 'User \'{0}\' does not exist'.format(runas)
- )
+ raise SaltInvocationError("User '{0}' does not exist".format(runas))
zip_file, sources = _render_filenames(zip_file, sources, None, template)
sources = _expand_sources(sources)
@@ -777,35 +778,35 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
if not cwd:
for src in sources:
if not os.path.isabs(src):
- raise SaltInvocationError(
- 'Relative paths require the \'cwd\' parameter'
- )
+ raise SaltInvocationError("Relative paths require the 'cwd' parameter")
else:
- err_msg = 'cwd must be absolute'
+ err_msg = "cwd must be absolute"
try:
if not os.path.isabs(cwd):
raise SaltInvocationError(err_msg)
except AttributeError:
raise SaltInvocationError(err_msg)
- if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
+ if runas and (euid != uinfo["uid"] or egid != uinfo["gid"]):
# Change the egid first, as changing it after the euid will fail
# if the runas user is non-privileged.
- os.setegid(uinfo['gid'])
- os.seteuid(uinfo['uid'])
+ os.setegid(uinfo["gid"])
+ os.seteuid(uinfo["uid"])
try:
exc = None
archived_files = []
- with contextlib.closing(zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, zip64)) as zfile:
+ with contextlib.closing(
+ zipfile.ZipFile(zip_file, "w", zipfile.ZIP_DEFLATED, zip64)
+ ) as zfile:
for src in sources:
if cwd:
src = os.path.join(cwd, src)
if os.path.exists(src):
if os.path.isabs(src):
- rel_root = '/'
+ rel_root = "/"
else:
- rel_root = cwd if cwd is not None else '/'
+ rel_root = cwd if cwd is not None else "/"
if os.path.isdir(src):
for dir_name, sub_dirs, files in salt.utils.path.os_walk(src):
if cwd and dir_name.startswith(cwd):
@@ -813,7 +814,7 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
else:
arc_dir = os.path.relpath(dir_name, rel_root)
if arc_dir:
- archived_files.append(arc_dir + '/')
+ archived_files.append(arc_dir + "/")
zfile.write(dir_name, arc_dir)
for filename in files:
abs_name = os.path.join(dir_name, filename)
@@ -839,27 +840,29 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
# permission errors in writing to minion log.
if exc == zipfile.LargeZipFile:
raise CommandExecutionError(
- 'Resulting zip file too large, would require ZIP64 support'
- 'which has not been enabled. Rerun command with zip64=True'
+ "Resulting zip file too large, would require ZIP64 support"
+ "which has not been enabled. Rerun command with zip64=True"
)
else:
raise CommandExecutionError(
- 'Exception encountered creating zipfile: {0}'.format(exc)
+ "Exception encountered creating zipfile: {0}".format(exc)
)
return archived_files
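The egid-before-euid ordering called out in the comments is the load-bearing detail; here is a minimal standalone sketch of the same drop/restore pattern (POSIX only; uinfo is a hypothetical dict with "uid" and "gid" keys):

import os

def run_as(uinfo, work):
    euid, egid = os.geteuid(), os.getegid()
    os.setegid(uinfo["gid"])  # egid first: once euid is non-privileged,
    os.seteuid(uinfo["uid"])  # setegid() would be denied
    try:
        return work()
    finally:
        os.seteuid(euid)      # restore in reverse order
        os.setegid(egid)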
-@salt.utils.decorators.path.which('unzip')
-def cmd_unzip(zip_file,
- dest,
- excludes=None,
- options=None,
- template=None,
- runas=None,
- trim_output=False,
- password=None):
- '''
+@salt.utils.decorators.path.which("unzip")
+def cmd_unzip(
+ zip_file,
+ dest,
+ excludes=None,
+ options=None,
+ template=None,
+ runas=None,
+ trim_output=False,
+ password=None,
+):
+ """
.. versionadded:: 2015.5.0
In versions 2014.7.x and earlier, this function was known as
``archive.unzip``.
@@ -926,47 +929,50 @@ def cmd_unzip(zip_file,
.. code-block:: bash
salt '*' archive.cmd_unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
- '''
+ """
if isinstance(excludes, six.string_types):
- excludes = [x.strip() for x in excludes.split(',')]
+ excludes = [x.strip() for x in excludes.split(",")]
elif isinstance(excludes, (float, six.integer_types)):
excludes = [six.text_type(excludes)]
- cmd = ['unzip']
+ cmd = ["unzip"]
if password:
- cmd.extend(['-P', password])
+ cmd.extend(["-P", password])
if options:
cmd.extend(shlex.split(options))
- cmd.extend(['{0}'.format(zip_file), '-d', '{0}'.format(dest)])
+ cmd.extend(["{0}".format(zip_file), "-d", "{0}".format(dest)])
if excludes is not None:
- cmd.append('-x')
+ cmd.append("-x")
cmd.extend(excludes)
- result = __salt__['cmd.run_all'](
+ result = __salt__["cmd.run_all"](
cmd,
template=template,
runas=runas,
python_shell=False,
redirect_stderr=True,
- output_loglevel='quiet' if password else 'debug')
+ output_loglevel="quiet" if password else "debug",
+ )
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stdout'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stdout"])
- return _trim_files(result['stdout'].splitlines(), trim_output)
+ return _trim_files(result["stdout"].splitlines(), trim_output)
-def unzip(zip_file,
- dest,
- excludes=None,
- options=None,
- template=None,
- runas=None,
- trim_output=False,
- password=None,
- extract_perms=True):
- '''
+def unzip(
+ zip_file,
+ dest,
+ excludes=None,
+ options=None,
+ template=None,
+ runas=None,
+ trim_output=False,
+ password=None,
+ extract_perms=True,
+):
+ """
Uses the ``zipfile`` Python module to unpack zip files
.. versionchanged:: 2015.5.0
@@ -1044,25 +1050,23 @@ def unzip(zip_file,
.. code-block:: bash
salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='BadPassword'
- '''
+ """
if not excludes:
excludes = []
if runas:
euid = os.geteuid()
egid = os.getegid()
- uinfo = __salt__['user.info'](runas)
+ uinfo = __salt__["user.info"](runas)
if not uinfo:
- raise SaltInvocationError(
- "User '{0}' does not exist".format(runas)
- )
+ raise SaltInvocationError("User '{0}' does not exist".format(runas))
zip_file, dest = _render_filenames(zip_file, dest, None, template)
- if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
+ if runas and (euid != uinfo["uid"] or egid != uinfo["gid"]):
# Change the egid first, as changing it after the euid will fail
# if the runas user is non-privileged.
- os.setegid(uinfo['gid'])
- os.seteuid(uinfo['uid'])
+ os.setegid(uinfo["gid"])
+ os.seteuid(uinfo["uid"])
try:
# Define cleaned_files here so that an exception will not prevent this
@@ -1073,7 +1077,7 @@ def unzip(zip_file,
files = zfile.namelist()
if isinstance(excludes, six.string_types):
- excludes = [x.strip() for x in excludes.split(',')]
+ excludes = [x.strip() for x in excludes.split(",")]
elif isinstance(excludes, (float, six.integer_types)):
excludes = [six.text_type(excludes)]
@@ -1093,14 +1097,16 @@ def unzip(zip_file,
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
umask_ = salt.utils.files.get_umask()
- if target.endswith('/'):
+ if target.endswith("/"):
perm = 0o777 & ~umask_
else:
perm = 0o666 & ~umask_
os.chmod(os.path.join(dest, target), perm)
else:
win32_attr = zfile.getinfo(target).external_attr & 0xFF
- win32file.SetFileAttributes(os.path.join(dest, target), win32_attr)
+ win32file.SetFileAttributes(
+ os.path.join(dest, target), win32_attr
+ )
except Exception as exc: # pylint: disable=broad-except
if runas:
os.seteuid(euid)
@@ -1108,7 +1114,7 @@ def unzip(zip_file,
# Wait to raise the exception until euid/egid are restored to avoid
# permission errors in writing to minion log.
raise CommandExecutionError(
- 'Exception encountered unpacking zipfile: {0}'.format(exc)
+ "Exception encountered unpacking zipfile: {0}".format(exc)
)
finally:
# Restore the euid/egid
@@ -1119,8 +1125,8 @@ def unzip(zip_file,
return _trim_files(cleaned_files, trim_output)
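A short sketch of the permission recovery used above (illustrative, with a hypothetical archive name): on archives built on Unix, the high 16 bits of ZipInfo.external_attr hold the POSIX mode, and the low byte holds DOS attribute flags.

import zipfile

with zipfile.ZipFile("example.zip") as zf:   # hypothetical archive
    info = zf.getinfo(zf.namelist()[0])
    posix_mode = info.external_attr >> 16    # e.g. 0o100644 for a plain file
    dos_attrs = info.external_attr & 0xFF    # FILE_ATTRIBUTE_* bits on Windows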
-def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
- '''
+def is_encrypted(name, clean=False, saltenv="base", source_hash=None):
+ """
.. versionadded:: 2016.11.0
Returns ``True`` if the zip archive is password-protected, ``False`` if
@@ -1161,12 +1167,12 @@ def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
salt '*' archive.is_encrypted https://domain.tld/myfile.zip clean=True
salt '*' archive.is_encrypted https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
salt '*' archive.is_encrypted ftp://10.1.2.3/foo.zip
- '''
- cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
+ """
+ cached = __salt__["cp.cache_file"](name, saltenv, source_hash=source_hash)
if not cached:
- raise CommandExecutionError('Failed to cache {0}'.format(name))
+ raise CommandExecutionError("Failed to cache {0}".format(name))
- archive_info = {'archive location': cached}
+ archive_info = {"archive location": cached}
try:
with contextlib.closing(zipfile.ZipFile(cached)) as zip_archive:
zip_archive.testzip()
@@ -1174,8 +1180,7 @@ def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
ret = True
except zipfile.BadZipfile:
raise CommandExecutionError(
- '{0} is not a ZIP file'.format(name),
- info=archive_info
+ "{0} is not a ZIP file".format(name), info=archive_info
)
except Exception as exc: # pylint: disable=broad-except
raise CommandExecutionError(exc.__str__(), info=archive_info)
@@ -1185,19 +1190,18 @@ def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
if clean:
try:
os.remove(cached)
- log.debug('Cleaned cached archive %s', cached)
+ log.debug("Cleaned cached archive %s", cached)
except OSError as exc:
if exc.errno != errno.ENOENT:
log.warning(
- 'Failed to clean cached archive %s: %s',
- cached, exc.__str__()
+ "Failed to clean cached archive %s: %s", cached, exc.__str__()
)
return ret
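The detection hinges on ZipFile.testzip() reading every member, which raises RuntimeError when it hits a password-protected entry; a compact sketch (hypothetical path):

import zipfile

def zip_needs_password(path):
    with zipfile.ZipFile(path) as zf:
        try:
            zf.testzip()
        except RuntimeError:
            return True  # CPython raises RuntimeError on encrypted members
    return False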
-@salt.utils.decorators.path.which('rar')
+@salt.utils.decorators.path.which("rar")
def rar(rarfile, sources, template=None, cwd=None, runas=None):
- '''
+ """
Uses `rar for Linux`_ to create rar files
.. _`rar for Linux`: http://www.rarlab.com/
@@ -1236,19 +1240,17 @@ def rar(rarfile, sources, template=None, cwd=None, runas=None):
salt '*' archive.rar /tmp/rarfile.rar /tmp/sourcefile1,/tmp/sourcefile2
# Globbing for sources (2017.7.0 and later)
salt '*' archive.rar /tmp/rarfile.rar '/tmp/sourcefile*'
- '''
- cmd = ['rar', 'a', '-idp', '{0}'.format(rarfile)]
+ """
+ cmd = ["rar", "a", "-idp", "{0}".format(rarfile)]
cmd.extend(_expand_sources(sources))
- return __salt__['cmd.run'](cmd,
- cwd=cwd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ return __salt__["cmd.run"](
+ cmd, cwd=cwd, template=template, runas=runas, python_shell=False
+ ).splitlines()
-@salt.utils.decorators.path.which_bin(('unrar', 'rar'))
+@salt.utils.decorators.path.which_bin(("unrar", "rar"))
def unrar(rarfile, dest, excludes=None, template=None, runas=None, trim_output=False):
- '''
+ """
Uses `rar for Linux`_ to unpack rar files
.. _`rar for Linux`: http://www.rarlab.com/
@@ -1277,71 +1279,70 @@ def unrar(rarfile, dest, excludes=None, template=None, runas=None, trim_output=F
salt '*' archive.unrar /tmp/rarfile.rar /home/strongbad/ excludes=file_1,file_2
- '''
+ """
if isinstance(excludes, six.string_types):
- excludes = [entry.strip() for entry in excludes.split(',')]
+ excludes = [entry.strip() for entry in excludes.split(",")]
- cmd = [salt.utils.path.which_bin(('unrar', 'rar')),
- 'x', '-idp', '{0}'.format(rarfile)]
+ cmd = [
+ salt.utils.path.which_bin(("unrar", "rar")),
+ "x",
+ "-idp",
+ "{0}".format(rarfile),
+ ]
if excludes is not None:
for exclude in excludes:
- cmd.extend(['-x', '{0}'.format(exclude)])
- cmd.append('{0}'.format(dest))
- files = __salt__['cmd.run'](cmd,
- template=template,
- runas=runas,
- python_shell=False).splitlines()
+ cmd.extend(["-x", "{0}".format(exclude)])
+ cmd.append("{0}".format(dest))
+ files = __salt__["cmd.run"](
+ cmd, template=template, runas=runas, python_shell=False
+ ).splitlines()
return _trim_files(files, trim_output)
def _render_filenames(filenames, zip_file, saltenv, template):
- '''
+ """
Process markup in the :param:`filenames` and :param:`zip_file` variables (NOT the
files under the paths they ultimately point to) according to the markup
format provided by :param:`template`.
- '''
+ """
if not template:
return (filenames, zip_file)
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
- 'Attempted to render file paths with unavailable engine '
- '{0}'.format(template)
+ "Attempted to render file paths with unavailable engine "
+ "{0}".format(template)
)
kwargs = {}
- kwargs['salt'] = __salt__
- kwargs['pillar'] = __pillar__
- kwargs['grains'] = __grains__
- kwargs['opts'] = __opts__
- kwargs['saltenv'] = saltenv
+ kwargs["salt"] = __salt__
+ kwargs["pillar"] = __pillar__
+ kwargs["grains"] = __grains__
+ kwargs["opts"] = __opts__
+ kwargs["saltenv"] = saltenv
def _render(contents):
- '''
+ """
Render :param:`contents` into a literal pathname by writing it to a
temp file, rendering that file, and returning the result.
- '''
+ """
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
- with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
+ with salt.utils.files.fopen(tmp_path_fn, "w+") as fp_:
fp_.write(salt.utils.stringutils.to_str(contents))
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
- tmp_path_fn,
- to_str=True,
- **kwargs
+ tmp_path_fn, to_str=True, **kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
- if not data['result']:
+ if not data["result"]:
# Failed to render the template
raise CommandExecutionError(
- 'Failed to render file path with error: {0}'.format(
- data['data']
- )
+ "Failed to render file path with error: {0}".format(data["data"])
)
else:
- return data['data']
+ return data["data"]
filenames = _render(filenames)
zip_file = _render(zip_file)
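Conceptually, _render() pushes each path through Salt's template registry; a hedged stand-in using plain jinja2, which the "jinja" engine wraps (template string and grains are invented):

import jinja2

path_tpl = "/tmp/{{ grains['id'] }}.zip"  # hypothetical templated path
rendered = jinja2.Template(path_tpl).render(grains={"id": "minion1"})
# rendered == "/tmp/minion1.zip"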
@@ -1349,14 +1350,17 @@ def _render_filenames(filenames, zip_file, saltenv, template):
def _trim_files(files, trim_output):
- '''
+ """
Trim the file list for output.
- '''
+ """
count = 100
if not isinstance(trim_output, bool):
count = trim_output
- if not(isinstance(trim_output, bool) and trim_output is False) and len(files) > count:
+ if (
+ not (isinstance(trim_output, bool) and trim_output is False)
+ and len(files) > count
+ ):
files = files[:count]
files.append("List trimmed after {0} files.".format(count))
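Behaviorally, trim_output is either a flag or a count; an equivalent sketch of _trim_files():

def trim(files, trim_output=False):
    # True caps the list at 100 entries, an integer caps it at that count,
    # and False returns the list untouched.
    count = 100 if isinstance(trim_output, bool) else trim_output
    if trim_output is not False and len(files) > count:
        files = files[:count] + ["List trimmed after {0} files.".format(count)]
    return files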
diff --git a/salt/modules/arista_pyeapi.py b/salt/modules/arista_pyeapi.py
index 1dcf4fe1271..3095a467f86 100644
--- a/salt/modules/arista_pyeapi.py
+++ b/salt/modules/arista_pyeapi.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Arista pyeapi
=============
@@ -88,21 +88,23 @@ outside a ``pyeapi`` Proxy, e.g.:
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
+from salt.exceptions import CommandExecutionError
+
# Import Salt libs
from salt.ext import six
-from salt.exceptions import CommandExecutionError
from salt.utils.args import clean_kwargs
# Import third party libs
try:
import pyeapi
+
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
@@ -111,10 +113,10 @@ except ImportError:
# execution module properties
# -----------------------------------------------------------------------------
-__proxyenabled__ = ['*']
+__proxyenabled__ = ["*"]
# Any Proxy Minion should be able to execute these
-__virtualname__ = 'pyeapi'
+__virtualname__ = "pyeapi"
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
@@ -124,14 +126,14 @@ __virtualname__ = 'pyeapi'
log = logging.getLogger(__name__)
PYEAPI_INIT_KWARGS = [
- 'transport',
- 'host',
- 'username',
- 'password',
- 'enablepwd',
- 'port',
- 'timeout',
- 'return_node'
+ "transport",
+ "host",
+ "username",
+ "password",
+ "enablepwd",
+ "port",
+ "timeout",
+ "return_node",
]
# -----------------------------------------------------------------------------
@@ -140,39 +142,46 @@ PYEAPI_INIT_KWARGS = [
def __virtual__():
- '''
+ """
Execution module available only if pyeapi is installed.
- '''
+ """
if not HAS_PYEAPI:
- return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
+ return (
+ False,
+ "The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``",
+ )
return __virtualname__
+
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
- '''
+ """
Prepare the connection with the remote network device, and clean up the key
value pairs, removing the args used for the connection init.
- '''
- pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
+ """
+ pyeapi_kwargs = __salt__["config.get"]("pyeapi", {})
pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar
- init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
- if 'transport' not in init_kwargs:
- init_kwargs['transport'] = 'https'
+ init_kwargs, fun_kwargs = __utils__["args.prepare_kwargs"](
+ pyeapi_kwargs, PYEAPI_INIT_KWARGS
+ )
+ if "transport" not in init_kwargs:
+ init_kwargs["transport"] = "https"
conn = pyeapi.client.connect(**init_kwargs)
- node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
+ node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get("enablepwd"))
return node, fun_kwargs
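A sketch of the kwargs split performed above (the prepare_kwargs behavior is inferred from this call site): connection arguments are peeled off for pyeapi.client.connect(), and everything else is handed back for the eventual method call.

PYEAPI_INIT_KWARGS = ["transport", "host", "username", "password",
                      "enablepwd", "port", "timeout", "return_node"]

def split_kwargs(all_kwargs):
    init = {k: v for k, v in all_kwargs.items() if k in PYEAPI_INIT_KWARGS}
    fun = {k: v for k, v in all_kwargs.items() if k not in PYEAPI_INIT_KWARGS}
    return init, fun

# split_kwargs({"host": "sw01", "encoding": "json"})
# -> ({"host": "sw01"}, {"encoding": "json"})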
+
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
- '''
+ """
Return the connection object to the pyeapi Node.
.. warning::
@@ -192,16 +201,16 @@ def get_connection(**kwargs):
username='example',
password='example')
show_ver = conn.run_commands(['show version', 'show interfaces'])
- '''
+ """
kwargs = clean_kwargs(**kwargs)
- if 'pyeapi.conn' in __proxy__:
- return __proxy__['pyeapi.conn']()
+ if "pyeapi.conn" in __proxy__:
+ return __proxy__["pyeapi.conn"]()
conn, kwargs = _prepare_connection(**kwargs)
return conn
def call(method, *args, **kwargs):
- '''
+ """
Invoke an arbitrary pyeapi method.
method
@@ -269,17 +278,17 @@ def call(method, *args, **kwargs):
.. code-block:: bash
salt '*' pyeapi.call run_commands "['show version']"
- '''
+ """
kwargs = clean_kwargs(**kwargs)
- if 'pyeapi.call' in __proxy__:
- return __proxy__['pyeapi.call'](method, *args, **kwargs)
+ if "pyeapi.call" in __proxy__:
+ return __proxy__["pyeapi.call"](method, *args, **kwargs)
conn, kwargs = _prepare_connection(**kwargs)
ret = getattr(conn, method)(*args, **kwargs)
return ret
def run_commands(*commands, **kwargs):
- '''
+ """
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
@@ -374,30 +383,30 @@ def run_commands(*commands, **kwargs):
52:54:00:3f:e6:d0
version:
4.18.1F
- '''
- encoding = kwargs.pop('encoding', 'json')
- send_enable = kwargs.pop('send_enable', True)
- output = call('run_commands',
- commands,
- encoding=encoding,
- send_enable=send_enable,
- **kwargs)
- if encoding == 'text':
+ """
+ encoding = kwargs.pop("encoding", "json")
+ send_enable = kwargs.pop("send_enable", True)
+ output = call(
+ "run_commands", commands, encoding=encoding, send_enable=send_enable, **kwargs
+ )
+ if encoding == "text":
ret = []
for res in output:
- ret.append(res['output'])
+ ret.append(res["output"])
return ret
return output
-def config(commands=None,
- config_file=None,
- template_engine='jinja',
- context=None,
- defaults=None,
- saltenv='base',
- **kwargs):
- '''
+def config(
+ commands=None,
+ config_file=None,
+ template_engine="jinja",
+ context=None,
+ defaults=None,
+ saltenv="base",
+ **kwargs
+):
+ """
Configures the node with the specified commands.
This method is used to send configuration commands to the node. It
@@ -501,41 +510,38 @@ def config(commands=None,
salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
salt '*' pyeapi.config config_file=salt://config.txt
salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
- '''
+ """
initial_config = get_config(as_string=True, **kwargs)
if config_file:
- file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
+ file_str = __salt__["cp.get_file_str"](config_file, saltenv=saltenv)
if file_str is False:
- raise CommandExecutionError('Source file {} not found'.format(config_file))
- log.debug('Fetched from %s', config_file)
+ raise CommandExecutionError("Source file {} not found".format(config_file))
+ log.debug("Fetched from %s", config_file)
log.debug(file_str)
elif commands:
if isinstance(commands, (six.string_types, six.text_type)):
commands = [commands]
- file_str = '\n'.join(commands)
+ file_str = "\n".join(commands)
# unify all the commands in a single file, to render them in a go
if template_engine:
- file_str = __salt__['file.apply_template_on_contents'](file_str,
- template_engine,
- context,
- defaults,
- saltenv)
- log.debug('Rendered:')
+ file_str = __salt__["file.apply_template_on_contents"](
+ file_str, template_engine, context, defaults, saltenv
+ )
+ log.debug("Rendered:")
log.debug(file_str)
# whatever the source of the commands would be, split them line by line
commands = [line for line in file_str.splitlines() if line.strip()]
# push the commands one by one, removing empty lines
- configured = call('config', commands, **kwargs)
+ configured = call("config", commands, **kwargs)
current_config = get_config(as_string=True, **kwargs)
- diff = difflib.unified_diff(initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:])
- return ''.join([x.replace('\r', '') for x in diff])
+ diff = difflib.unified_diff(
+ initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:]
+ )
+ return "".join([x.replace("\r", "") for x in diff])
-def get_config(config='running-config',
- params=None,
- as_string=False,
- **kwargs):
- '''
+def get_config(config="running-config", params=None, as_string=False, **kwargs):
+ """
Retrieves the config from the device.
This method will retrieve the config from the node as either a string
@@ -612,16 +618,14 @@ def get_config(config='running-config',
salt '*' pyeapi.get_config
salt '*' pyeapi.get_config params='section snmp-server'
salt '*' pyeapi.get_config config='startup-config'
- '''
- return call('get_config',
- config=config,
- params=params,
- as_string=as_string,
- **kwargs)
+ """
+ return call(
+ "get_config", config=config, params=params, as_string=as_string, **kwargs
+ )
-def section(regex, config='running-config', **kwargs):
- '''
+def section(regex, config="running-config", **kwargs):
+ """
Return a section of the config.
regex
@@ -689,5 +693,5 @@ def section(regex, config='running-config', **kwargs):
.. code-block:: bash
salt '*'
- '''
- return call('section', regex, config=config, **kwargs)
+ """
+ return call("section", regex, config=config, **kwargs)
diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py
index 01019a87f37..e2daf7ef9bf 100644
--- a/salt/modules/artifactory.py
+++ b/salt/modules/artifactory.py
@@ -1,46 +1,69 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module for fetching artifacts from Artifactory
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
+
import base64
import logging
+import os
+
+import salt.ext.six.moves.http_client # pylint: disable=import-error,redefined-builtin,no-name-in-module
# Import Salt libs
import salt.utils.files
import salt.utils.stringutils
-import salt.ext.six.moves.http_client # pylint: disable=import-error,redefined-builtin,no-name-in-module
-from salt.ext.six.moves import urllib # pylint: disable=no-name-in-module
-from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=no-name-in-module
from salt.exceptions import CommandExecutionError
+from salt.ext.six.moves import urllib # pylint: disable=no-name-in-module
+from salt.ext.six.moves.urllib.error import ( # pylint: disable=no-name-in-module
+ HTTPError,
+ URLError,
+)
# Import 3rd party libs
try:
from salt._compat import ElementTree as ET
+
HAS_ELEMENT_TREE = True
except ImportError:
HAS_ELEMENT_TREE = False
log = logging.getLogger(__name__)
-__virtualname__ = 'artifactory'
+__virtualname__ = "artifactory"
def __virtual__():
- '''
+ """
Only load if elementtree xml library is available.
- '''
+ """
if not HAS_ELEMENT_TREE:
- return (False, 'Cannot load {0} module: ElementTree library unavailable'.format(__virtualname__))
+ return (
+ False,
+ "Cannot load {0} module: ElementTree library unavailable".format(
+ __virtualname__
+ ),
+ )
else:
return True
-def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
- '''
+def get_latest_snapshot(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ target_dir="/tmp",
+ target_file=None,
+ classifier=None,
+ username=None,
+ password=None,
+ use_literal_group_id=False,
+):
+ """
Gets latest snapshot of the given artifact
artifactory_url
@@ -63,23 +86,64 @@ def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, pack
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
- '''
- log.debug("======================== MODULE FUNCTION: artifactory.get_latest_snapshot, artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
- artifactory_url, repository, group_id, artifact_id, packaging, target_dir, classifier)
+ """
+ log.debug(
+ "======================== MODULE FUNCTION: artifactory.get_latest_snapshot, artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ target_dir,
+ classifier,
+ )
headers = {}
if username and password:
- headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
- artifact_metadata = _get_artifact_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id)
- version = artifact_metadata['latest_version']
- snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id)
+ headers["Authorization"] = "Basic {0}".format(
+ base64.encodestring("{0}:{1}".format(username, password)).replace("\n", "")
+ )
+ artifact_metadata = _get_artifact_metadata(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ headers=headers,
+ use_literal_group_id=use_literal_group_id,
+ )
+ version = artifact_metadata["latest_version"]
+ snapshot_url, file_name = _get_snapshot_url(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ version=version,
+ packaging=packaging,
+ classifier=classifier,
+ headers=headers,
+ use_literal_group_id=use_literal_group_id,
+ )
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
-def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
- '''
+def get_snapshot(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ snapshot_version=None,
+ target_dir="/tmp",
+ target_file=None,
+ classifier=None,
+ username=None,
+ password=None,
+ use_literal_group_id=False,
+):
+ """
Gets snapshot of the desired version of the artifact
artifactory_url
@@ -104,20 +168,54 @@ def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging,
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
- '''
- log.debug('======================== MODULE FUNCTION: artifactory.get_snapshot(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)',
- artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir, classifier)
+ """
+ log.debug(
+ "======================== MODULE FUNCTION: artifactory.get_snapshot(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ target_dir,
+ classifier,
+ )
headers = {}
if username and password:
- headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
- snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id)
+ headers["Authorization"] = "Basic {0}".format(
+ base64.encodestring("{0}:{1}".format(username, password)).replace("\n", "")
+ )
+ snapshot_url, file_name = _get_snapshot_url(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ version=version,
+ packaging=packaging,
+ snapshot_version=snapshot_version,
+ classifier=classifier,
+ headers=headers,
+ use_literal_group_id=use_literal_group_id,
+ )
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
-def get_latest_release(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
- '''
+def get_latest_release(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ target_dir="/tmp",
+ target_file=None,
+ classifier=None,
+ username=None,
+ password=None,
+ use_literal_group_id=False,
+):
+ """
Gets the latest release of the artifact
artifactory_url
@@ -140,21 +238,59 @@ def get_latest_release(artifactory_url, repository, group_id, artifact_id, packa
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
- '''
- log.debug('======================== MODULE FUNCTION: artifactory.get_latest_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)',
- artifactory_url, repository, group_id, artifact_id, packaging, target_dir, classifier)
+ """
+ log.debug(
+ "======================== MODULE FUNCTION: artifactory.get_latest_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ target_dir,
+ classifier,
+ )
headers = {}
if username and password:
- headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
- version = __find_latest_version(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers)
- release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, use_literal_group_id)
+ headers["Authorization"] = "Basic {0}".format(
+ base64.encodestring("{0}:{1}".format(username, password)).replace("\n", "")
+ )
+ version = __find_latest_version(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ headers=headers,
+ )
+ release_url, file_name = _get_release_url(
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ artifactory_url,
+ classifier,
+ use_literal_group_id,
+ )
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
-def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
- '''
+def get_release(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ target_dir="/tmp",
+ target_file=None,
+ classifier=None,
+ username=None,
+ password=None,
+ use_literal_group_id=False,
+):
+ """
Gets the specified release of the artifact
artifactory_url
@@ -179,13 +315,33 @@ def get_release(artifactory_url, repository, group_id, artifact_id, packaging, v
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
- '''
- log.debug('======================== MODULE FUNCTION: artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)',
- artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir, classifier)
+ """
+ log.debug(
+ "======================== MODULE FUNCTION: artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ target_dir,
+ classifier,
+ )
headers = {}
if username and password:
- headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
- release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, use_literal_group_id)
+ headers["Authorization"] = "Basic {0}".format(
+ base64.encodestring("{0}:{1}".format(username, password)).replace("\n", "")
+ )
+ release_url, file_name = _get_release_url(
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ artifactory_url,
+ classifier,
+ use_literal_group_id,
+ )
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
@@ -197,160 +353,241 @@ def __resolve_target_file(file_name, target_dir, target_file=None):
return target_file
-def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, version, packaging, snapshot_version=None, classifier=None, headers=None, use_literal_group_id=False):
+def _get_snapshot_url(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ version,
+ packaging,
+ snapshot_version=None,
+ classifier=None,
+ headers=None,
+ use_literal_group_id=False,
+):
if headers is None:
headers = {}
has_classifier = classifier is not None and classifier != ""
if snapshot_version is None:
try:
- snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers)
- if packaging not in snapshot_version_metadata['snapshot_versions']:
- error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata.
+ snapshot_version_metadata = _get_snapshot_version_metadata(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ version=version,
+ headers=headers,
+ )
+ if packaging not in snapshot_version_metadata["snapshot_versions"]:
+ error_message = """Cannot find requested packaging '{packaging}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
- version: {version}'''.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_id=group_id,
- artifact_id=artifact_id,
- packaging=packaging,
- classifier=classifier,
- version=version)
+ version: {version}""".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ packaging=packaging,
+ classifier=classifier,
+ version=version,
+ )
raise ArtifactoryError(error_message)
- packaging_with_classifier = packaging if not has_classifier else packaging + ':' + classifier
- if has_classifier and packaging_with_classifier not in snapshot_version_metadata['snapshot_versions']:
- error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata.
+ packaging_with_classifier = (
+ packaging if not has_classifier else packaging + ":" + classifier
+ )
+ if (
+ has_classifier
+ and packaging_with_classifier
+ not in snapshot_version_metadata["snapshot_versions"]
+ ):
+ error_message = """Cannot find requested classifier '{classifier}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
- version: {version}'''.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_id=group_id,
- artifact_id=artifact_id,
- packaging=packaging,
- classifier=classifier,
- version=version)
+ version: {version}""".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ packaging=packaging,
+ classifier=classifier,
+ version=version,
+ )
raise ArtifactoryError(error_message)
- snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging_with_classifier]
+ snapshot_version = snapshot_version_metadata["snapshot_versions"][
+ packaging_with_classifier
+ ]
except CommandExecutionError as err:
- log.error('Could not fetch maven-metadata.xml. Assuming snapshot_version=%s.', version)
+ log.error(
+ "Could not fetch maven-metadata.xml. Assuming snapshot_version=%s.",
+ version,
+ )
snapshot_version = version
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
- file_name = '{artifact_id}-{snapshot_version}{classifier}.{packaging}'.format(
+ file_name = "{artifact_id}-{snapshot_version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
snapshot_version=snapshot_version,
packaging=packaging,
- classifier=__get_classifier_url(classifier))
+ classifier=__get_classifier_url(classifier),
+ )
- snapshot_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}'.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_url=group_url,
- artifact_id=artifact_id,
- version=version,
- file_name=file_name)
- log.debug('snapshot_url=%s', snapshot_url)
+ snapshot_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_url=group_url,
+ artifact_id=artifact_id,
+ version=version,
+ file_name=file_name,
+ )
+ log.debug("snapshot_url=%s", snapshot_url)
return snapshot_url, file_name
-def _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier=None, use_literal_group_id=False):
+def _get_release_url(
+ repository,
+ group_id,
+ artifact_id,
+ packaging,
+ version,
+ artifactory_url,
+ classifier=None,
+ use_literal_group_id=False,
+):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
- file_name = '{artifact_id}-{version}{classifier}.{packaging}'.format(
+ file_name = "{artifact_id}-{version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
version=version,
packaging=packaging,
- classifier=__get_classifier_url(classifier))
+ classifier=__get_classifier_url(classifier),
+ )
- release_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}'.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_url=group_url,
- artifact_id=artifact_id,
- version=version,
- file_name=file_name)
- log.debug('release_url=%s', release_url)
+ release_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_url=group_url,
+ artifact_id=artifact_id,
+ version=version,
+ file_name=file_name,
+ )
+ log.debug("release_url=%s", release_url)
return release_url, file_name
-def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False):
+def _get_artifact_metadata_url(
+ artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
+):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
- artifact_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml'.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_url=group_url,
- artifact_id=artifact_id)
- log.debug('artifact_metadata_url=%s', artifact_metadata_url)
+ artifact_metadata_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_url=group_url,
+ artifact_id=artifact_id,
+ )
+ log.debug("artifact_metadata_url=%s", artifact_metadata_url)
return artifact_metadata_url
-def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
+def _get_artifact_metadata_xml(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ headers,
+ use_literal_group_id=False,
+):
artifact_metadata_url = _get_artifact_metadata_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
- use_literal_group_id=use_literal_group_id
+ use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(artifact_metadata_url, None, headers)
artifact_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
- message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
- artifact_metadata_url,
- err
+ message = "Could not fetch data from url: {0}. ERROR: {1}".format(
+ artifact_metadata_url, err
)
raise CommandExecutionError(message)
- log.debug('artifact_metadata_xml=%s', artifact_metadata_xml)
+ log.debug("artifact_metadata_xml=%s", artifact_metadata_xml)
return artifact_metadata_xml
-def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
- metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id)
+def _get_artifact_metadata(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ headers,
+ use_literal_group_id=False,
+):
+ metadata_xml = _get_artifact_metadata_xml(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ headers=headers,
+ use_literal_group_id=use_literal_group_id,
+ )
root = ET.fromstring(metadata_xml)
- assert group_id == root.find('groupId').text
- assert artifact_id == root.find('artifactId').text
- latest_version = root.find('versioning').find('latest').text
- return {
- 'latest_version': latest_version
- }
+ assert group_id == root.find("groupId").text
+ assert artifact_id == root.find("artifactId").text
+ latest_version = root.find("versioning").find("latest").text
+ return {"latest_version": latest_version}
# functions for handling snapshots
-def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, artifact_id, version, use_literal_group_id=False):
+def _get_snapshot_version_metadata_url(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ version,
+ use_literal_group_id=False,
+):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
- snapshot_version_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml'.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_url=group_url,
- artifact_id=artifact_id,
- version=version)
- log.debug('snapshot_version_metadata_url=%s', snapshot_version_metadata_url)
+ snapshot_version_metadata_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_url=group_url,
+ artifact_id=artifact_id,
+ version=version,
+ )
+ log.debug("snapshot_version_metadata_url=%s", snapshot_version_metadata_url)
return snapshot_version_metadata_url
-def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers, use_literal_group_id=False):
+def _get_snapshot_version_metadata_xml(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ version,
+ headers,
+ use_literal_group_id=False,
+):
snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
artifactory_url=artifactory_url,
@@ -358,141 +595,161 @@ def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, ar
group_id=group_id,
artifact_id=artifact_id,
version=version,
- use_literal_group_id=use_literal_group_id
+ use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(snapshot_version_metadata_url, None, headers)
snapshot_version_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
- message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
- snapshot_version_metadata_url,
- err
+ message = "Could not fetch data from url: {0}. ERROR: {1}".format(
+ snapshot_version_metadata_url, err
)
raise CommandExecutionError(message)
- log.debug('snapshot_version_metadata_xml=%s', snapshot_version_metadata_xml)
+ log.debug("snapshot_version_metadata_xml=%s", snapshot_version_metadata_xml)
return snapshot_version_metadata_xml
-def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifact_id, version, headers):
- metadata_xml = _get_snapshot_version_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers)
+def _get_snapshot_version_metadata(
+ artifactory_url, repository, group_id, artifact_id, version, headers
+):
+ metadata_xml = _get_snapshot_version_metadata_xml(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_id=group_id,
+ artifact_id=artifact_id,
+ version=version,
+ headers=headers,
+ )
metadata = ET.fromstring(metadata_xml)
- assert group_id == metadata.find('groupId').text
- assert artifact_id == metadata.find('artifactId').text
- assert version == metadata.find('version').text
+ assert group_id == metadata.find("groupId").text
+ assert artifact_id == metadata.find("artifactId").text
+ assert version == metadata.find("version").text
- snapshot_versions = metadata.find('versioning').find('snapshotVersions')
+ snapshot_versions = metadata.find("versioning").find("snapshotVersions")
extension_version_dict = {}
for snapshot_version in snapshot_versions:
- extension = snapshot_version.find('extension').text
- value = snapshot_version.find('value').text
- if snapshot_version.find('classifier') is not None:
- classifier = snapshot_version.find('classifier').text
- extension_version_dict[extension + ':' + classifier] = value
+ extension = snapshot_version.find("extension").text
+ value = snapshot_version.find("value").text
+ if snapshot_version.find("classifier") is not None:
+ classifier = snapshot_version.find("classifier").text
+ extension_version_dict[extension + ":" + classifier] = value
else:
extension_version_dict[extension] = value
- return {
- 'snapshot_versions': extension_version_dict
- }
+ return {"snapshot_versions": extension_version_dict}
-def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False):
+def __get_latest_version_url(
+ artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
+):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
- latest_version_url = '{artifactory_url}/api/search/latestVersion?g={group_url}&a={artifact_id}&repos={repository}'.format(
- artifactory_url=artifactory_url,
- repository=repository,
- group_url=group_url,
- artifact_id=artifact_id)
- log.debug('latest_version_url=%s', latest_version_url)
+ latest_version_url = "{artifactory_url}/api/search/latestVersion?g={group_url}&a={artifact_id}&repos={repository}".format(
+ artifactory_url=artifactory_url,
+ repository=repository,
+ group_url=group_url,
+ artifact_id=artifact_id,
+ )
+ log.debug("latest_version_url=%s", latest_version_url)
return latest_version_url
-def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
+def __find_latest_version(
+ artifactory_url,
+ repository,
+ group_id,
+ artifact_id,
+ headers,
+ use_literal_group_id=False,
+):
latest_version_url = __get_latest_version_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
- use_literal_group_id=use_literal_group_id
+ use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(latest_version_url, None, headers)
version = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
- message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
- latest_version_url,
- err
+ message = "Could not fetch data from url: {0}. ERROR: {1}".format(
+ latest_version_url, err
)
raise CommandExecutionError(message)
log.debug("Response of: %s", version)
- if version is None or version == '':
- raise ArtifactoryError('Unable to find release version')
+ if version is None or version == "":
+ raise ArtifactoryError("Unable to find release version")
return version
def __save_artifact(artifact_url, target_file, headers):
log.debug("__save_artifact(%s, %s)", artifact_url, target_file)
- result = {
- 'status': False,
- 'changes': {},
- 'comment': ''
- }
+ result = {"status": False, "changes": {}, "comment": ""}
if os.path.isfile(target_file):
log.debug("File %s already exists, checking checksum...", target_file)
checksum_url = artifact_url + ".sha1"
- checksum_success, artifact_sum, checksum_comment = __download(checksum_url, headers)
+ checksum_success, artifact_sum, checksum_comment = __download(
+ checksum_url, headers
+ )
if checksum_success:
log.debug("Downloaded SHA1 SUM: %s", artifact_sum)
- file_sum = __salt__['file.get_hash'](path=target_file, form='sha1')
+ file_sum = __salt__["file.get_hash"](path=target_file, form="sha1")
log.debug("Target file (%s) SHA1 SUM: %s", target_file, file_sum)
if artifact_sum == file_sum:
- result['status'] = True
- result['target_file'] = target_file
- result['comment'] = 'File {0} already exists, checksum matches with Artifactory.\n' \
- 'Checksum URL: {1}'.format(target_file, checksum_url)
+ result["status"] = True
+ result["target_file"] = target_file
+ result["comment"] = (
+ "File {0} already exists, checksum matches with Artifactory.\n"
+ "Checksum URL: {1}".format(target_file, checksum_url)
+ )
return result
else:
- result['comment'] = 'File {0} already exists, checksum does not match with Artifactory!\n'\
- 'Checksum URL: {1}'.format(target_file, checksum_url)
+ result["comment"] = (
+ "File {0} already exists, checksum does not match with Artifactory!\n"
+ "Checksum URL: {1}".format(target_file, checksum_url)
+ )
else:
- result['status'] = False
- result['comment'] = checksum_comment
+ result["status"] = False
+ result["comment"] = checksum_comment
return result
- log.debug('Downloading: %s -> %s', artifact_url, target_file)
+ log.debug("Downloading: %s -> %s", artifact_url, target_file)
try:
request = urllib.request.Request(artifact_url, None, headers)
f = urllib.request.urlopen(request)
with salt.utils.files.fopen(target_file, "wb") as local_file:
local_file.write(salt.utils.stringutils.to_bytes(f.read()))
- result['status'] = True
- result['comment'] = __append_comment(('Artifact downloaded from URL: {0}'.format(artifact_url)), result['comment'])
- result['changes']['downloaded_file'] = target_file
- result['target_file'] = target_file
+ result["status"] = True
+ result["comment"] = __append_comment(
+ ("Artifact downloaded from URL: {0}".format(artifact_url)),
+ result["comment"],
+ )
+ result["changes"]["downloaded_file"] = target_file
+ result["target_file"] = target_file
except (HTTPError, URLError) as e:
- result['status'] = False
- result['comment'] = __get_error_comment(e, artifact_url)
+ result["status"] = False
+ result["comment"] = __get_error_comment(e, artifact_url)
return result
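The checksum short-circuit compares a downloaded .sha1 against the local file's digest; a stand-in for the file.get_hash call (path is hypothetical):

import hashlib

def local_sha1(path):
    h = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()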
def __get_group_id_subpath(group_id, use_literal_group_id=False):
if not use_literal_group_id:
- group_url = group_id.replace('.', '/')
+ group_url = group_id.replace(".", "/")
return group_url
return group_id
@@ -503,7 +760,7 @@ def __get_classifier_url(classifier):
def __download(request_url, headers):
- log.debug('Downloading content from %s', request_url)
+ log.debug("Downloading content from %s", request_url)
success = False
content = None
@@ -521,22 +778,27 @@ def __download(request_url, headers):
def __get_error_comment(http_error, request_url):
if http_error.code == salt.ext.six.moves.http_client.NOT_FOUND:
- comment = 'HTTP Error 404. Request URL: ' + request_url
+ comment = "HTTP Error 404. Request URL: " + request_url
elif http_error.code == salt.ext.six.moves.http_client.CONFLICT:
- comment = 'HTTP Error 409: Conflict. Requested URL: {0}. \n' \
- 'This error may be caused by reading snapshot artifact from non-snapshot repository.'.format(request_url)
+ comment = (
+ "HTTP Error 409: Conflict. Requested URL: {0}. \n"
+ "This error may be caused by reading snapshot artifact from non-snapshot repository.".format(
+ request_url
+ )
+ )
else:
- comment = 'HTTP Error {err_code}. Request URL: {url}'.format(err_code=http_error.code, url=request_url)
+ comment = "HTTP Error {err_code}. Request URL: {url}".format(
+ err_code=http_error.code, url=request_url
+ )
return comment
-def __append_comment(new_comment, current_comment=''):
- return current_comment+'\n'+new_comment
+def __append_comment(new_comment, current_comment=""):
+ return current_comment + "\n" + new_comment
class ArtifactoryError(Exception):
-
def __init__(self, value):
super(ArtifactoryError, self).__init__()
self.value = value
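Editor's note: the ``__save_artifact`` flow above only re-downloads when a local copy is absent or its SHA1 digest disagrees with the published ``.sha1`` companion file. A minimal standalone sketch of that short-circuit, with stdlib ``hashlib`` standing in for ``__salt__['file.get_hash']`` and a hypothetical ``fetch_text`` helper standing in for ``__download``:

.. code-block:: python

    import hashlib
    import os

    def sha1_of(path):
        # Hash in chunks so large artifacts are not read into memory at once.
        digest = hashlib.sha1()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(65536), b""):
                digest.update(chunk)
        return digest.hexdigest()

    def needs_download(artifact_url, target_file, fetch_text):
        # fetch_text(url) -> str is an assumed helper; it plays the role
        # of __download() in the module above.
        if not os.path.isfile(target_file):
            return True
        published = fetch_text(artifact_url + ".sha1").strip()
        return published != sha1_of(target_file)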
diff --git a/salt/modules/at.py b/salt/modules/at.py
index bd2d45a4fff..119a8dbac1f 100644
--- a/salt/modules/at.py
+++ b/salt/modules/at.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Wrapper module for at(1)
Also, a 'tag' feature has been added to more
@@ -8,60 +8,61 @@ easily tag jobs.
:platform: linux,openbsd,freebsd
.. versionchanged:: 2017.7.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import datetime
+
# Import python libs
import re
import time
-import datetime
-
-# Import 3rd-party libs
-# pylint: disable=import-error,redefined-builtin
-from salt.ext.six.moves import map
-# pylint: enable=import-error,redefined-builtin
-from salt.exceptions import CommandNotFoundError
-from salt.ext import six
# Import salt libs
import salt.utils.data
import salt.utils.path
import salt.utils.platform
+# pylint: enable=import-error,redefined-builtin
+from salt.exceptions import CommandNotFoundError
+from salt.ext import six
+
+# Import 3rd-party libs
+# pylint: disable=import-error,redefined-builtin
+from salt.ext.six.moves import map
+
# OS Families that should work (Ubuntu and Debian are the default)
# TODO: Refactor some of this module to remove the checks for binaries
# Tested on OpenBSD 5.0
-BSD = ('OpenBSD', 'FreeBSD')
+BSD = ("OpenBSD", "FreeBSD")
-__virtualname__ = 'at'
+__virtualname__ = "at"
def __virtual__():
- '''
+ """
Most everything has the ability to support at(1)
- '''
+ """
if salt.utils.platform.is_windows() or salt.utils.platform.is_sunos():
- return (False, 'The at module could not be loaded: unsupported platform')
- if salt.utils.path.which('at') is None:
- return (False, 'The at module could not be loaded: at command not found')
+ return (False, "The at module could not be loaded: unsupported platform")
+ if salt.utils.path.which("at") is None:
+ return (False, "The at module could not be loaded: at command not found")
return __virtualname__
def _cmd(binary, *args):
- '''
+ """
Wrapper to run at(1) or return None.
- '''
+ """
binary = salt.utils.path.which(binary)
if not binary:
- raise CommandNotFoundError('{0}: command not found'.format(binary))
+ raise CommandNotFoundError("{0}: command not found".format(binary))
cmd = [binary] + list(args)
- return __salt__['cmd.run_stdout']([binary] + list(args),
- python_shell=False)
+ return __salt__["cmd.run_stdout"]([binary] + list(args), python_shell=False)
def atq(tag=None):
- '''
+ """
List all queued and running jobs or only those with
an optional 'tag'.
@@ -72,78 +73,89 @@ def atq(tag=None):
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number]
- '''
+ """
jobs = []
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
# Tested on CentOS 5.8
- if __grains__['os_family'] == 'RedHat':
- output = _cmd('at', '-l')
+ if __grains__["os_family"] == "RedHat":
+ output = _cmd("at", "-l")
else:
- output = _cmd('atq')
+ output = _cmd("atq")
if output is None:
- return '\'at.atq\' is not available.'
+ return "'at.atq' is not available."
# No jobs so return
- if output == '':
- return {'jobs': jobs}
+ if output == "":
+ return {"jobs": jobs}
# Jobs created with at.at() will use the following
# comment to denote a tagged job.
- job_kw_regex = re.compile(r'^### SALT: (\w+)')
+ job_kw_regex = re.compile(r"^### SALT: (\w+)")
# Split each job into a dictionary and handle
# pulling out tags or only listing jobs with a certain
# tag
for line in output.splitlines():
- job_tag = ''
+ job_tag = ""
# Redhat/CentOS
- if __grains__['os_family'] == 'RedHat':
- job, spec = line.split('\t')
+ if __grains__["os_family"] == "RedHat":
+ job, spec = line.split("\t")
specs = spec.split()
- elif __grains__['os'] == 'OpenBSD':
- if line.startswith(' Rank'):
+ elif __grains__["os"] == "OpenBSD":
+ if line.startswith(" Rank"):
continue
else:
tmp = line.split()
- timestr = ' '.join(tmp[1:5])
+ timestr = " ".join(tmp[1:5])
job = tmp[6]
- specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y '
- '%H:%M')[0:5])).isoformat().split('T')
+ specs = (
+ datetime.datetime(
+ *(time.strptime(timestr, "%b %d, %Y " "%H:%M")[0:5])
+ )
+ .isoformat()
+ .split("T")
+ )
specs.append(tmp[7])
specs.append(tmp[5])
- elif __grains__['os'] == 'FreeBSD':
- if line.startswith('Date'):
+ elif __grains__["os"] == "FreeBSD":
+ if line.startswith("Date"):
continue
else:
tmp = line.split()
- timestr = ' '.join(tmp[1:6])
+ timestr = " ".join(tmp[1:6])
job = tmp[8]
- specs = datetime.datetime(*(time.strptime(timestr,
- '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
+ specs = (
+ datetime.datetime(
+ *(time.strptime(timestr, "%b %d %H:%M:%S %Z %Y")[0:5])
+ )
+ .isoformat()
+ .split("T")
+ )
specs.append(tmp[7])
specs.append(tmp[6])
else:
- job, spec = line.split('\t')
+ job, spec = line.split("\t")
tmp = spec.split()
- timestr = ' '.join(tmp[0:5])
- specs = datetime.datetime(*(time.strptime(timestr)
- [0:5])).isoformat().split('T')
+ timestr = " ".join(tmp[0:5])
+ specs = (
+ datetime.datetime(*(time.strptime(timestr)[0:5])).isoformat().split("T")
+ )
specs.append(tmp[5])
specs.append(tmp[6])
# Search for any tags
- atc_out = _cmd('at', '-c', job)
+ atc_out = _cmd("at", "-c", job)
for line in atc_out.splitlines():
tmp = job_kw_regex.match(line)
if tmp:
job_tag = tmp.groups()[0]
- if __grains__['os'] in BSD:
+ if __grains__["os"] in BSD:
job = six.text_type(job)
else:
job = int(job)
@@ -154,17 +166,33 @@ def atq(tag=None):
# If I don't wrap job in an int(), it fails on salt but works on
# salt-call. With the int(), it fails with salt-call but not salt.
if tag == job_tag or tag == job:
- jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
- 'queue': specs[2], 'user': specs[3], 'tag': job_tag})
+ jobs.append(
+ {
+ "job": job,
+ "date": specs[0],
+ "time": specs[1],
+ "queue": specs[2],
+ "user": specs[3],
+ "tag": job_tag,
+ }
+ )
else:
- jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
- 'queue': specs[2], 'user': specs[3], 'tag': job_tag})
+ jobs.append(
+ {
+ "job": job,
+ "date": specs[0],
+ "time": specs[1],
+ "queue": specs[2],
+ "user": specs[3],
+ "tag": job_tag,
+ }
+ )
- return {'jobs': jobs}
+ return {"jobs": jobs}
def atrm(*args):
- '''
+ """
Remove jobs from the queue.
CLI Example:
@@ -174,41 +202,51 @@ def atrm(*args):
salt '*' at.atrm <jobid> <jobid> .. <jobid>
salt '*' at.atrm all
salt '*' at.atrm all [tag]
- '''
+ """
# Need to do this here also since we use atq()
- if not salt.utils.path.which('at'):
- return '\'at.atrm\' is not available.'
+ if not salt.utils.path.which("at"):
+ return "'at.atrm' is not available."
if not args:
- return {'jobs': {'removed': [], 'tag': None}}
+ return {"jobs": {"removed": [], "tag": None}}
# Convert all to strings
args = salt.utils.data.stringify(args)
- if args[0] == 'all':
+ if args[0] == "all":
if len(args) > 1:
- opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']])))
- ret = {'jobs': {'removed': opts, 'tag': args[1]}}
+ opts = list(list(map(str, [j["job"] for j in atq(args[1])["jobs"]])))
+ ret = {"jobs": {"removed": opts, "tag": args[1]}}
else:
- opts = list(list(map(str, [j['job'] for j in atq()['jobs']])))
- ret = {'jobs': {'removed': opts, 'tag': None}}
+ opts = list(list(map(str, [j["job"] for j in atq()["jobs"]])))
+ ret = {"jobs": {"removed": opts, "tag": None}}
else:
- opts = list(list(map(str, [i['job'] for i in atq()['jobs']
- if six.text_type(i['job']) in args])))
- ret = {'jobs': {'removed': opts, 'tag': None}}
+ opts = list(
+ list(
+ map(
+ str,
+ [
+ i["job"]
+ for i in atq()["jobs"]
+ if six.text_type(i["job"]) in args
+ ],
+ )
+ )
+ )
+ ret = {"jobs": {"removed": opts, "tag": None}}
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
- output = _cmd('at', '-d', ' '.join(opts))
+ output = _cmd("at", "-d", " ".join(opts))
if output is None:
- return '\'at.atrm\' is not available.'
+ return "'at.atrm' is not available."
return ret
def at(*args, **kwargs): # pylint: disable=C0103
- '''
+ """
Add a job to the queue.
The 'timespec' follows the format documented in the
@@ -221,50 +259,50 @@ def at(*args, **kwargs): # pylint: disable=C0103
salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>]
salt '*' at.at 12:05am '/sbin/reboot' tag=reboot
salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim
- '''
+ """
if len(args) < 2:
- return {'jobs': []}
+ return {"jobs": []}
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
- binary = salt.utils.path.which('at')
+ binary = salt.utils.path.which("at")
if not binary:
- return '\'at.at\' is not available.'
+ return "'at.at' is not available."
- if 'tag' in kwargs:
- stdin = '### SALT: {0}\n{1}'.format(kwargs['tag'], ' '.join(args[1:]))
+ if "tag" in kwargs:
+ stdin = "### SALT: {0}\n{1}".format(kwargs["tag"], " ".join(args[1:]))
else:
- stdin = ' '.join(args[1:])
+ stdin = " ".join(args[1:])
cmd = [binary, args[0]]
- cmd_kwargs = {'stdin': stdin, 'python_shell': False}
- if 'runas' in kwargs:
- cmd_kwargs['runas'] = kwargs['runas']
- output = __salt__['cmd.run'](cmd, **cmd_kwargs)
+ cmd_kwargs = {"stdin": stdin, "python_shell": False}
+ if "runas" in kwargs:
+ cmd_kwargs["runas"] = kwargs["runas"]
+ output = __salt__["cmd.run"](cmd, **cmd_kwargs)
if output is None:
- return '\'at.at\' is not available.'
+ return "'at.at' is not available."
- if output.endswith('Garbled time'):
- return {'jobs': [], 'error': 'invalid timespec'}
+ if output.endswith("Garbled time"):
+ return {"jobs": [], "error": "invalid timespec"}
- if output.startswith('warning: commands'):
+ if output.startswith("warning: commands"):
output = output.splitlines()[1]
- if output.startswith('commands will be executed'):
+ if output.startswith("commands will be executed"):
output = output.splitlines()[1]
output = output.split()[1]
- if __grains__['os'] in BSD:
+ if __grains__["os"] in BSD:
return atq(six.text_type(output))
else:
return atq(int(output))
def atc(jobid):
- '''
+ """
Print the at(1) script that will run for the passed job
id. This is mostly for debugging so the output will
just be text.
@@ -274,87 +312,87 @@ def atc(jobid):
.. code-block:: bash
salt '*' at.atc <jobid>
- '''
+ """
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
- output = _cmd('at', '-c', six.text_type(jobid))
+ output = _cmd("at", "-c", six.text_type(jobid))
if output is None:
- return '\'at.atc\' is not available.'
- elif output == '':
- return {'error': 'invalid job id \'{0}\''.format(jobid)}
+ return "'at.atc' is not available."
+ elif output == "":
+ return {"error": "invalid job id '{0}'".format(jobid)}
return output
def _atq(**kwargs):
- '''
+ """
Return the list of matching jobs
- '''
+ """
jobs = []
- runas = kwargs.get('runas', None)
- tag = kwargs.get('tag', None)
- hour = kwargs.get('hour', None)
- minute = kwargs.get('minute', None)
- day = kwargs.get('day', None)
- month = kwargs.get('month', None)
- year = kwargs.get('year', None)
+ runas = kwargs.get("runas", None)
+ tag = kwargs.get("tag", None)
+ hour = kwargs.get("hour", None)
+ minute = kwargs.get("minute", None)
+ day = kwargs.get("day", None)
+ month = kwargs.get("month", None)
+ year = kwargs.get("year", None)
if year and len(six.text_type(year)) == 2:
- year = '20{0}'.format(year)
+ year = "20{0}".format(year)
- jobinfo = atq()['jobs']
+ jobinfo = atq()["jobs"]
if not jobinfo:
- return {'jobs': jobs}
+ return {"jobs": jobs}
for job in jobinfo:
if not runas:
pass
- elif runas == job['user']:
+ elif runas == job["user"]:
pass
else:
continue
if not tag:
pass
- elif tag == job['tag']:
+ elif tag == job["tag"]:
pass
else:
continue
if not hour:
pass
- elif '{0:02d}'.format(int(hour)) == job['time'].split(':')[0]:
+ elif "{0:02d}".format(int(hour)) == job["time"].split(":")[0]:
pass
else:
continue
if not minute:
pass
- elif '{0:02d}'.format(int(minute)) == job['time'].split(':')[1]:
+ elif "{0:02d}".format(int(minute)) == job["time"].split(":")[1]:
pass
else:
continue
if not day:
pass
- elif '{0:02d}'.format(int(day)) == job['date'].split('-')[2]:
+ elif "{0:02d}".format(int(day)) == job["date"].split("-")[2]:
pass
else:
continue
if not month:
pass
- elif '{0:02d}'.format(int(month)) == job['date'].split('-')[1]:
+ elif "{0:02d}".format(int(month)) == job["date"].split("-")[1]:
pass
else:
continue
if not year:
pass
- elif year == job['date'].split('-')[0]:
+ elif year == job["date"].split("-")[0]:
pass
else:
continue
@@ -362,14 +400,14 @@ def _atq(**kwargs):
jobs.append(job)
if not jobs:
- note = 'No match jobs or time format error'
- return {'jobs': jobs, 'note': note}
+ note = "No match jobs or time format error"
+ return {"jobs": jobs, "note": note}
- return {'jobs': jobs}
+ return {"jobs": jobs}
def jobcheck(**kwargs):
- '''
+ """
Check for matching jobs in the queue.
The kwargs dict may include 'hour minute day month year tag runas'.
Other parameters will be ignored.
@@ -380,9 +418,9 @@ def jobcheck(**kwargs):
salt '*' at.jobcheck runas=jam day=13
salt '*' at.jobcheck day=13 month=12 year=13 tag=rose
- '''
+ """
if not kwargs:
- return {'error': 'You have given a condition'}
+ return {"error": "You have given a condition"}
return _atq(**kwargs)
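Editor's note: every platform branch in ``atq()`` above reduces an ``atq(1)`` output line to a ``(date, time)`` pair through the same ``strptime``/``isoformat`` chain. A sketch of that transform in isolation, standard library only (format string taken from the OpenBSD branch):

.. code-block:: python

    import datetime
    import time

    def split_timestamp(timestr, fmt):
        # struct_time fields 0..4 (year..minute) feed datetime(); isoformat()
        # then yields "YYYY-MM-DDTHH:MM:SS", which splits cleanly on "T".
        parsed = time.strptime(timestr, fmt)
        return datetime.datetime(*parsed[0:5]).isoformat().split("T")

    date_part, time_part = split_timestamp("Mar 27, 2020 14:30", "%b %d, %Y %H:%M")
    # -> "2020-03-27", "14:30:00"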
diff --git a/salt/modules/at_solaris.py b/salt/modules/at_solaris.py
index 30afef8c07c..86059b35e07 100644
--- a/salt/modules/at_solaris.py
+++ b/salt/modules/at_solaris.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Wrapper for at(1) on Solaris-like systems
.. note::
@@ -11,45 +11,48 @@ Wrapper for at(1) on Solaris-like systems
:platform: solaris,illumos,smartos
.. versionadded:: 2017.7.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import datetime
+import logging
+
# Import python libs
import re
import time
-import datetime
-import logging
-
-# Import 3rd-party libs
-# pylint: disable=import-error,redefined-builtin
-from salt.ext.six.moves import map
-from salt.ext import six
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
+from salt.ext import six
+
+# Import 3rd-party libs
+# pylint: disable=import-error,redefined-builtin
+from salt.ext.six.moves import map
log = logging.getLogger(__name__)
-__virtualname__ = 'at'
+__virtualname__ = "at"
def __virtual__():
- '''
+ """
We only deal with Solaris' specific version of at
- '''
+ """
if not salt.utils.platform.is_sunos():
- return (False, 'The at module could not be loaded: unsupported platform')
- if not salt.utils.path.which('at') or \
- not salt.utils.path.which('atq') or \
- not salt.utils.path.which('atrm'):
- return (False, 'The at module could not be loaded: at command not found')
+ return (False, "The at module could not be loaded: unsupported platform")
+ if (
+ not salt.utils.path.which("at")
+ or not salt.utils.path.which("atq")
+ or not salt.utils.path.which("atrm")
+ ):
+ return (False, "The at module could not be loaded: at command not found")
return __virtualname__
def atq(tag=None):
- '''
+ """
List all queued and running jobs or only those with
an optional 'tag'.
@@ -60,39 +63,41 @@ def atq(tag=None):
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number]
- '''
+ """
jobs = []
- res = __salt__['cmd.run_all']('atq')
+ res = __salt__["cmd.run_all"]("atq")
- if res['retcode'] > 0:
- return {'error': res['stderr']}
+ if res["retcode"] > 0:
+ return {"error": res["stderr"]}
# No jobs so return
- if res['stdout'] == 'no files in queue.':
- return {'jobs': jobs}
+ if res["stdout"] == "no files in queue.":
+ return {"jobs": jobs}
# Jobs created with at.at() will use the following
# comment to denote a tagged job.
- job_kw_regex = re.compile(r'^### SALT: (\w+)')
+ job_kw_regex = re.compile(r"^### SALT: (\w+)")
# Split each job into a dictionary and handle
# pulling out tags or only listing jobs with a certain
# tag
- for line in res['stdout'].splitlines():
- job_tag = ''
+ for line in res["stdout"].splitlines():
+ job_tag = ""
# skip header
- if line.startswith(' Rank'):
+ if line.startswith(" Rank"):
continue
# parse job output
tmp = line.split()
- timestr = ' '.join(tmp[1:5])
+ timestr = " ".join(tmp[1:5])
job = tmp[6]
- specs = datetime.datetime(
- *(time.strptime(timestr, '%b %d, %Y %H:%M')[0:5])
- ).isoformat().split('T')
+ specs = (
+ datetime.datetime(*(time.strptime(timestr, "%b %d, %Y %H:%M")[0:5]))
+ .isoformat()
+ .split("T")
+ )
specs.append(tmp[7])
specs.append(tmp[5])
@@ -100,11 +105,9 @@ def atq(tag=None):
job = six.text_type(job)
# search for any tags
- atjob_file = '/var/spool/cron/atjobs/{job}'.format(
- job=job
- )
- if __salt__['file.file_exists'](atjob_file):
- with salt.utils.files.fopen(atjob_file, 'r') as atjob:
+ atjob_file = "/var/spool/cron/atjobs/{job}".format(job=job)
+ if __salt__["file.file_exists"](atjob_file):
+ with salt.utils.files.fopen(atjob_file, "r") as atjob:
for line in atjob:
line = salt.utils.stringutils.to_unicode(line)
tmp = job_kw_regex.match(line)
@@ -113,17 +116,33 @@ def atq(tag=None):
# filter on tags
if not tag:
- jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
- 'queue': specs[2], 'user': specs[3], 'tag': job_tag})
+ jobs.append(
+ {
+ "job": job,
+ "date": specs[0],
+ "time": specs[1],
+ "queue": specs[2],
+ "user": specs[3],
+ "tag": job_tag,
+ }
+ )
elif tag and tag in [job_tag, job]:
- jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
- 'queue': specs[2], 'user': specs[3], 'tag': job_tag})
+ jobs.append(
+ {
+ "job": job,
+ "date": specs[0],
+ "time": specs[1],
+ "queue": specs[2],
+ "user": specs[3],
+ "tag": job_tag,
+ }
+ )
- return {'jobs': jobs}
+ return {"jobs": jobs}
def atrm(*args):
- '''
+ """
Remove jobs from the queue.
CLI Example:
@@ -133,43 +152,42 @@ def atrm(*args):
salt '*' at.atrm <jobid> <jobid> .. <jobid>
salt '*' at.atrm all
salt '*' at.atrm all [tag]
- '''
+ """
if not args:
- return {'jobs': {'removed': [], 'tag': None}}
+ return {"jobs": {"removed": [], "tag": None}}
- if args[0] == 'all':
+ if args[0] == "all":
if len(args) > 1:
- opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']])))
- ret = {'jobs': {'removed': opts, 'tag': args[1]}}
+ opts = list(list(map(str, [j["job"] for j in atq(args[1])["jobs"]])))
+ ret = {"jobs": {"removed": opts, "tag": args[1]}}
else:
- opts = list(list(map(str, [j['job'] for j in atq()['jobs']])))
- ret = {'jobs': {'removed': opts, 'tag': None}}
+ opts = list(list(map(str, [j["job"] for j in atq()["jobs"]])))
+ ret = {"jobs": {"removed": opts, "tag": None}}
else:
- opts = list(list(map(str, [i['job'] for i in atq()['jobs']
- if i['job'] in args])))
- ret = {'jobs': {'removed': opts, 'tag': None}}
+ opts = list(
+ list(map(str, [i["job"] for i in atq()["jobs"] if i["job"] in args]))
+ )
+ ret = {"jobs": {"removed": opts, "tag": None}}
# call atrm for each job in ret['jobs']['removed']
- for job in ret['jobs']['removed']:
- res_job = __salt__['cmd.run_all']('atrm {job}'.format(
- job=job
- ))
- if res_job['retcode'] > 0:
- if 'failed' not in ret['jobs']:
- ret['jobs']['failed'] = {}
- ret['jobs']['failed'][job] = res_job['stderr']
+ for job in ret["jobs"]["removed"]:
+ res_job = __salt__["cmd.run_all"]("atrm {job}".format(job=job))
+ if res_job["retcode"] > 0:
+ if "failed" not in ret["jobs"]:
+ ret["jobs"]["failed"] = {}
+ ret["jobs"]["failed"][job] = res_job["stderr"]
# remove failed from list
- if 'failed' in ret['jobs']:
- for job in ret['jobs']['failed']:
- ret['jobs']['removed'].remove(job)
+ if "failed" in ret["jobs"]:
+ for job in ret["jobs"]["failed"]:
+ ret["jobs"]["removed"].remove(job)
return ret
def at(*args, **kwargs): # pylint: disable=C0103
- '''
+ """
Add a job to the queue.
The 'timespec' follows the format documented in the
@@ -182,38 +200,38 @@ def at(*args, **kwargs): # pylint: disable=C0103
salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>]
salt '*' at.at 12:05am '/sbin/reboot' tag=reboot
salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim
- '''
+ """
# check args
if len(args) < 2:
- return {'jobs': []}
+ return {"jobs": []}
# build job
- if 'tag' in kwargs:
- stdin = '### SALT: {0}\n{1}'.format(kwargs['tag'], ' '.join(args[1:]))
+ if "tag" in kwargs:
+ stdin = "### SALT: {0}\n{1}".format(kwargs["tag"], " ".join(args[1:]))
else:
- stdin = ' '.join(args[1:])
+ stdin = " ".join(args[1:])
- cmd_kwargs = {'stdin': stdin, 'python_shell': False}
- if 'runas' in kwargs:
- cmd_kwargs['runas'] = kwargs['runas']
- res = __salt__['cmd.run_all']('at "{timespec}"'.format(
- timespec=args[0]
- ), **cmd_kwargs)
+ cmd_kwargs = {"stdin": stdin, "python_shell": False}
+ if "runas" in kwargs:
+ cmd_kwargs["runas"] = kwargs["runas"]
+ res = __salt__["cmd.run_all"](
+ 'at "{timespec}"'.format(timespec=args[0]), **cmd_kwargs
+ )
# verify job creation
- if res['retcode'] > 0:
- if 'bad time specification' in res['stderr']:
- return {'jobs': [], 'error': 'invalid timespec'}
- return {'jobs': [], 'error': res['stderr']}
+ if res["retcode"] > 0:
+ if "bad time specification" in res["stderr"]:
+ return {"jobs": [], "error": "invalid timespec"}
+ return {"jobs": [], "error": res["stderr"]}
else:
- jobid = res['stderr'].splitlines()[1]
+ jobid = res["stderr"].splitlines()[1]
jobid = six.text_type(jobid.split()[1])
return atq(jobid)
def atc(jobid):
- '''
+ """
Print the at(1) script that will run for the passed job
id. This is mostly for debugging so the output will
just be text.
@@ -223,87 +241,86 @@ def atc(jobid):
.. code-block:: bash
salt '*' at.atc <jobid>
- '''
+ """
- atjob_file = '/var/spool/cron/atjobs/{job}'.format(
- job=jobid
- )
- if __salt__['file.file_exists'](atjob_file):
- with salt.utils.files.fopen(atjob_file, 'r') as rfh:
- return ''.join([salt.utils.stringutils.to_unicode(x)
- for x in rfh.readlines()])
+ atjob_file = "/var/spool/cron/atjobs/{job}".format(job=jobid)
+ if __salt__["file.file_exists"](atjob_file):
+ with salt.utils.files.fopen(atjob_file, "r") as rfh:
+ return "".join(
+ [salt.utils.stringutils.to_unicode(x) for x in rfh.readlines()]
+ )
else:
- return {'error': 'invalid job id \'{0}\''.format(jobid)}
+ return {"error": "invalid job id '{0}'".format(jobid)}
def _atq(**kwargs):
- '''
+ """
Return the list of matching jobs
- '''
+ """
jobs = []
- runas = kwargs.get('runas', None)
- tag = kwargs.get('tag', None)
- hour = kwargs.get('hour', None)
- minute = kwargs.get('minute', None)
- day = kwargs.get('day', None)
- month = kwargs.get('month', None)
- year = kwargs.get('year', None)
+ runas = kwargs.get("runas", None)
+ tag = kwargs.get("tag", None)
+ hour = kwargs.get("hour", None)
+ minute = kwargs.get("minute", None)
+ day = kwargs.get("day", None)
+ month = kwargs.get("month", None)
+ year = kwargs.get("year", None)
if year and len(six.text_type(year)) == 2:
- year = '20{0}'.format(year)
+ year = "20{0}".format(year)
- jobinfo = atq()['jobs']
+ jobinfo = atq()["jobs"]
if not jobinfo:
- return {'jobs': jobs}
+ return {"jobs": jobs}
for job in jobinfo:
if not runas:
pass
- elif runas == job['user']:
+ elif runas == job["user"]:
pass
else:
continue
if not tag:
pass
- elif tag == job['tag']:
+ elif tag == job["tag"]:
pass
else:
continue
if not hour:
pass
- elif '{0:02d}'.format(int(hour)) == job['time'].split(':')[0]:
+ elif "{0:02d}".format(int(hour)) == job["time"].split(":")[0]:
pass
else:
continue
if not minute:
pass
- elif '{0:02d}'.format(int(minute)) == job['time'].split(':')[1]:
+ elif "{0:02d}".format(int(minute)) == job["time"].split(":")[1]:
pass
else:
continue
if not day:
pass
- elif '{0:02d}'.format(int(day)) == job['date'].split('-')[2]:
+ elif "{0:02d}".format(int(day)) == job["date"].split("-")[2]:
pass
else:
continue
if not month:
pass
- elif '{0:02d}'.format(int(month)) == job['date'].split('-')[1]:
+ elif "{0:02d}".format(int(month)) == job["date"].split("-")[1]:
pass
else:
continue
if not year:
pass
- elif year == job['date'].split('-')[0]:
+ elif year == job["date"].split("-")[0]:
pass
else:
continue
@@ -311,14 +328,14 @@ def _atq(**kwargs):
jobs.append(job)
if not jobs:
- note = 'No match jobs or time format error'
- return {'jobs': jobs, 'note': note}
+ note = "No match jobs or time format error"
+ return {"jobs": jobs, "note": note}
- return {'jobs': jobs}
+ return {"jobs": jobs}
def jobcheck(**kwargs):
- '''
+ """
Check for matching jobs in the queue.
The kwargs dict may include 'hour minute day month year tag runas'.
Other parameters will be ignored.
@@ -329,10 +346,10 @@ def jobcheck(**kwargs):
salt '*' at.jobcheck runas=jam day=13
salt '*' at.jobcheck day=13 month=12 year=13 tag=rose
- '''
+ """
if not kwargs:
- return {'error': 'You have given a condition'}
+ return {"error": "You have given a condition"}
return _atq(**kwargs)
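Editor's note: both ``at`` wrappers carry the tag in-band. ``at.at()`` prepends a ``### SALT: <tag>`` comment line to the job script, and ``atq()`` later recovers it with the same regex (from ``at -c`` output on Linux/BSD, from the ``/var/spool/cron/atjobs`` file on Solaris). A self-contained sketch of that round trip:

.. code-block:: python

    import re

    JOB_KW_REGEX = re.compile(r"^### SALT: (\w+)")

    def tag_job(command, tag=None):
        # Mirrors the stdin assembled in at.at(): the tag rides as a comment.
        return "### SALT: {0}\n{1}".format(tag, command) if tag else command

    def extract_tag(job_script):
        # Mirrors the per-line scan performed in atq().
        for line in job_script.splitlines():
            match = JOB_KW_REGEX.match(line)
            if match:
                return match.groups()[0]
        return ""

    assert extract_tag(tag_job("/sbin/reboot", "reboot")) == "reboot"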
diff --git a/salt/modules/augeas_cfg.py b/salt/modules/augeas_cfg.py
index dd679816b20..5b5feafd2e7 100644
--- a/salt/modules/augeas_cfg.py
+++ b/salt/modules/augeas_cfg.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manages configuration files via augeas
This module requires the ``augeas`` Python module.
@@ -22,110 +22,113 @@ This module requires the ``augeas`` Python module.
For affected Debian/Ubuntu hosts, installing ``libpython2.7`` has been
known to resolve the issue.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
# Import python libs
import os
import re
-import logging
-from salt.ext.six.moves import zip
-from salt.ext import six
-
-# Make sure augeas python interface is installed
-HAS_AUGEAS = False
-try:
- from augeas import Augeas as _Augeas # pylint: disable=no-name-in-module
- HAS_AUGEAS = True
-except ImportError:
- pass
# Import salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
+from salt.ext import six
+from salt.ext.six.moves import zip
+
+# Make sure augeas python interface is installed
+HAS_AUGEAS = False
+try:
+ from augeas import Augeas as _Augeas # pylint: disable=no-name-in-module
+
+ HAS_AUGEAS = True
+except ImportError:
+ pass
+
log = logging.getLogger(__name__)
# Define the module's virtual name
-__virtualname__ = 'augeas'
+__virtualname__ = "augeas"
METHOD_MAP = {
- 'set': 'set',
- 'setm': 'setm',
- 'mv': 'move',
- 'move': 'move',
- 'ins': 'insert',
- 'insert': 'insert',
- 'rm': 'remove',
- 'remove': 'remove',
+ "set": "set",
+ "setm": "setm",
+ "mv": "move",
+ "move": "move",
+ "ins": "insert",
+ "insert": "insert",
+ "rm": "remove",
+ "remove": "remove",
}
def __virtual__():
- '''
+ """
Only run this module if the augeas python module is installed
- '''
+ """
if HAS_AUGEAS:
return __virtualname__
- return (False, 'Cannot load augeas_cfg module: augeas python module not installed')
+ return (False, "Cannot load augeas_cfg module: augeas python module not installed")
def _recurmatch(path, aug):
- '''
+ """
Recursive generator providing the infrastructure for
augtools print behavior.
This function is based on test_augeas.py from
Harald Hoyer in the python-augeas
repository
- '''
+ """
if path:
- clean_path = path.rstrip('/*')
+ clean_path = path.rstrip("/*")
yield (clean_path, aug.get(path))
- for i in aug.match(clean_path + '/*'):
- i = i.replace('!', '\\!') # escape some dirs
+ for i in aug.match(clean_path + "/*"):
+ i = i.replace("!", "\\!") # escape some dirs
for _match in _recurmatch(i, aug):
yield _match
def _lstrip_word(word, prefix):
- '''
+ """
Return a copy of the string after the specified prefix was removed
from the beginning of the string
- '''
+ """
if six.text_type(word).startswith(prefix):
- return six.text_type(word)[len(prefix):]
+ return six.text_type(word)[len(prefix) :]
return word
def _check_load_paths(load_path):
- '''
+ """
Checks the validity of the load_path, returns a sanitized version
with invalid paths removed.
- '''
+ """
if load_path is None or not isinstance(load_path, six.string_types):
return None
_paths = []
- for _path in load_path.split(':'):
+ for _path in load_path.split(":"):
if os.path.isabs(_path) and os.path.isdir(_path):
_paths.append(_path)
else:
- log.info('Invalid augeas_cfg load_path entry: %s removed', _path)
+ log.info("Invalid augeas_cfg load_path entry: %s removed", _path)
if len(_paths) == 0:
return None
- return ':'.join(_paths)
+ return ":".join(_paths)
def execute(context=None, lens=None, commands=(), load_path=None):
- '''
+ """
Execute Augeas commands
.. versionadded:: 2014.7.0
@@ -152,29 +155,29 @@ def execute(context=None, lens=None, commands=(), load_path=None):
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
- ret = {'retval': False}
+ """
+ ret = {"retval": False}
arg_map = {
- 'set': (1, 2),
- 'setm': (2, 3),
- 'move': (2,),
- 'insert': (3,),
- 'remove': (1,),
+ "set": (1, 2),
+ "setm": (2, 3),
+ "move": (2,),
+ "insert": (3,),
+ "remove": (1,),
}
def make_path(path):
- '''
+ """
Return correct path
- '''
+ """
if not context:
return path
- if path.lstrip('/'):
+ if path.lstrip("/"):
if path.startswith(context):
return path
- path = path.lstrip('/')
+ path = path.lstrip("/")
return os.path.join(context, path)
else:
return context
@@ -185,17 +188,17 @@ def execute(context=None, lens=None, commands=(), load_path=None):
aug = _Augeas(flags=flags, loadpath=load_path)
if lens and context:
- aug.add_transform(lens, re.sub('^/files', '', context))
+ aug.add_transform(lens, re.sub("^/files", "", context))
aug.load()
for command in commands:
try:
# first part up to space is always the
# command name (i.e.: set, move)
- cmd, arg = command.split(' ', 1)
+ cmd, arg = command.split(" ", 1)
if cmd not in METHOD_MAP:
- ret['error'] = 'Command {0} is not supported (yet)'.format(cmd)
+ ret["error"] = "Command {0} is not supported (yet)".format(cmd)
return ret
method = METHOD_MAP[cmd]
@@ -204,65 +207,67 @@ def execute(context=None, lens=None, commands=(), load_path=None):
parts = salt.utils.args.shlex_split(arg)
if len(parts) not in nargs:
- err = '{0} takes {1} args: {2}'.format(method, nargs, parts)
+ err = "{0} takes {1} args: {2}".format(method, nargs, parts)
raise ValueError(err)
- if method == 'set':
+ if method == "set":
path = make_path(parts[0])
value = parts[1] if len(parts) == 2 else None
- args = {'path': path, 'value': value}
- elif method == 'setm':
+ args = {"path": path, "value": value}
+ elif method == "setm":
base = make_path(parts[0])
sub = parts[1]
value = parts[2] if len(parts) == 3 else None
- args = {'base': base, 'sub': sub, 'value': value}
- elif method == 'move':
+ args = {"base": base, "sub": sub, "value": value}
+ elif method == "move":
path = make_path(parts[0])
dst = parts[1]
- args = {'src': path, 'dst': dst}
- elif method == 'insert':
+ args = {"src": path, "dst": dst}
+ elif method == "insert":
label, where, path = parts
- if where not in ('before', 'after'):
+ if where not in ("before", "after"):
raise ValueError(
- 'Expected "before" or "after", not {0}'.format(where))
+ 'Expected "before" or "after", not {0}'.format(where)
+ )
path = make_path(path)
- args = {
- 'path': path,
- 'label': label,
- 'before': where == 'before'}
- elif method == 'remove':
+ args = {"path": path, "label": label, "before": where == "before"}
+ elif method == "remove":
path = make_path(parts[0])
- args = {'path': path}
+ args = {"path": path}
except ValueError as err:
log.error(err)
# if command.split fails arg will not be set
- if 'arg' not in locals():
+ if "arg" not in locals():
arg = command
- ret['error'] = 'Invalid formatted command, ' \
- 'see debug log for details: {0}'.format(arg)
+ ret["error"] = (
+ "Invalid formatted command, "
+ "see debug log for details: {0}".format(arg)
+ )
return ret
args = salt.utils.data.decode(args, to_str=True)
- log.debug('%s: %s', method, args)
+ log.debug("%s: %s", method, args)
func = getattr(aug, method)
func(**args)
try:
aug.save()
- ret['retval'] = True
+ ret["retval"] = True
except IOError as err:
- ret['error'] = six.text_type(err)
+ ret["error"] = six.text_type(err)
- if lens and not lens.endswith('.lns'):
- ret['error'] += '\nLenses are normally configured as "name.lns". ' \
- 'Did you mean "{0}.lns"?'.format(lens)
+ if lens and not lens.endswith(".lns"):
+ ret["error"] += (
+ '\nLenses are normally configured as "name.lns". '
+ 'Did you mean "{0}.lns"?'.format(lens)
+ )
aug.close()
return ret
-def get(path, value='', load_path=None):
- '''
+def get(path, value="", load_path=None):
+ """
Get a value for a specific augeas path
CLI Example:
@@ -283,31 +288,31 @@ def get(path, value='', load_path=None):
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
+ """
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
ret = {}
- path = path.rstrip('/')
+ path = path.rstrip("/")
if value:
- path += '/{0}'.format(value.strip('/'))
+ path += "/{0}".format(value.strip("/"))
try:
_match = aug.match(path)
except RuntimeError as err:
- return {'error': six.text_type(err)}
+ return {"error": six.text_type(err)}
if _match:
ret[path] = aug.get(path)
else:
- ret[path] = '' # node does not exist
+ ret[path] = "" # node does not exist
return ret
def setvalue(*args):
- '''
+ """
Set a value for a specific augeas path
CLI Example:
@@ -342,57 +347,55 @@ def setvalue(*args):
Ensures that the following line is present in /etc/sudoers::
%wheel ALL = PASSWD : ALL , NOPASSWD : /usr/bin/apt-get , /usr/bin/aptitude
- '''
+ """
load_path = None
- load_paths = [x for x in args if six.text_type(x).startswith('load_path=')]
+ load_paths = [x for x in args if six.text_type(x).startswith("load_path=")]
if load_paths:
if len(load_paths) > 1:
- raise SaltInvocationError(
- 'Only one \'load_path=\' value is permitted'
- )
+ raise SaltInvocationError("Only one 'load_path=' value is permitted")
else:
- load_path = load_paths[0].split('=', 1)[1]
+ load_path = load_paths[0].split("=", 1)[1]
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
- ret = {'retval': False}
+ ret = {"retval": False}
tuples = [
- x for x in args
- if not six.text_type(x).startswith('prefix=') and
- not six.text_type(x).startswith('load_path=')]
- prefix = [x for x in args if six.text_type(x).startswith('prefix=')]
+ x
+ for x in args
+ if not six.text_type(x).startswith("prefix=")
+ and not six.text_type(x).startswith("load_path=")
+ ]
+ prefix = [x for x in args if six.text_type(x).startswith("prefix=")]
if prefix:
if len(prefix) > 1:
- raise SaltInvocationError(
- 'Only one \'prefix=\' value is permitted'
- )
+ raise SaltInvocationError("Only one 'prefix=' value is permitted")
else:
- prefix = prefix[0].split('=', 1)[1]
+ prefix = prefix[0].split("=", 1)[1]
if len(tuples) % 2 != 0:
- raise SaltInvocationError('Uneven number of path/value arguments')
+ raise SaltInvocationError("Uneven number of path/value arguments")
tuple_iter = iter(tuples)
for path, value in zip(tuple_iter, tuple_iter):
target_path = path
if prefix:
- target_path = os.path.join(prefix.rstrip('/'), path.lstrip('/'))
+ target_path = os.path.join(prefix.rstrip("/"), path.lstrip("/"))
try:
aug.set(target_path, six.text_type(value))
except ValueError as err:
- ret['error'] = 'Multiple values: {0}'.format(err)
+ ret["error"] = "Multiple values: {0}".format(err)
try:
aug.save()
- ret['retval'] = True
+ ret["retval"] = True
except IOError as err:
- ret['error'] = six.text_type(err)
+ ret["error"] = six.text_type(err)
return ret
-def match(path, value='', load_path=None):
- '''
+def match(path, value="", load_path=None):
+ """
Get matches for path expression
CLI Example:
@@ -413,7 +416,7 @@ def match(path, value='', load_path=None):
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
+ """
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
@@ -433,7 +436,7 @@ def match(path, value='', load_path=None):
def remove(path, load_path=None):
- '''
+ """
Remove all nodes matching a path expression
CLI Example:
@@ -452,28 +455,28 @@ def remove(path, load_path=None):
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
+ """
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
- ret = {'retval': False}
+ ret = {"retval": False}
try:
count = aug.remove(path)
aug.save()
if count == -1:
- ret['error'] = 'Invalid node'
+ ret["error"] = "Invalid node"
else:
- ret['retval'] = True
+ ret["retval"] = True
except (RuntimeError, IOError) as err:
- ret['error'] = six.text_type(err)
+ ret["error"] = six.text_type(err)
- ret['count'] = count
+ ret["count"] = count
return ret
def ls(path, load_path=None): # pylint: disable=C0103
- '''
+ """
List the direct children of a node
CLI Example:
@@ -491,9 +494,10 @@ def ls(path, load_path=None): # pylint: disable=C0103
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
+ """
+
def _match(path):
- ''' Internal match function '''
+ """ Internal match function """
try:
matches = aug.match(salt.utils.stringutils.to_str(path))
except RuntimeError:
@@ -508,23 +512,23 @@ def ls(path, load_path=None): # pylint: disable=C0103
aug = _Augeas(loadpath=load_path)
- path = path.rstrip('/') + '/'
- match_path = path + '*'
+ path = path.rstrip("/") + "/"
+ match_path = path + "*"
matches = _match(match_path)
ret = {}
for key, value in six.iteritems(matches):
name = _lstrip_word(key, path)
- if _match(key + '/*'):
- ret[name + '/'] = value # has sub nodes, e.g. directory
+ if _match(key + "/*"):
+ ret[name + "/"] = value # has sub nodes, e.g. directory
else:
ret[name] = value
return ret
def tree(path, load_path=None):
- '''
+ """
Returns recursively the complete tree of a node
CLI Example:
@@ -542,11 +546,11 @@ def tree(path, load_path=None):
A colon-separated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
- '''
+ """
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
- path = path.rstrip('/') + '/'
+ path = path.rstrip("/") + "/"
match_path = path
return dict([i for i in _recurmatch(match_path, aug)])
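Editor's note: ``execute()`` above is a small command interpreter: the first whitespace-delimited token picks an Augeas method through ``METHOD_MAP``, the remainder is shlex-split, and the argument count is validated against ``arg_map`` before dispatch. A reduced sketch of that parsing step, using stdlib ``shlex`` in place of ``salt.utils.args.shlex_split`` and a trimmed map:

.. code-block:: python

    import shlex

    METHOD_MAP = {"set": "set", "mv": "move", "rm": "remove"}
    ARG_MAP = {"set": (1, 2), "move": (2,), "remove": (1,)}

    def parse_command(command):
        # First token is the command name; the rest is its argument string.
        cmd, arg = command.split(" ", 1)
        method = METHOD_MAP[cmd]
        parts = shlex.split(arg)
        if len(parts) not in ARG_MAP[method]:
            raise ValueError(
                "{0} takes {1} args: {2}".format(method, ARG_MAP[method], parts)
            )
        return method, parts

    assert parse_command("set /files/etc/hosts/1/canonical localhost") == (
        "set",
        ["/files/etc/hosts/1/canonical", "localhost"],
    )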
diff --git a/salt/modules/aws_sqs.py b/salt/modules/aws_sqs.py
index 6204921e890..a6c6d351f32 100644
--- a/salt/modules/aws_sqs.py
+++ b/salt/modules/aws_sqs.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for the Amazon Simple Queue Service.
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
@@ -14,25 +15,25 @@ from salt.ext import six
log = logging.getLogger(__name__)
-_OUTPUT = '--output json'
+_OUTPUT = "--output json"
def __virtual__():
- if salt.utils.path.which('aws'):
+ if salt.utils.path.which("aws"):
# awscli is installed, load the module
return True
- return (False, 'The module aws_sqs could not be loaded: aws command not found')
+ return (False, "The module aws_sqs could not be loaded: aws command not found")
def _region(region):
- '''
+ """
Return the region argument.
- '''
- return ' --region {r}'.format(r=region)
+ """
+ return " --region {r}".format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
- '''
+ """
Runs the given command against AWS.
cmd
Command to run
@@ -44,32 +45,29 @@ def _run_aws(cmd, region, opts, user, **kwargs):
Pass in from salt
kwargs
Key-value arguments to pass to the command
- '''
+ """
# These args need a specific key value that aren't
# valid python parameter keys
- receipthandle = kwargs.pop('receipthandle', None)
+ receipthandle = kwargs.pop("receipthandle", None)
if receipthandle:
- kwargs['receipt-handle'] = receipthandle
- num = kwargs.pop('num', None)
+ kwargs["receipt-handle"] = receipthandle
+ num = kwargs.pop("num", None)
if num:
- kwargs['max-number-of-messages'] = num
+ kwargs["max-number-of-messages"] = num
- _formatted_args = [
- '--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
+ _formatted_args = ['--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
- cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
- cmd=cmd,
- args=' '.join(_formatted_args),
- region=_region(region),
- out=_OUTPUT)
+ cmd = "aws sqs {cmd} {args} {region} {out}".format(
+ cmd=cmd, args=" ".join(_formatted_args), region=_region(region), out=_OUTPUT
+ )
- rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
+ rtn = __salt__["cmd.run"](cmd, runas=user, python_shell=False)
- return salt.utils.json.loads(rtn) if rtn else ''
+ return salt.utils.json.loads(rtn) if rtn else ""
def receive_message(queue, region, num=1, opts=None, user=None):
- '''
+ """
Receive one or more messages from a queue in a region
queue
@@ -96,24 +94,23 @@ def receive_message(queue, region, num=1, opts=None, user=None):
.. versionadded:: 2014.7.0
- '''
+ """
ret = {
- 'Messages': None,
- }
+ "Messages": None,
+ }
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
- out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
- num=num)
- ret['Messages'] = out['Messages']
+ out = _run_aws("receive-message", region, opts, user, queue=url_map[queue], num=num)
+ ret["Messages"] = out["Messages"]
return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
- '''
+ """
Delete one or more messages from a queue in a region
queue
@@ -140,20 +137,26 @@ def delete_message(queue, region, receipthandle, opts=None, user=None):
.. versionadded:: 2014.7.0
- '''
+ """
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
- out = _run_aws('delete-message', region, opts, user,
- receipthandle=receipthandle, queue=url_map[queue],)
+ out = _run_aws(
+ "delete-message",
+ region,
+ opts,
+ user,
+ receipthandle=receipthandle,
+ queue=url_map[queue],
+ )
return True
def list_queues(region, opts=None, user=None):
- '''
+ """
List the queues in the selected region.
region
@@ -169,18 +172,18 @@ def list_queues(region, opts=None, user=None):
salt '*' aws_sqs.list_queues <region>
- '''
- out = _run_aws('list-queues', region, opts, user)
+ """
+ out = _run_aws("list-queues", region, opts, user)
ret = {
- 'retcode': 0,
- 'stdout': out['QueueUrls'],
+ "retcode": 0,
+ "stdout": out["QueueUrls"],
}
return ret
def create_queue(name, region, opts=None, user=None):
- '''
+ """
Creates a queue with the correct name.
name
@@ -199,23 +202,21 @@ def create_queue(name, region, opts=None, user=None):
salt '*' aws_sqs.create_queue <sqs queue> <region>
- '''
+ """
- create = {'queue-name': name}
- out = _run_aws(
- 'create-queue', region=region, opts=opts,
- user=user, **create)
+ create = {"queue-name": name}
+ out = _run_aws("create-queue", region=region, opts=opts, user=user, **create)
ret = {
- 'retcode': 0,
- 'stdout': out['QueueUrl'],
- 'stderr': '',
+ "retcode": 0,
+ "stdout": out["QueueUrl"],
+ "stderr": "",
}
return ret
def delete_queue(name, region, opts=None, user=None):
- '''
+ """
Deletes a queue in the region.
name
@@ -233,40 +234,35 @@ def delete_queue(name, region, opts=None, user=None):
salt '*' aws_sqs.delete_queue <sqs queue> <region>
- '''
+ """
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
logger = logging.getLogger(__name__)
- logger.debug('map %s', six.text_type(url_map))
+ logger.debug("map %s", six.text_type(url_map))
if name in url_map:
- delete = {'queue-url': url_map[name]}
+ delete = {"queue-url": url_map[name]}
- rtn = _run_aws(
- 'delete-queue',
- region=region,
- opts=opts,
- user=user,
- **delete)
+ rtn = _run_aws("delete-queue", region=region, opts=opts, user=user, **delete)
success = True
- err = ''
- out = '{0} deleted'.format(name)
+ err = ""
+ out = "{0} deleted".format(name)
else:
- out = ''
+ out = ""
err = "Delete failed"
success = False
ret = {
- 'retcode': 0 if success else 1,
- 'stdout': out,
- 'stderr': err,
+ "retcode": 0 if success else 1,
+ "stdout": out,
+ "stderr": err,
}
return ret
def queue_exists(name, region, opts=None, user=None):
- '''
+ """
Returns True or False depending on whether the queue exists in the region
name
@@ -285,15 +281,15 @@ def queue_exists(name, region, opts=None, user=None):
salt '*' aws_sqs.queue_exists <sqs queue> <region>
- '''
+ """
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
- '''
+ """
Parse the queue to get a dict of name -> URL
- '''
- queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
+ """
+ queues = dict((q.split("/")[-1], q) for q in list_output["stdout"])
return queues
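Editor's note: ``aws_sqs`` shells out to the ``aws`` CLI rather than using boto: ``_run_aws()`` renders kwargs as ``--key "value"`` flags, appends the region and ``--output json``, and decodes whatever JSON comes back. A rough sketch of that construction with stdlib ``subprocess`` standing in for ``__salt__['cmd.run']`` (assumes an installed, configured awscli; Python 3.7+ for ``capture_output``):

.. code-block:: python

    import json
    import subprocess

    def run_aws(cmd, region, **kwargs):
        # Builds: aws sqs <cmd> --k "v" ... --region <region> --output json
        flags = ['--{0} "{1}"'.format(k, v) for k, v in kwargs.items()]
        full = "aws sqs {0} {1} --region {2} --output json".format(
            cmd, " ".join(flags), region
        )
        out = subprocess.run(
            full, shell=True, capture_output=True, text=True
        ).stdout
        return json.loads(out) if out else ""

    # e.g. run_aws("list-queues", "us-east-1") -> {"QueueUrls": [...]}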
diff --git a/salt/modules/azurearm_compute.py b/salt/modules/azurearm_compute.py
index 79c86d9e495..f66fac2c506 100644
--- a/salt/modules/azurearm_compute.py
+++ b/salt/modules/azurearm_compute.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Azure (ARM) Compute Execution Module
.. versionadded:: 2019.2.0
@@ -44,10 +44,11 @@ Azure (ARM) Compute Execution Module
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
-'''
+"""
# Python libs
from __future__ import absolute_import
+
import logging
# Azure libs
@@ -56,11 +57,12 @@ try:
import azure.mgmt.compute.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
+
HAS_LIBS = True
except ImportError:
pass
-__virtualname__ = 'azurearm_compute'
+__virtualname__ = "azurearm_compute"
log = logging.getLogger(__name__)
@@ -69,16 +71,18 @@ def __virtual__():
if not HAS_LIBS:
return (
False,
- 'The following dependencies are required to use the AzureARM modules: '
- 'Microsoft Azure SDK for Python >= 2.0rc6, '
- 'MS REST Azure (msrestazure) >= 0.4'
+ "The following dependencies are required to use the AzureARM modules: "
+ "Microsoft Azure SDK for Python >= 2.0rc6, "
+ "MS REST Azure (msrestazure) >= 0.4",
)
return __virtualname__
-def availability_set_create_or_update(name, resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def availability_set_create_or_update(
+ name, resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Create or update an availability set.
@@ -94,59 +98,61 @@ def availability_set_create_or_update(name, resource_group, **kwargs): # pylint
salt-call azurearm_compute.availability_set_create_or_update testset testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
# Use VM names to link to the IDs of existing VMs.
- if isinstance(kwargs.get('virtual_machines'), list):
+ if isinstance(kwargs.get("virtual_machines"), list):
vm_list = []
- for vm_name in kwargs.get('virtual_machines'):
- vm_instance = __salt__['azurearm_compute.virtual_machine_get'](
- name=vm_name,
- resource_group=resource_group,
- **kwargs
+ for vm_name in kwargs.get("virtual_machines"):
+ vm_instance = __salt__["azurearm_compute.virtual_machine_get"](
+ name=vm_name, resource_group=resource_group, **kwargs
)
- if 'error' not in vm_instance:
- vm_list.append({'id': str(vm_instance['id'])})
- kwargs['virtual_machines'] = vm_list
+ if "error" not in vm_instance:
+ vm_list.append({"id": str(vm_instance["id"])})
+ kwargs["virtual_machines"] = vm_list
try:
- setmodel = __utils__['azurearm.create_object_model']('compute', 'AvailabilitySet', **kwargs)
+ setmodel = __utils__["azurearm.create_object_model"](
+ "compute", "AvailabilitySet", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
av_set = compconn.availability_sets.create_or_update(
resource_group_name=resource_group,
availability_set_name=name,
- parameters=setmodel
+ parameters=setmodel,
)
result = av_set.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def availability_set_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete an availability set.
@@ -162,24 +168,23 @@ def availability_set_delete(name, resource_group, **kwargs):
salt-call azurearm_compute.availability_set_delete testset testgroup
- '''
+ """
result = False
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
compconn.availability_sets.delete(
- resource_group_name=resource_group,
- availability_set_name=name
+ resource_group_name=resource_group, availability_set_name=name
)
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
return result
def availability_set_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get a dictionary representing an availability set's properties.
@@ -195,24 +200,23 @@ def availability_set_get(name, resource_group, **kwargs):
salt-call azurearm_compute.availability_set_get testset testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
av_set = compconn.availability_sets.get(
- resource_group_name=resource_group,
- availability_set_name=name
+ resource_group_name=resource_group, availability_set_name=name
)
result = av_set.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def availability_sets_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all availability sets within a resource group.
@@ -226,27 +230,27 @@ def availability_sets_list(resource_group, **kwargs):
salt-call azurearm_compute.availability_sets_list testgroup
- '''
+ """
result = {}
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
- avail_sets = __utils__['azurearm.paged_object_to_list'](
- compconn.availability_sets.list(
- resource_group_name=resource_group
- )
+ avail_sets = __utils__["azurearm.paged_object_to_list"](
+ compconn.availability_sets.list(resource_group_name=resource_group)
)
for avail_set in avail_sets:
- result[avail_set['name']] = avail_set
+ result[avail_set["name"]] = avail_set
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def availability_sets_list_available_sizes(name, resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def availability_sets_list_available_sizes(
+ name, resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
List all available virtual machine sizes that can be used to
@@ -264,28 +268,29 @@ def availability_sets_list_available_sizes(name, resource_group, **kwargs): # p
salt-call azurearm_compute.availability_sets_list_available_sizes testset testgroup
- '''
+ """
result = {}
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
- sizes = __utils__['azurearm.paged_object_to_list'](
+ sizes = __utils__["azurearm.paged_object_to_list"](
compconn.availability_sets.list_available_sizes(
- resource_group_name=resource_group,
- availability_set_name=name
+ resource_group_name=resource_group, availability_set_name=name
)
)
for size in sizes:
- result[size['name']] = size
+ result[size["name"]] = size
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def virtual_machine_capture(name, destination_name, resource_group, prefix='capture-', overwrite=False, **kwargs):
- '''
+def virtual_machine_capture(
+ name, destination_name, resource_group, prefix="capture-", overwrite=False, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Captures the VM by copying virtual hard disks of the VM and outputs
@@ -308,13 +313,13 @@ def virtual_machine_capture(name, destination_name, resource_group, prefix='capt
salt-call azurearm_compute.virtual_machine_capture testvm testcontainer testgroup
- '''
+ """
# pylint: disable=invalid-name
VirtualMachineCaptureParameters = getattr(
- azure.mgmt.compute.models, 'VirtualMachineCaptureParameters'
+ azure.mgmt.compute.models, "VirtualMachineCaptureParameters"
)
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.capture(
@@ -323,21 +328,21 @@ def virtual_machine_capture(name, destination_name, resource_group, prefix='capt
parameters=VirtualMachineCaptureParameters(
vhd_prefix=prefix,
destination_container_name=destination_name,
- overwrite_vhds=overwrite
- )
+ overwrite_vhds=overwrite,
+ ),
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Retrieves information about the model view or the instance view of a
@@ -354,27 +359,27 @@ def virtual_machine_get(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_get testvm testgroup
- '''
- expand = kwargs.get('expand')
+ """
+ expand = kwargs.get("expand")
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.get(
- resource_group_name=resource_group,
- vm_name=name,
- expand=expand
+ resource_group_name=resource_group, vm_name=name, expand=expand
)
result = vm.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def virtual_machine_convert_to_managed_disks(name, resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def virtual_machine_convert_to_managed_disks(
+ name, resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Converts virtual machine disks from blob-based to managed disks. Virtual
@@ -391,26 +396,25 @@ def virtual_machine_convert_to_managed_disks(name, resource_group, **kwargs): #
salt-call azurearm_compute.virtual_machine_convert_to_managed_disks testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.convert_to_managed_disks(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_deallocate(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Power off a virtual machine and deallocate compute resources.
@@ -426,26 +430,25 @@ def virtual_machine_deallocate(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_deallocate testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.deallocate(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_generalize(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Set the state of a virtual machine to 'generalized'.
@@ -461,23 +464,22 @@ def virtual_machine_generalize(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_generalize testvm testgroup
- '''
+ """
result = False
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
compconn.virtual_machines.generalize(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
return result
def virtual_machines_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all virtual machines within a resource group.
@@ -491,26 +493,24 @@ def virtual_machines_list(resource_group, **kwargs):
salt-call azurearm_compute.virtual_machines_list testgroup
- '''
+ """
result = {}
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
- vms = __utils__['azurearm.paged_object_to_list'](
- compconn.virtual_machines.list(
- resource_group_name=resource_group
- )
+ vms = __utils__["azurearm.paged_object_to_list"](
+ compconn.virtual_machines.list(resource_group_name=resource_group)
)
for vm in vms: # pylint: disable=invalid-name
- result[vm['name']] = vm
+ result[vm["name"]] = vm
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machines_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
@@ -521,24 +521,26 @@ def virtual_machines_list_all(**kwargs):
salt-call azurearm_compute.virtual_machines_list_all
- '''
+ """
result = {}
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
- vms = __utils__['azurearm.paged_object_to_list'](
+ vms = __utils__["azurearm.paged_object_to_list"](
compconn.virtual_machines.list_all()
)
for vm in vms: # pylint: disable=invalid-name
- result[vm['name']] = vm
+ result[vm["name"]] = vm
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def virtual_machines_list_available_sizes(name, resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def virtual_machines_list_available_sizes(
+ name, resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Lists all available virtual machine sizes to which the specified virtual
@@ -555,27 +557,26 @@ def virtual_machines_list_available_sizes(name, resource_group, **kwargs): # py
salt-call azurearm_compute.virtual_machines_list_available_sizes testvm testgroup
- '''
+ """
result = {}
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
- sizes = __utils__['azurearm.paged_object_to_list'](
+ sizes = __utils__["azurearm.paged_object_to_list"](
compconn.virtual_machines.list_available_sizes(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
)
for size in sizes:
- result[size['name']] = size
+ result[size["name"]] = size
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_power_off(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Power off (stop) a virtual machine.
@@ -591,26 +592,25 @@ def virtual_machine_power_off(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_power_off testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.power_off(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_restart(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Restart a virtual machine.
@@ -626,26 +626,25 @@ def virtual_machine_restart(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_restart testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.restart(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_start(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Power on (start) a virtual machine.
@@ -661,26 +660,25 @@ def virtual_machine_start(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_start testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.start(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_machine_redeploy(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Redeploy a virtual machine.
@@ -696,19 +694,18 @@ def virtual_machine_redeploy(name, resource_group, **kwargs):
salt-call azurearm_compute.virtual_machine_redeploy testvm testgroup
- '''
- compconn = __utils__['azurearm.get_client']('compute', **kwargs)
+ """
+ compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.redeploy(
- resource_group_name=resource_group,
- vm_name=name
+ resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
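
The compute functions above all follow one calling convention: ``__utils__["azurearm.get_client"]`` builds an authenticated client, mutating calls return an Azure long-running-operation poller that is drained with ``wait()`` and ``result()``, and any ``CloudError`` is logged and handed back as an ``{"error": ...}`` dictionary rather than raised. A minimal sketch of that shared pattern (the function name is hypothetical; the helpers behave as in the module above)::

    def virtual_machine_example_op(name, resource_group, **kwargs):
        # Hypothetical illustration of the shared pattern; not part of the diff.
        compconn = __utils__["azurearm.get_client"]("compute", **kwargs)
        try:
            poller = compconn.virtual_machines.restart(
                resource_group_name=resource_group, vm_name=name
            )
            poller.wait()  # block until the ARM operation completes
            result = poller.result().as_dict()  # serialize the SDK model for Salt
        except CloudError as exc:
            __utils__["azurearm.log_cloud_error"]("compute", str(exc), **kwargs)
            result = {"error": str(exc)}  # errors are returned, not raised
        return result
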
diff --git a/salt/modules/azurearm_dns.py b/salt/modules/azurearm_dns.py
index e8f9e2e88c7..dcd84df8e2d 100644
--- a/salt/modules/azurearm_dns.py
+++ b/salt/modules/azurearm_dns.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Azure (ARM) DNS Execution Module
.. versionadded:: 3000
@@ -18,37 +18,43 @@ Azure (ARM) DNS Execution Module
* `azure-mgmt-web `_ >= 0.32.0
* `azure-storage `_ >= 0.34.3
* `msrestazure `_ >= 0.4.21
+
:platform: linux
+:configuration:
+ This module requires Azure Resource Manager credentials to be passed as keyword arguments
+ to every function in order to work properly.
-:configuration: This module requires Azure Resource Manager credentials to be passed as keyword arguments
-to every function in order to work properly.
-
- Required provider parameters:
+Required provider parameters:
if using username and password:
- * ``subscription_id``
- * ``username``
- * ``password``
+
+ * ``subscription_id``
+ * ``username``
+ * ``password``
if using a service principal:
- * ``subscription_id``
- * ``tenant``
- * ``client_id``
- * ``secret``
- Optional provider parameters:
+ * ``subscription_id``
+ * ``tenant``
+ * ``client_id``
+ * ``secret``
+
+Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.
- Possible values:
- * ``AZURE_PUBLIC_CLOUD`` (default)
- * ``AZURE_CHINA_CLOUD``
- * ``AZURE_US_GOV_CLOUD``
- * ``AZURE_GERMAN_CLOUD``
-'''
+ Possible values:
+
+ * ``AZURE_PUBLIC_CLOUD`` (default)
+ * ``AZURE_CHINA_CLOUD``
+ * ``AZURE_US_GOV_CLOUD``
+ * ``AZURE_GERMAN_CLOUD``
+
+"""
# Python libs
from __future__ import absolute_import
+
import logging
# Azure libs
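
The credential parameters documented in the docstring above are passed as keyword arguments on every call rather than read from configuration files. As a rough illustration, calling one of these functions from another Salt module with service-principal credentials might look like the following (every identifier below is a placeholder)::

    # Hypothetical credential kwargs; all values are placeholders.
    creds = {
        "subscription_id": "<subscription-uuid>",
        "tenant": "<tenant-uuid>",
        "client_id": "<application-id>",
        "secret": "<client-secret>",
        "cloud_environment": "AZURE_PUBLIC_CLOUD",  # optional; the default
    }
    zones = __salt__["azurearm_dns.zones_list"](**creds)
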
@@ -57,11 +63,12 @@ try:
import azure.mgmt.dns.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
+
HAS_LIBS = True
except ImportError:
pass
-__virtualname__ = 'azurearm_dns'
+__virtualname__ = "azurearm_dns"
log = logging.getLogger(__name__)
@@ -70,16 +77,16 @@ def __virtual__():
if not HAS_LIBS:
return (
False,
- 'The following dependencies are required to use the AzureARM modules: '
- 'Microsoft Azure SDK for Python >= 2.0rc6, '
- 'MS REST Azure (msrestazure) >= 0.4'
+ "The following dependencies are required to use the AzureARM modules: "
+ "Microsoft Azure SDK for Python >= 2.0rc6, "
+ "MS REST Azure (msrestazure) >= 0.4",
)
return __virtualname__
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs):
- '''
+ """
.. versionadded:: 3000
Creates or updates a record set within a DNS zone.
@@ -90,9 +97,10 @@ def record_set_create_or_update(name, zone_name, resource_group, record_type, **
:param resource_group: The name of the resource group.
- :param record_type: The type of DNS record in this record set. Record sets of type SOA can be
- updated but not created (they are created when the DNS zone is created).
- Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
+ :param record_type:
+ The type of DNS record in this record set. Record sets of type SOA can be
+ updated but not created (they are created when the DNS zone is created).
+ Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
@@ -101,13 +109,17 @@ def record_set_create_or_update(name, zone_name, resource_group, record_type, **
salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A
arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
- '''
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ """
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs)
+ record_set_model = __utils__["azurearm.create_object_model"](
+ "dns", "RecordSet", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -117,21 +129,23 @@ def record_set_create_or_update(name, zone_name, resource_group, record_type, **
resource_group_name=resource_group,
record_type=record_type,
parameters=record_set_model,
- if_match=kwargs.get('if_match'),
- if_none_match=kwargs.get('if_none_match')
+ if_match=kwargs.get("if_match"),
+ if_none_match=kwargs.get("if_none_match"),
)
result = record_set.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def record_set_delete(name, zone_name, resource_group, record_type, **kwargs):
- '''
+ """
.. versionadded:: 3000
Deletes a record set from a DNS zone. This operation cannot be undone.
@@ -142,9 +156,10 @@ def record_set_delete(name, zone_name, resource_group, record_type, **kwargs):
:param resource_group: The name of the resource group.
- :param record_type: The type of DNS record in this record set. Record sets of type SOA cannot be
- deleted (they are deleted when the DNS zone is deleted).
- Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
+ :param record_type:
+ The type of DNS record in this record set. Record sets of type SOA cannot be
+ deleted (they are deleted when the DNS zone is deleted).
+ Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
@@ -152,26 +167,26 @@ def record_set_delete(name, zone_name, resource_group, record_type, **kwargs):
salt-call azurearm_dns.record_set_delete myhost myzone testgroup A
- '''
+ """
result = False
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
record_set = dnsconn.record_sets.delete(
relative_record_set_name=name,
zone_name=zone_name,
resource_group_name=resource_group,
record_type=record_type,
- if_match=kwargs.get('if_match')
+ if_match=kwargs.get("if_match"),
)
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
return result
def record_set_get(name, zone_name, resource_group, record_type, **kwargs):
- '''
+ """
.. versionadded:: 3000
Get a dictionary representing a record set's properties.
@@ -182,8 +197,9 @@ def record_set_get(name, zone_name, resource_group, record_type, **kwargs):
:param resource_group: The name of the resource group.
- :param record_type: The type of DNS record in this record set.
- Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
+ :param record_type:
+ The type of DNS record in this record set.
+ Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
@@ -191,26 +207,28 @@ def record_set_get(name, zone_name, resource_group, record_type, **kwargs):
salt-call azurearm_dns.record_set_get '@' myzone testgroup SOA
- '''
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ """
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
record_set = dnsconn.record_sets.get(
relative_record_set_name=name,
zone_name=zone_name,
resource_group_name=resource_group,
- record_type=record_type
+ record_type=record_type,
)
result = record_set.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def record_sets_list_by_type(zone_name, resource_group, record_type, top=None, recordsetnamesuffix=None, **kwargs):
- '''
+def record_sets_list_by_type(
+ zone_name, resource_group, record_type, top=None, recordsetnamesuffix=None, **kwargs
+):
+ """
.. versionadded:: 3000
Lists the record sets of a specified type in a DNS zone.
@@ -219,14 +237,17 @@ def record_sets_list_by_type(zone_name, resource_group, record_type, top=None, r
:param resource_group: The name of the resource group.
- :param record_type: The type of record sets to enumerate.
- Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
+ :param record_type:
+ The type of record sets to enumerate.
+ Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
- :param top: The maximum number of record sets to return. If not specified,
- returns up to 100 record sets.
+ :param top:
+ The maximum number of record sets to return. If not specified,
+ returns up to 100 record sets.
- :param recordsetnamesuffix: The suffix label of the record set name that has
- to be used to filter the record set enumerations.
+ :param recordsetnamesuffix:
+ The suffix label of the record set name that has
+ to be used to filter the record set enumerations.
CLI Example:
@@ -234,31 +255,33 @@ def record_sets_list_by_type(zone_name, resource_group, record_type, top=None, r
salt-call azurearm_dns.record_sets_list_by_type myzone testgroup SOA
- '''
+ """
result = {}
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- record_sets = __utils__['azurearm.paged_object_to_list'](
+ record_sets = __utils__["azurearm.paged_object_to_list"](
dnsconn.record_sets.list_by_type(
zone_name=zone_name,
resource_group_name=resource_group,
record_type=record_type,
top=top,
- recordsetnamesuffix=recordsetnamesuffix
+ recordsetnamesuffix=recordsetnamesuffix,
)
)
for record_set in record_sets:
- result[record_set['name']] = record_set
+ result[record_set["name"]] = record_set
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def record_sets_list_by_dns_zone(zone_name, resource_group, top=None, recordsetnamesuffix=None, **kwargs):
- '''
+def record_sets_list_by_dns_zone(
+ zone_name, resource_group, top=None, recordsetnamesuffix=None, **kwargs
+):
+ """
.. versionadded:: 3000
Lists all record sets in a DNS zone.
@@ -267,11 +290,13 @@ def record_sets_list_by_dns_zone(zone_name, resource_group, top=None, recordsetn
:param resource_group: The name of the resource group.
- :param top: The maximum number of record sets to return. If not specified,
- returns up to 100 record sets.
+ :param top:
+ The maximum number of record sets to return. If not specified,
+ returns up to 100 record sets.
- :param recordsetnamesuffix: The suffix label of the record set name that has
- to be used to filter the record set enumerations.
+ :param recordsetnamesuffix:
+ The suffix label of the record set name that has
+ to be used to filter the record set enumerations.
CLI Example:
@@ -279,30 +304,30 @@ def record_sets_list_by_dns_zone(zone_name, resource_group, top=None, recordsetn
salt-call azurearm_dns.record_sets_list_by_dns_zone myzone testgroup
- '''
+ """
result = {}
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- record_sets = __utils__['azurearm.paged_object_to_list'](
+ record_sets = __utils__["azurearm.paged_object_to_list"](
dnsconn.record_sets.list_by_dns_zone(
zone_name=zone_name,
resource_group_name=resource_group,
top=top,
- recordsetnamesuffix=recordsetnamesuffix
+ recordsetnamesuffix=recordsetnamesuffix,
)
)
for record_set in record_sets:
- result[record_set['name']] = record_set
+ result[record_set["name"]] = record_set
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def zone_create_or_update(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 3000
Creates or updates a DNS zone. Does not modify DNS records within the zone.
@@ -317,23 +342,29 @@ def zone_create_or_update(name, resource_group, **kwargs):
salt-call azurearm_dns.zone_create_or_update myzone testgroup
- '''
+ """
# DNS zones are global objects
- kwargs['location'] = 'global'
+ kwargs["location"] = "global"
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
# Convert list of ID strings to list of dictionaries with id key.
- if isinstance(kwargs.get('registration_virtual_networks'), list):
- kwargs['registration_virtual_networks'] = [{'id': vnet} for vnet in kwargs['registration_virtual_networks']]
+ if isinstance(kwargs.get("registration_virtual_networks"), list):
+ kwargs["registration_virtual_networks"] = [
+ {"id": vnet} for vnet in kwargs["registration_virtual_networks"]
+ ]
- if isinstance(kwargs.get('resolution_virtual_networks'), list):
- kwargs['resolution_virtual_networks'] = [{'id': vnet} for vnet in kwargs['resolution_virtual_networks']]
+ if isinstance(kwargs.get("resolution_virtual_networks"), list):
+ kwargs["resolution_virtual_networks"] = [
+ {"id": vnet} for vnet in kwargs["resolution_virtual_networks"]
+ ]
try:
- zone_model = __utils__['azurearm.create_object_model']('dns', 'Zone', **kwargs)
+ zone_model = __utils__["azurearm.create_object_model"]("dns", "Zone", **kwargs)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -341,21 +372,23 @@ def zone_create_or_update(name, resource_group, **kwargs):
zone_name=name,
resource_group_name=resource_group,
parameters=zone_model,
- if_match=kwargs.get('if_match'),
- if_none_match=kwargs.get('if_none_match')
+ if_match=kwargs.get("if_match"),
+ if_none_match=kwargs.get("if_none_match"),
)
result = zone.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
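
Note that ``zone_create_or_update`` accepts plain resource-ID strings for the registration and resolution virtual networks and converts them to the ``{"id": ...}`` dictionaries the SDK model expects. A hedged usage sketch (the resource ID and the ``creds`` credential kwargs are placeholders)::

    # Hypothetical call; the resource ID below is a placeholder.
    __salt__["azurearm_dns.zone_create_or_update"](
        "myzone",
        "testgroup",
        registration_virtual_networks=[
            "/subscriptions/<sub>/resourceGroups/testgroup"
            "/providers/Microsoft.Network/virtualNetworks/testnet"
        ],
        **creds
    )
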
def zone_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 3000
Delete a DNS zone within a resource group.
@@ -370,25 +403,25 @@ def zone_delete(name, resource_group, **kwargs):
salt-call azurearm_dns.zone_delete myzone testgroup
- '''
+ """
result = False
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
zone = dnsconn.zones.delete(
zone_name=name,
resource_group_name=resource_group,
- if_match=kwargs.get('if_match')
+ if_match=kwargs.get("if_match"),
)
zone.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
return result
def zone_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 3000
Get a dictionary representing a DNS zone's properties, but not the
@@ -404,32 +437,30 @@ def zone_get(name, resource_group, **kwargs):
salt-call azurearm_dns.zone_get myzone testgroup
- '''
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ """
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- zone = dnsconn.zones.get(
- zone_name=name,
- resource_group_name=resource_group
- )
+ zone = dnsconn.zones.get(zone_name=name, resource_group_name=resource_group)
result = zone.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def zones_list_by_resource_group(resource_group, top=None, **kwargs):
- '''
+ """
.. versionadded:: 3000
Lists the DNS zones in a resource group.
:param resource_group: The name of the resource group.
- :param top: The maximum number of DNS zones to return. If not specified,
- returns up to 100 zones.
+ :param top:
+ The maximum number of DNS zones to return. If not specified,
+ returns up to 100 zones.
CLI Example:
@@ -437,34 +468,34 @@ def zones_list_by_resource_group(resource_group, top=None, **kwargs):
salt-call azurearm_dns.zones_list_by_resource_group testgroup
- '''
+ """
result = {}
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- zones = __utils__['azurearm.paged_object_to_list'](
+ zones = __utils__["azurearm.paged_object_to_list"](
dnsconn.zones.list_by_resource_group(
- resource_group_name=resource_group,
- top=top
+ resource_group_name=resource_group, top=top
)
)
for zone in zones:
- result[zone['name']] = zone
+ result[zone["name"]] = zone
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def zones_list(top=None, **kwargs):
- '''
+ """
.. versionadded:: 3000
Lists the DNS zones in all resource groups in a subscription.
- :param top: The maximum number of DNS zones to return. If not specified,
- returns up to 100 zones.
+ :param top:
+ The maximum number of DNS zones to return. If not specified,
+ returns up to 100 zones.
CLI Example:
@@ -472,16 +503,16 @@ def zones_list(top=None, **kwargs):
salt-call azurearm_dns.zones_list
- '''
+ """
result = {}
- dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
+ dnsconn = __utils__["azurearm.get_client"]("dns", **kwargs)
try:
- zones = __utils__['azurearm.paged_object_to_list'](dnsconn.zones.list(top=top))
+ zones = __utils__["azurearm.paged_object_to_list"](dnsconn.zones.list(top=top))
for zone in zones:
- result[zone['name']] = zone
+ result[zone["name"]] = zone
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("dns", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
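
All of the listing functions in this module flatten the SDK's paged iterators with ``__utils__["azurearm.paged_object_to_list"]`` and key the results by resource name, so consumers index by name instead of scanning a list. A small consumer sketch, assuming ``zones_list`` as defined above (``creds`` stands in for the credential kwargs)::

    zones = __salt__["azurearm_dns.zones_list"](top=50, **creds)
    if "error" in zones:
        raise RuntimeError(zones["error"])  # failures come back as a dict, not an exception
    for name, zone in zones.items():
        print(name, zone.get("number_of_record_sets"))
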
diff --git a/salt/modules/azurearm_network.py b/salt/modules/azurearm_network.py
index e5f027ececb..53278d20402 100644
--- a/salt/modules/azurearm_network.py
+++ b/salt/modules/azurearm_network.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Azure (ARM) Network Execution Module
.. versionadded:: 2019.2.0
@@ -44,10 +44,11 @@ Azure (ARM) Network Execution Module
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
-'''
+"""
# Python libs
from __future__ import absolute_import
+
import logging
# Salt libs
@@ -60,11 +61,12 @@ try:
import azure.mgmt.network.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
+
HAS_LIBS = True
except ImportError:
pass
-__virtualname__ = 'azurearm_network'
+__virtualname__ = "azurearm_network"
log = logging.getLogger(__name__)
@@ -73,16 +75,16 @@ def __virtual__():
if not HAS_LIBS:
return (
False,
- 'The following dependencies are required to use the AzureARM modules: '
- 'Microsoft Azure SDK for Python >= 2.0rc6, '
- 'MS REST Azure (msrestazure) >= 0.4'
+ "The following dependencies are required to use the AzureARM modules: "
+ "Microsoft Azure SDK for Python >= 2.0rc6, "
+ "MS REST Azure (msrestazure) >= 0.4",
)
return __virtualname__
def check_dns_name_availability(name, region, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Check whether a domain name in the current zone is available for use.
@@ -97,24 +99,24 @@ def check_dns_name_availability(name, region, **kwargs):
salt-call azurearm_network.check_dns_name_availability testdnsname westus
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
check_dns_name = netconn.check_dns_name_availability(
- location=region,
- domain_name_label=name
+ location=region, domain_name_label=name
)
result = check_dns_name.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def check_ip_address_availability(ip_address, virtual_network, resource_group,
- **kwargs):
- '''
+def check_ip_address_availability(
+ ip_address, virtual_network, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Check that a private ip address is available within the specified
@@ -134,23 +136,24 @@ def check_ip_address_availability(ip_address, virtual_network, resource_group,
salt-call azurearm_network.check_ip_address_availability 10.0.0.4 testnet testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
check_ip = netconn.virtual_networks.check_ip_address_availability(
resource_group_name=resource_group,
virtual_network_name=virtual_network,
- ip_address=ip_address)
+ ip_address=ip_address,
+ )
result = check_ip.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def default_security_rule_get(name, security_group, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a default security rule within a security group.
@@ -169,37 +172,33 @@ def default_security_rule_get(name, security_group, resource_group, **kwargs):
salt-call azurearm_network.default_security_rule_get DenyAllOutBound testnsg testgroup
- '''
+ """
result = {}
default_rules = default_security_rules_list(
- security_group=security_group,
- resource_group=resource_group,
- **kwargs
+ security_group=security_group, resource_group=resource_group, **kwargs
)
- if isinstance(default_rules, dict) and 'error' in default_rules:
+ if isinstance(default_rules, dict) and "error" in default_rules:
return default_rules
try:
for default_rule in default_rules:
- if default_rule['name'] == name:
+ if default_rule["name"] == name:
result = default_rule
if not result:
result = {
- 'error': 'Unable to find {0} in {1}!'.format(name, security_group)
+ "error": "Unable to find {0} in {1}!".format(name, security_group)
}
except KeyError as exc:
- log.error(
- 'Unable to find {0} in {1}!'.format(name, security_group)
- )
- result = {'error': str(exc)}
+ log.error("Unable to find {0} in {1}!".format(name, security_group))
+ result = {"error": str(exc)}
return result
def default_security_rules_list(security_group, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List default security rules within a security group.
@@ -215,31 +214,27 @@ def default_security_rules_list(security_group, resource_group, **kwargs):
salt-call azurearm_network.default_security_rules_list testnsg testgroup
- '''
+ """
result = {}
secgroup = network_security_group_get(
- security_group=security_group,
- resource_group=resource_group,
- **kwargs
+ security_group=security_group, resource_group=resource_group, **kwargs
)
- if 'error' in secgroup:
+ if "error" in secgroup:
return secgroup
try:
- result = secgroup['default_security_rules']
+ result = secgroup["default_security_rules"]
except KeyError as exc:
- log.error(
- 'No default security rules found for {0}!'.format(security_group)
- )
- result = {'error': str(exc)}
+ log.error("No default security rules found for {0}!".format(security_group))
+ result = {"error": str(exc)}
return result
def security_rules_list(security_group, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List security rules within a network security group.
@@ -255,27 +250,40 @@ def security_rules_list(security_group, resource_group, **kwargs):
salt-call azurearm_network.security_rules_list testnsg testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
secrules = netconn.security_rules.list(
network_security_group_name=security_group,
- resource_group_name=resource_group
+ resource_group_name=resource_group,
)
- result = __utils__['azurearm.paged_object_to_list'](secrules)
+ result = __utils__["azurearm.paged_object_to_list"](secrules)
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def security_rule_create_or_update(name, access, direction, priority, protocol, security_group, resource_group,
- source_address_prefix=None, destination_address_prefix=None, source_port_range=None,
- destination_port_range=None, source_address_prefixes=None,
- destination_address_prefixes=None, source_port_ranges=None,
- destination_port_ranges=None, **kwargs):
- '''
+def security_rule_create_or_update(
+ name,
+ access,
+ direction,
+ priority,
+ protocol,
+ security_group,
+ resource_group,
+ source_address_prefix=None,
+ destination_address_prefix=None,
+ source_port_range=None,
+ destination_port_range=None,
+ source_address_prefixes=None,
+ destination_address_prefixes=None,
+ source_port_ranges=None,
+ destination_port_ranges=None,
+ **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Create or update a security rule within a specified network security group.
@@ -342,32 +350,34 @@ def security_rule_create_or_update(name, access, direction, priority, protocol,
source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \
destination_port_range='1-1024'
- '''
+ """
exclusive_params = [
- ('source_port_ranges', 'source_port_range'),
- ('source_address_prefixes', 'source_address_prefix'),
- ('destination_port_ranges', 'destination_port_range'),
- ('destination_address_prefixes', 'destination_address_prefix'),
+ ("source_port_ranges", "source_port_range"),
+ ("source_address_prefixes", "source_address_prefix"),
+ ("destination_port_ranges", "destination_port_range"),
+ ("destination_address_prefixes", "destination_address_prefix"),
]
for params in exclusive_params:
# pylint: disable=eval-used
if not eval(params[0]) and not eval(params[1]):
log.error(
- 'Either the {0} or {1} parameter must be provided!'.format(params[0], params[1])
+ "Either the {0} or {1} parameter must be provided!".format(
+ params[0], params[1]
+ )
)
return False
# pylint: disable=eval-used
if eval(params[0]):
# pylint: disable=exec-used
- exec('{0} = None'.format(params[1]))
+ exec("{0} = None".format(params[1]))
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rulemodel = __utils__['azurearm.create_object_model'](
- 'network',
- 'SecurityRule',
+ rulemodel = __utils__["azurearm.create_object_model"](
+ "network",
+ "SecurityRule",
name=name,
access=access,
direction=direction,
@@ -384,7 +394,9 @@ def security_rule_create_or_update(name, access, direction, priority, protocol,
**kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -392,23 +404,24 @@ def security_rule_create_or_update(name, access, direction, priority, protocol,
resource_group_name=resource_group,
network_security_group_name=security_group,
security_rule_name=name,
- security_rule_parameters=rulemodel
+ security_rule_parameters=rulemodel,
)
secrule.wait()
secrule_result = secrule.result()
result = secrule_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
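
For each source/destination parameter, ``security_rule_create_or_update`` requires either the singular or the plural form and discards the singular when the plural is supplied. A hedged example mixing the two forms (all values are placeholders)::

    # Hypothetical rule; values are placeholders.
    __salt__["azurearm_network.security_rule_create_or_update"](
        "allow-https",
        access="allow",
        direction="inbound",
        priority=110,
        protocol="tcp",
        security_group="testnsg",
        resource_group="testgroup",
        source_address_prefix="*",  # singular form for the source...
        source_port_range="*",
        destination_address_prefix="*",
        destination_port_ranges=["443", "8443"],  # ...plural form for the destination ports
        **creds
    )
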
-def security_rule_delete(security_rule, security_group, resource_group,
- **kwargs):
- '''
+def security_rule_delete(security_rule, security_group, resource_group, **kwargs):
+ """
.. versionadded:: 2019.2.0
Delete a security rule within a specified security group.
@@ -427,25 +440,25 @@ def security_rule_delete(security_rule, security_group, resource_group,
salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
secrule = netconn.security_rules.delete(
network_security_group_name=security_group,
resource_group_name=resource_group,
- security_rule_name=security_rule
+ security_rule_name=security_rule,
)
secrule.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def security_rule_get(security_rule, security_group, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get a security rule within a specified network security group.
@@ -464,24 +477,26 @@ def security_rule_get(security_rule, security_group, resource_group, **kwargs):
salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
secrule = netconn.security_rules.get(
network_security_group_name=security_group,
resource_group_name=resource_group,
- security_rule_name=security_rule
+ security_rule_name=security_rule,
)
result = secrule.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def network_security_group_create_or_update(name, resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def network_security_group_create_or_update(
+ name, resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Create or update a network security group.
@@ -497,47 +512,51 @@ def network_security_group_create_or_update(name, resource_group, **kwargs): #
salt-call azurearm_network.network_security_group_create_or_update testnsg testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- secgroupmodel = __utils__['azurearm.create_object_model']('network', 'NetworkSecurityGroup', **kwargs)
+ secgroupmodel = __utils__["azurearm.create_object_model"](
+ "network", "NetworkSecurityGroup", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
secgroup = netconn.network_security_groups.create_or_update(
resource_group_name=resource_group,
network_security_group_name=name,
- parameters=secgroupmodel
+ parameters=secgroupmodel,
)
secgroup.wait()
secgroup_result = secgroup.result()
result = secgroup_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def network_security_group_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a network security group within a resource group.
@@ -553,24 +572,23 @@ def network_security_group_delete(name, resource_group, **kwargs):
salt-call azurearm_network.network_security_group_delete testnsg testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
secgroup = netconn.network_security_groups.delete(
- resource_group_name=resource_group,
- network_security_group_name=name
+ resource_group_name=resource_group, network_security_group_name=name
)
secgroup.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def network_security_group_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a network security group within a resource group.
@@ -586,23 +604,22 @@ def network_security_group_get(name, resource_group, **kwargs):
salt-call azurearm_network.network_security_group_get testnsg testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
secgroup = netconn.network_security_groups.get(
- resource_group_name=resource_group,
- network_security_group_name=name
+ resource_group_name=resource_group, network_security_group_name=name
)
result = secgroup.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def network_security_groups_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all network security groups within a resource group.
@@ -616,26 +633,24 @@ def network_security_groups_list(resource_group, **kwargs):
salt-call azurearm_network.network_security_groups_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- secgroups = __utils__['azurearm.paged_object_to_list'](
- netconn.network_security_groups.list(
- resource_group_name=resource_group
- )
+ secgroups = __utils__["azurearm.paged_object_to_list"](
+ netconn.network_security_groups.list(resource_group_name=resource_group)
)
for secgroup in secgroups:
- result[secgroup['name']] = secgroup
+ result[secgroup["name"]] = secgroup
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def network_security_groups_list_all(**kwargs): # pylint: disable=invalid-name
- '''
+ """
.. versionadded:: 2019.2.0
List all network security groups within a subscription.
@@ -646,24 +661,24 @@ def network_security_groups_list_all(**kwargs): # pylint: disable=invalid-name
salt-call azurearm_network.network_security_groups_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- secgroups = __utils__['azurearm.paged_object_to_list'](
+ secgroups = __utils__["azurearm.paged_object_to_list"](
netconn.network_security_groups.list_all()
)
for secgroup in secgroups:
- result[secgroup['name']] = secgroup
+ result[secgroup["name"]] = secgroup
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def subnets_list(virtual_network, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all subnets within a virtual network.
@@ -679,28 +694,27 @@ def subnets_list(virtual_network, resource_group, **kwargs):
salt-call azurearm_network.subnets_list testnet testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- subnets = __utils__['azurearm.paged_object_to_list'](
+ subnets = __utils__["azurearm.paged_object_to_list"](
netconn.subnets.list(
- resource_group_name=resource_group,
- virtual_network_name=virtual_network
+ resource_group_name=resource_group, virtual_network_name=virtual_network
)
)
for subnet in subnets:
- result[subnet['name']] = subnet
+ result[subnet["name"]] = subnet
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def subnet_get(name, virtual_network, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific subnet.
@@ -719,25 +733,27 @@ def subnet_get(name, virtual_network, resource_group, **kwargs):
salt-call azurearm_network.subnet_get testsubnet testnet testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
subnet = netconn.subnets.get(
resource_group_name=resource_group,
virtual_network_name=virtual_network,
- subnet_name=name
+ subnet_name=name,
)
result = subnet.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def subnet_create_or_update(name, address_prefix, virtual_network, resource_group, **kwargs):
- '''
+def subnet_create_or_update(
+ name, address_prefix, virtual_network, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Create or update a subnet.
@@ -759,39 +775,39 @@ def subnet_create_or_update(name, address_prefix, virtual_network, resource_grou
salt-call azurearm_network.subnet_create_or_update testsubnet \
'10.0.0.0/24' testnet testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
# Use NSG name to link to the ID of an existing NSG.
- if kwargs.get('network_security_group'):
+ if kwargs.get("network_security_group"):
nsg = network_security_group_get(
- name=kwargs['network_security_group'],
+ name=kwargs["network_security_group"],
resource_group=resource_group,
**kwargs
)
- if 'error' not in nsg:
- kwargs['network_security_group'] = {'id': str(nsg['id'])}
+ if "error" not in nsg:
+ kwargs["network_security_group"] = {"id": str(nsg["id"])}
# Use Route Table name to link to the ID of an existing Route Table.
- if kwargs.get('route_table'):
+ if kwargs.get("route_table"):
rt_table = route_table_get(
- name=kwargs['route_table'],
- resource_group=resource_group,
- **kwargs
+ name=kwargs["route_table"], resource_group=resource_group, **kwargs
)
- if 'error' not in rt_table:
- kwargs['route_table'] = {'id': str(rt_table['id'])}
+ if "error" not in rt_table:
+ kwargs["route_table"] = {"id": str(rt_table["id"])}
try:
- snetmodel = __utils__['azurearm.create_object_model'](
- 'network',
- 'Subnet',
+ snetmodel = __utils__["azurearm.create_object_model"](
+ "network",
+ "Subnet",
address_prefix=address_prefix,
resource_group=resource_group,
**kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -805,16 +821,18 @@ def subnet_create_or_update(name, address_prefix, virtual_network, resource_grou
sn_result = subnet.result()
result = sn_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
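
``subnet_create_or_update`` resolves the ``network_security_group`` and ``route_table`` names to full resource IDs before building the ``Subnet`` model, so callers pass bare names of resources in the same group. A hedged sketch (names and credentials are placeholders)::

    # Hypothetical call; the NSG and route table must already exist in the group.
    __salt__["azurearm_network.subnet_create_or_update"](
        "testsubnet",
        "10.0.0.0/24",
        "testnet",
        "testgroup",
        network_security_group="testnsg",  # resolved to {"id": ...} by the function
        route_table="testroutes",
        **creds
    )
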
def subnet_delete(name, virtual_network, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a subnet.
@@ -833,25 +851,25 @@ def subnet_delete(name, virtual_network, resource_group, **kwargs):
salt-call azurearm_network.subnet_delete testsubnet testnet testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
subnet = netconn.subnets.delete(
resource_group_name=resource_group,
virtual_network_name=virtual_network,
- subnet_name=name
+ subnet_name=name,
)
subnet.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def virtual_networks_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all virtual networks within a subscription.
@@ -862,23 +880,25 @@ def virtual_networks_list_all(**kwargs):
salt-call azurearm_network.virtual_networks_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- vnets = __utils__['azurearm.paged_object_to_list'](netconn.virtual_networks.list_all())
+ vnets = __utils__["azurearm.paged_object_to_list"](
+ netconn.virtual_networks.list_all()
+ )
for vnet in vnets:
- result[vnet['name']] = vnet
+ result[vnet["name"]] = vnet
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def virtual_networks_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all virtual networks within a resource group.
@@ -892,31 +912,26 @@ def virtual_networks_list(resource_group, **kwargs):
salt-call azurearm_network.virtual_networks_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- vnets = __utils__['azurearm.paged_object_to_list'](
- netconn.virtual_networks.list(
- resource_group_name=resource_group
- )
+ vnets = __utils__["azurearm.paged_object_to_list"](
+ netconn.virtual_networks.list(resource_group_name=resource_group)
)
for vnet in vnets:
- result[vnet['name']] = vnet
+ result[vnet["name"]] = vnet
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def virtual_network_create_or_update(name,
- address_prefixes,
- resource_group,
- **kwargs):
- '''
+def virtual_network_create_or_update(name, address_prefixes, resource_group, **kwargs):
+ """
.. versionadded:: 2019.2.0
Create or update a virtual network.
@@ -937,62 +952,62 @@ def virtual_network_create_or_update(name,
salt-call azurearm_network.virtual_network_create_or_update \
testnet ['10.0.0.0/16'] testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
if not isinstance(address_prefixes, list):
- log.error(
- 'Address prefixes must be specified as a list!'
- )
+ log.error("Address prefixes must be specified as a list!")
return False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
- address_space = {'address_prefixes': address_prefixes}
- dhcp_options = {'dns_servers': kwargs.get('dns_servers')}
+ address_space = {"address_prefixes": address_prefixes}
+ dhcp_options = {"dns_servers": kwargs.get("dns_servers")}
try:
- vnetmodel = __utils__['azurearm.create_object_model'](
- 'network',
- 'VirtualNetwork',
+ vnetmodel = __utils__["azurearm.create_object_model"](
+ "network",
+ "VirtualNetwork",
address_space=address_space,
dhcp_options=dhcp_options,
**kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
vnet = netconn.virtual_networks.create_or_update(
virtual_network_name=name,
resource_group_name=resource_group,
- parameters=vnetmodel
+ parameters=vnetmodel,
)
vnet.wait()
vnet_result = vnet.result()
result = vnet_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
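
``virtual_network_create_or_update`` insists that ``address_prefixes`` be a list and folds any ``dns_servers`` kwarg into the model's DHCP options. A hedged sketch (values are placeholders)::

    # Hypothetical call; prefixes must be passed as a list.
    __salt__["azurearm_network.virtual_network_create_or_update"](
        "testnet",
        ["10.0.0.0/16", "10.1.0.0/16"],
        "testgroup",
        dns_servers=["10.0.0.4"],  # becomes the dhcp_options of the model
        **creds
    )
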
def virtual_network_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a virtual network.
@@ -1008,24 +1023,23 @@ def virtual_network_delete(name, resource_group, **kwargs):
salt-call azurearm_network.virtual_network_delete testnet testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
vnet = netconn.virtual_networks.delete(
- virtual_network_name=name,
- resource_group_name=resource_group
+ virtual_network_name=name, resource_group_name=resource_group
)
vnet.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def virtual_network_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific virtual network.
@@ -1041,23 +1055,22 @@ def virtual_network_get(name, resource_group, **kwargs):
salt-call azurearm_network.virtual_network_get testnet testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
vnet = netconn.virtual_networks.get(
- virtual_network_name=name,
- resource_group_name=resource_group
+ virtual_network_name=name, resource_group_name=resource_group
)
result = vnet.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
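Note the in-band error convention: on CloudError these get/list wrappers
return a dict with a single "error" key rather than raising. Callers in this
same diff test for it before dereferencing, e.g.:

    vnet = virtual_network_get(name="testnet", resource_group="testgroup")
    if "error" not in vnet:
        # Safe to read the returned properties.
        vnet_id = vnet["id"]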
def load_balancers_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all load balancers within a subscription.
@@ -1068,23 +1081,25 @@ def load_balancers_list_all(**kwargs):
salt-call azurearm_network.load_balancers_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- load_balancers = __utils__['azurearm.paged_object_to_list'](netconn.load_balancers.list_all())
+ load_balancers = __utils__["azurearm.paged_object_to_list"](
+ netconn.load_balancers.list_all()
+ )
for load_balancer in load_balancers:
- result[load_balancer['name']] = load_balancer
+ result[load_balancer["name"]] = load_balancer
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def load_balancers_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all load balancers within a resource group.
@@ -1098,27 +1113,25 @@ def load_balancers_list(resource_group, **kwargs):
salt-call azurearm_network.load_balancers_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- load_balancers = __utils__['azurearm.paged_object_to_list'](
- netconn.load_balancers.list(
- resource_group_name=resource_group
- )
+ load_balancers = __utils__["azurearm.paged_object_to_list"](
+ netconn.load_balancers.list(resource_group_name=resource_group)
)
for load_balancer in load_balancers:
- result[load_balancer['name']] = load_balancer
+ result[load_balancer["name"]] = load_balancer
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def load_balancer_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific load balancer.
@@ -1134,23 +1147,22 @@ def load_balancer_get(name, resource_group, **kwargs):
salt-call azurearm_network.load_balancer_get testlb testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
load_balancer = netconn.load_balancers.get(
- load_balancer_name=name,
- resource_group_name=resource_group
+ load_balancer_name=name, resource_group_name=resource_group
)
result = load_balancer.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def load_balancer_create_or_update(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Create or update a load balancer within a specified resource group.
@@ -1166,165 +1178,179 @@ def load_balancer_create_or_update(name, resource_group, **kwargs):
salt-call azurearm_network.load_balancer_create_or_update testlb testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
- if isinstance(kwargs.get('frontend_ip_configurations'), list):
- for idx in range(0, len(kwargs['frontend_ip_configurations'])):
+ if isinstance(kwargs.get("frontend_ip_configurations"), list):
+ for idx in range(0, len(kwargs["frontend_ip_configurations"])):
# Use Public IP Address name to link to the ID of an existing Public IP
- if 'public_ip_address' in kwargs['frontend_ip_configurations'][idx]:
+ if "public_ip_address" in kwargs["frontend_ip_configurations"][idx]:
pub_ip = public_ip_address_get(
- name=kwargs['frontend_ip_configurations'][idx]['public_ip_address'],
+ name=kwargs["frontend_ip_configurations"][idx]["public_ip_address"],
resource_group=resource_group,
**kwargs
)
- if 'error' not in pub_ip:
- kwargs['frontend_ip_configurations'][idx]['public_ip_address'] = {'id': str(pub_ip['id'])}
+ if "error" not in pub_ip:
+ kwargs["frontend_ip_configurations"][idx]["public_ip_address"] = {
+ "id": str(pub_ip["id"])
+ }
# Use Subnet name to link to the ID of an existing Subnet
- elif 'subnet' in kwargs['frontend_ip_configurations'][idx]:
- vnets = virtual_networks_list(
- resource_group=resource_group,
- **kwargs
- )
- if 'error' not in vnets:
+ elif "subnet" in kwargs["frontend_ip_configurations"][idx]:
+ vnets = virtual_networks_list(resource_group=resource_group, **kwargs)
+ if "error" not in vnets:
for vnet in vnets:
subnets = subnets_list(
virtual_network=vnet,
resource_group=resource_group,
**kwargs
)
- if kwargs['frontend_ip_configurations'][idx]['subnet'] in subnets:
- kwargs['frontend_ip_configurations'][idx]['subnet'] = {
- 'id': str(subnets[kwargs['frontend_ip_configurations'][idx]['subnet']]['id'])
+ if (
+ kwargs["frontend_ip_configurations"][idx]["subnet"]
+ in subnets
+ ):
+ kwargs["frontend_ip_configurations"][idx]["subnet"] = {
+ "id": str(
+ subnets[
+ kwargs["frontend_ip_configurations"][idx][
+ "subnet"
+ ]
+ ]["id"]
+ )
}
break
- id_url = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/{3}/{4}'
+ id_url = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/{3}/{4}"
- if isinstance(kwargs.get('load_balancing_rules'), list):
- for idx in range(0, len(kwargs['load_balancing_rules'])):
+ if isinstance(kwargs.get("load_balancing_rules"), list):
+ for idx in range(0, len(kwargs["load_balancing_rules"])):
# Link to sub-objects which might be created at the same time as the load balancer
- if 'frontend_ip_configuration' in kwargs['load_balancing_rules'][idx]:
- kwargs['load_balancing_rules'][idx]['frontend_ip_configuration'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "frontend_ip_configuration" in kwargs["load_balancing_rules"][idx]:
+ kwargs["load_balancing_rules"][idx]["frontend_ip_configuration"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'frontendIPConfigurations',
- kwargs['load_balancing_rules'][idx]['frontend_ip_configuration']
+ "frontendIPConfigurations",
+ kwargs["load_balancing_rules"][idx][
+ "frontend_ip_configuration"
+ ],
)
}
- if 'backend_address_pool' in kwargs['load_balancing_rules'][idx]:
- kwargs['load_balancing_rules'][idx]['backend_address_pool'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "backend_address_pool" in kwargs["load_balancing_rules"][idx]:
+ kwargs["load_balancing_rules"][idx]["backend_address_pool"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'backendAddressPools',
- kwargs['load_balancing_rules'][idx]['backend_address_pool']
+ "backendAddressPools",
+ kwargs["load_balancing_rules"][idx]["backend_address_pool"],
)
}
- if 'probe' in kwargs['load_balancing_rules'][idx]:
- kwargs['load_balancing_rules'][idx]['probe'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "probe" in kwargs["load_balancing_rules"][idx]:
+ kwargs["load_balancing_rules"][idx]["probe"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'probes',
- kwargs['load_balancing_rules'][idx]['probe']
+ "probes",
+ kwargs["load_balancing_rules"][idx]["probe"],
)
}
- if isinstance(kwargs.get('inbound_nat_rules'), list):
- for idx in range(0, len(kwargs['inbound_nat_rules'])):
+ if isinstance(kwargs.get("inbound_nat_rules"), list):
+ for idx in range(0, len(kwargs["inbound_nat_rules"])):
# Link to sub-objects which might be created at the same time as the load balancer
- if 'frontend_ip_configuration' in kwargs['inbound_nat_rules'][idx]:
- kwargs['inbound_nat_rules'][idx]['frontend_ip_configuration'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "frontend_ip_configuration" in kwargs["inbound_nat_rules"][idx]:
+ kwargs["inbound_nat_rules"][idx]["frontend_ip_configuration"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'frontendIPConfigurations',
- kwargs['inbound_nat_rules'][idx]['frontend_ip_configuration']
+ "frontendIPConfigurations",
+ kwargs["inbound_nat_rules"][idx]["frontend_ip_configuration"],
)
}
- if isinstance(kwargs.get('inbound_nat_pools'), list):
- for idx in range(0, len(kwargs['inbound_nat_pools'])):
+ if isinstance(kwargs.get("inbound_nat_pools"), list):
+ for idx in range(0, len(kwargs["inbound_nat_pools"])):
# Link to sub-objects which might be created at the same time as the load balancer
- if 'frontend_ip_configuration' in kwargs['inbound_nat_pools'][idx]:
- kwargs['inbound_nat_pools'][idx]['frontend_ip_configuration'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "frontend_ip_configuration" in kwargs["inbound_nat_pools"][idx]:
+ kwargs["inbound_nat_pools"][idx]["frontend_ip_configuration"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'frontendIPConfigurations',
- kwargs['inbound_nat_pools'][idx]['frontend_ip_configuration']
+ "frontendIPConfigurations",
+ kwargs["inbound_nat_pools"][idx]["frontend_ip_configuration"],
)
}
- if isinstance(kwargs.get('outbound_nat_rules'), list):
- for idx in range(0, len(kwargs['outbound_nat_rules'])):
+ if isinstance(kwargs.get("outbound_nat_rules"), list):
+ for idx in range(0, len(kwargs["outbound_nat_rules"])):
# Link to sub-objects which might be created at the same time as the load balancer
- if 'frontend_ip_configuration' in kwargs['outbound_nat_rules'][idx]:
- kwargs['outbound_nat_rules'][idx]['frontend_ip_configuration'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "frontend_ip_configuration" in kwargs["outbound_nat_rules"][idx]:
+ kwargs["outbound_nat_rules"][idx]["frontend_ip_configuration"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'frontendIPConfigurations',
- kwargs['outbound_nat_rules'][idx]['frontend_ip_configuration']
+ "frontendIPConfigurations",
+ kwargs["outbound_nat_rules"][idx]["frontend_ip_configuration"],
)
}
- if 'backend_address_pool' in kwargs['outbound_nat_rules'][idx]:
- kwargs['outbound_nat_rules'][idx]['backend_address_pool'] = {
- 'id': id_url.format(
- kwargs.get('subscription_id'),
+ if "backend_address_pool" in kwargs["outbound_nat_rules"][idx]:
+ kwargs["outbound_nat_rules"][idx]["backend_address_pool"] = {
+ "id": id_url.format(
+ kwargs.get("subscription_id"),
resource_group,
name,
- 'backendAddressPools',
- kwargs['outbound_nat_rules'][idx]['backend_address_pool']
+ "backendAddressPools",
+ kwargs["outbound_nat_rules"][idx]["backend_address_pool"],
)
}
try:
- lbmodel = __utils__['azurearm.create_object_model']('network', 'LoadBalancer', **kwargs)
+ lbmodel = __utils__["azurearm.create_object_model"](
+ "network", "LoadBalancer", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
load_balancer = netconn.load_balancers.create_or_update(
resource_group_name=resource_group,
load_balancer_name=name,
- parameters=lbmodel
+ parameters=lbmodel,
)
load_balancer.wait()
lb_result = load_balancer.result()
result = lb_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
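The id_url template above lets load-balancing rules, NAT rules, and pools
reference sibling sub-objects by bare name, expanding each name into a full
ARM resource ID. An illustration with hypothetical values:

    id_url = (
        "/subscriptions/{0}/resourceGroups/{1}"
        "/providers/Microsoft.Network/loadBalancers/{2}/{3}/{4}"
    )
    # Hypothetical inputs, for illustration only.
    ref = id_url.format(
        "00000000-0000-0000-0000-000000000000",  # subscription_id
        "testgroup",                             # resource group
        "testlb",                                # load balancer name
        "frontendIPConfigurations",              # sub-object collection
        "front1",                                # sub-object name
    )
    # ref is now the fully qualified ID of the "front1" frontend
    # IP configuration on the "testlb" load balancer.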
def load_balancer_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a load balancer.
@@ -1340,24 +1366,23 @@ def load_balancer_delete(name, resource_group, **kwargs):
salt-call azurearm_network.load_balancer_delete testlb testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
load_balancer = netconn.load_balancers.delete(
- load_balancer_name=name,
- resource_group_name=resource_group
+ load_balancer_name=name, resource_group_name=resource_group
)
load_balancer.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def usages_list(location, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List subscription network usage for a location.
@@ -1370,19 +1395,21 @@ def usages_list(location, **kwargs):
salt-call azurearm_network.usages_list westus
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location))
+ result = __utils__["azurearm.paged_object_to_list"](
+ netconn.usages.list(location)
+ )
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def network_interface_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a network interface.
@@ -1398,25 +1425,24 @@ def network_interface_delete(name, resource_group, **kwargs):
salt-call azurearm_network.network_interface_delete test-iface0 testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
nic = netconn.network_interfaces.delete(
- network_interface_name=name,
- resource_group_name=resource_group
+ network_interface_name=name, resource_group_name=resource_group
)
nic.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def network_interface_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific network interface.
@@ -1432,25 +1458,25 @@ def network_interface_get(name, resource_group, **kwargs):
salt-call azurearm_network.network_interface_get test-iface0 testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
nic = netconn.network_interfaces.get(
- network_interface_name=name,
- resource_group_name=resource_group
+ network_interface_name=name, resource_group_name=resource_group
)
result = nic.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def network_interface_create_or_update(name, ip_configurations, subnet, virtual_network,
- resource_group, **kwargs):
- '''
+def network_interface_create_or_update(
+ name, ip_configurations, subnet, virtual_network, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Create or update a network interface within a specified resource group.
@@ -1475,40 +1501,36 @@ def network_interface_create_or_update(name, ip_configurations, subnet, virtual_
        salt-call azurearm_network.network_interface_create_or_update test-iface0 "[{'name': 'testipconfig1'}]" \
testsubnet testnet testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
# Use NSG name to link to the ID of an existing NSG.
- if kwargs.get('network_security_group'):
+ if kwargs.get("network_security_group"):
nsg = network_security_group_get(
- name=kwargs['network_security_group'],
+ name=kwargs["network_security_group"],
resource_group=resource_group,
**kwargs
)
- if 'error' not in nsg:
- kwargs['network_security_group'] = {'id': str(nsg['id'])}
+ if "error" not in nsg:
+ kwargs["network_security_group"] = {"id": str(nsg["id"])}
# Use VM name to link to the ID of an existing VM.
- if kwargs.get('virtual_machine'):
- vm_instance = __salt__['azurearm_compute.virtual_machine_get'](
- name=kwargs['virtual_machine'],
- resource_group=resource_group,
- **kwargs
+ if kwargs.get("virtual_machine"):
+ vm_instance = __salt__["azurearm_compute.virtual_machine_get"](
+ name=kwargs["virtual_machine"], resource_group=resource_group, **kwargs
)
- if 'error' not in vm_instance:
- kwargs['virtual_machine'] = {'id': str(vm_instance['id'])}
+ if "error" not in vm_instance:
+ kwargs["virtual_machine"] = {"id": str(vm_instance["id"])}
# Loop through IP Configurations and build each dictionary to pass to model creation.
if isinstance(ip_configurations, list):
@@ -1518,60 +1540,67 @@ def network_interface_create_or_update(name, ip_configurations, subnet, virtual_
resource_group=resource_group,
**kwargs
)
- if 'error' not in subnet:
- subnet = {'id': str(subnet['id'])}
+ if "error" not in subnet:
+ subnet = {"id": str(subnet["id"])}
for ipconfig in ip_configurations:
- if 'name' in ipconfig:
- ipconfig['subnet'] = subnet
- if isinstance(ipconfig.get('application_gateway_backend_address_pools'), list):
+ if "name" in ipconfig:
+ ipconfig["subnet"] = subnet
+ if isinstance(
+ ipconfig.get("application_gateway_backend_address_pools"), list
+ ):
# TODO: Add ID lookup for referenced object names
pass
- if isinstance(ipconfig.get('load_balancer_backend_address_pools'), list):
+ if isinstance(
+ ipconfig.get("load_balancer_backend_address_pools"), list
+ ):
# TODO: Add ID lookup for referenced object names
pass
- if isinstance(ipconfig.get('load_balancer_inbound_nat_rules'), list):
+ if isinstance(
+ ipconfig.get("load_balancer_inbound_nat_rules"), list
+ ):
# TODO: Add ID lookup for referenced object names
pass
- if ipconfig.get('public_ip_address'):
+ if ipconfig.get("public_ip_address"):
pub_ip = public_ip_address_get(
- name=ipconfig['public_ip_address'],
+ name=ipconfig["public_ip_address"],
resource_group=resource_group,
**kwargs
)
- if 'error' not in pub_ip:
- ipconfig['public_ip_address'] = {'id': str(pub_ip['id'])}
+ if "error" not in pub_ip:
+ ipconfig["public_ip_address"] = {"id": str(pub_ip["id"])}
try:
- nicmodel = __utils__['azurearm.create_object_model'](
- 'network',
- 'NetworkInterface',
- ip_configurations=ip_configurations,
- **kwargs
+ nicmodel = __utils__["azurearm.create_object_model"](
+ "network", "NetworkInterface", ip_configurations=ip_configurations, **kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
interface = netconn.network_interfaces.create_or_update(
resource_group_name=resource_group,
network_interface_name=name,
- parameters=nicmodel
+ parameters=nicmodel,
)
interface.wait()
nic_result = interface.result()
result = nic_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
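As the resolution loop above shows, each entry in ip_configurations may name
a public IP address or rely on the subnet argument, and the module swaps the
names for {"id": ...} references before building the model. A hedged example
of a caller-side payload (all names are placeholders):

    ip_configurations = [
        {
            "name": "testipconfig1",
            # Resolved to {"id": ...} via public_ip_address_get above.
            "public_ip_address": "test-pub-ip",
        }
    ]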
def network_interfaces_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all network interfaces within a subscription.
@@ -1582,23 +1611,25 @@ def network_interfaces_list_all(**kwargs):
salt-call azurearm_network.network_interfaces_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- nics = __utils__['azurearm.paged_object_to_list'](netconn.network_interfaces.list_all())
+ nics = __utils__["azurearm.paged_object_to_list"](
+ netconn.network_interfaces.list_all()
+ )
for nic in nics:
- result[nic['name']] = nic
+ result[nic["name"]] = nic
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def network_interfaces_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all network interfaces within a resource group.
@@ -1612,28 +1643,26 @@ def network_interfaces_list(resource_group, **kwargs):
salt-call azurearm_network.network_interfaces_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- nics = __utils__['azurearm.paged_object_to_list'](
- netconn.network_interfaces.list(
- resource_group_name=resource_group
- )
+ nics = __utils__["azurearm.paged_object_to_list"](
+ netconn.network_interfaces.list(resource_group_name=resource_group)
)
for nic in nics:
- result[nic['name']] = nic
+ result[nic["name"]] = nic
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
def network_interface_get_effective_route_table(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get all route tables for a specific network interface.
@@ -1649,27 +1678,28 @@ def network_interface_get_effective_route_table(name, resource_group, **kwargs):
salt-call azurearm_network.network_interface_get_effective_route_table test-iface0 testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
nic = netconn.network_interfaces.get_effective_route_table(
- network_interface_name=name,
- resource_group_name=resource_group
+ network_interface_name=name, resource_group_name=resource_group
)
nic.wait()
tables = nic.result()
tables = tables.as_dict()
- result = tables['value']
+ result = tables["value"]
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def network_interface_list_effective_network_security_groups(name, resource_group, **kwargs):
- '''
+def network_interface_list_effective_network_security_groups(
+ name, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Get all network security groups applied to a specific network interface.
@@ -1685,30 +1715,28 @@ def network_interface_list_effective_network_security_groups(name, resource_grou
salt-call azurearm_network.network_interface_list_effective_network_security_groups test-iface0 testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
nic = netconn.network_interfaces.list_effective_network_security_groups(
- network_interface_name=name,
- resource_group_name=resource_group
+ network_interface_name=name, resource_group_name=resource_group
)
nic.wait()
groups = nic.result()
groups = groups.as_dict()
- result = groups['value']
+ result = groups["value"]
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def list_virtual_machine_scale_set_vm_network_interfaces(scale_set,
- vm_index,
- resource_group,
- **kwargs):
- '''
+def list_virtual_machine_scale_set_vm_network_interfaces(
+ scale_set, vm_index, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Get information about all network interfaces in a specific virtual machine within a scale set.
@@ -1726,30 +1754,32 @@ def list_virtual_machine_scale_set_vm_network_interfaces(scale_set,
        salt-call azurearm_network.list_virtual_machine_scale_set_vm_network_interfaces testset 0 testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- nics = __utils__['azurearm.paged_object_to_list'](
+ nics = __utils__["azurearm.paged_object_to_list"](
netconn.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
virtual_machine_scale_set_name=scale_set,
virtualmachine_index=vm_index,
- resource_group_name=resource_group
+ resource_group_name=resource_group,
)
)
for nic in nics:
- result[nic['name']] = nic
+ result[nic["name"]] = nic
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def list_virtual_machine_scale_set_network_interfaces(scale_set, resource_group, **kwargs):
- '''
+def list_virtual_machine_scale_set_network_interfaces(
+ scale_set, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Get information about all network interfaces within a scale set.
@@ -1765,29 +1795,31 @@ def list_virtual_machine_scale_set_network_interfaces(scale_set, resource_group,
        salt-call azurearm_network.list_virtual_machine_scale_set_network_interfaces testset testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- nics = __utils__['azurearm.paged_object_to_list'](
+ nics = __utils__["azurearm.paged_object_to_list"](
netconn.network_interfaces.list_virtual_machine_scale_set_network_interfaces(
virtual_machine_scale_set_name=scale_set,
- resource_group_name=resource_group
+ resource_group_name=resource_group,
)
)
for nic in nics:
- result[nic['name']] = nic
+ result[nic["name"]] = nic
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
# pylint: disable=invalid-name
-def get_virtual_machine_scale_set_network_interface(name, scale_set, vm_index, resource_group, **kwargs):
- '''
+def get_virtual_machine_scale_set_network_interface(
+ name, scale_set, vm_index, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
    Get information about a specific network interface within a scale set.
@@ -1807,29 +1839,29 @@ def get_virtual_machine_scale_set_network_interface(name, scale_set, vm_index, r
        salt-call azurearm_network.get_virtual_machine_scale_set_network_interface test-iface0 testset 0 testgroup
- '''
- expand = kwargs.get('expand')
+ """
+ expand = kwargs.get("expand")
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
        nic = netconn.network_interfaces.get_virtual_machine_scale_set_network_interface(
network_interface_name=name,
virtual_machine_scale_set_name=scale_set,
virtualmachine_index=vm_index,
resource_group_name=resource_group,
-            exapnd=expand
+            expand=expand,
)
result = nic.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def public_ip_address_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a public IP address.
@@ -1845,24 +1877,23 @@ def public_ip_address_delete(name, resource_group, **kwargs):
salt-call azurearm_network.public_ip_address_delete test-pub-ip testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
pub_ip = netconn.public_ip_addresses.delete(
- public_ip_address_name=name,
- resource_group_name=resource_group
+ public_ip_address_name=name, resource_group_name=resource_group
)
pub_ip.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def public_ip_address_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific public IP address.
@@ -1878,27 +1909,27 @@ def public_ip_address_get(name, resource_group, **kwargs):
salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup
- '''
- expand = kwargs.get('expand')
+ """
+ expand = kwargs.get("expand")
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
pub_ip = netconn.public_ip_addresses.get(
public_ip_address_name=name,
resource_group_name=resource_group,
- expand=expand
+ expand=expand,
)
result = pub_ip.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
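public_ip_address_get, route_filter_get, and route_table_get all forward an
optional expand keyword to the SDK, which inlines referenced child resources
in the response. A usage sketch (the expand value is illustrative):

    pub_ip = public_ip_address_get(
        name="test-pub-ip",
        resource_group="testgroup",
        expand="ipConfiguration",  # inline the attached IP configuration
    )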
def public_ip_address_create_or_update(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Create or update a public IP address within a specified resource group.
@@ -1914,47 +1945,51 @@ def public_ip_address_create_or_update(name, resource_group, **kwargs):
salt-call azurearm_network.public_ip_address_create_or_update test-ip-0 testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- pub_ip_model = __utils__['azurearm.create_object_model']('network', 'PublicIPAddress', **kwargs)
+ pub_ip_model = __utils__["azurearm.create_object_model"](
+ "network", "PublicIPAddress", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
ip = netconn.public_ip_addresses.create_or_update(
resource_group_name=resource_group,
public_ip_address_name=name,
- parameters=pub_ip_model
+ parameters=pub_ip_model,
)
ip.wait()
ip_result = ip.result()
result = ip_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def public_ip_addresses_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all public IP addresses within a subscription.
@@ -1965,23 +2000,25 @@ def public_ip_addresses_list_all(**kwargs):
salt-call azurearm_network.public_ip_addresses_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- pub_ips = __utils__['azurearm.paged_object_to_list'](netconn.public_ip_addresses.list_all())
+ pub_ips = __utils__["azurearm.paged_object_to_list"](
+ netconn.public_ip_addresses.list_all()
+ )
for ip in pub_ips:
- result[ip['name']] = ip
+ result[ip["name"]] = ip
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def public_ip_addresses_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all public IP addresses within a resource group.
@@ -1995,27 +2032,25 @@ def public_ip_addresses_list(resource_group, **kwargs):
salt-call azurearm_network.public_ip_addresses_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- pub_ips = __utils__['azurearm.paged_object_to_list'](
- netconn.public_ip_addresses.list(
- resource_group_name=resource_group
- )
+ pub_ips = __utils__["azurearm.paged_object_to_list"](
+ netconn.public_ip_addresses.list(resource_group_name=resource_group)
)
for ip in pub_ips:
- result[ip['name']] = ip
+ result[ip["name"]] = ip
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_filter_rule_delete(name, route_filter, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a route filter rule.
@@ -2033,25 +2068,25 @@ def route_filter_rule_delete(name, route_filter, resource_group, **kwargs):
salt-call azurearm_network.route_filter_rule_delete test-rule test-filter testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
rule = netconn.route_filter_rules.delete(
resource_group_name=resource_group,
route_filter_name=route_filter,
- rule_name=name
+ rule_name=name,
)
rule.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def route_filter_rule_get(name, route_filter, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific route filter rule.
@@ -2069,26 +2104,28 @@ def route_filter_rule_get(name, route_filter, resource_group, **kwargs):
salt-call azurearm_network.route_filter_rule_get test-rule test-filter testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
rule = netconn.route_filter_rules.get(
resource_group_name=resource_group,
route_filter_name=route_filter,
- rule_name=name
+ rule_name=name,
)
result = rule.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def route_filter_rule_create_or_update(name, access, communities, route_filter, resource_group, **kwargs):
- '''
+def route_filter_rule_create_or_update(
+ name, access, communities, route_filter, resource_group, **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Create or update a rule within a specified route filter.
@@ -2111,37 +2148,35 @@ def route_filter_rule_create_or_update(name, access, communities, route_filter,
salt-call azurearm_network.route_filter_rule_create_or_update \
test-rule allow "['12076:51006']" test-filter testgroup
- '''
+ """
if not isinstance(communities, list):
- log.error(
- 'The communities parameter must be a list of strings!'
- )
+ log.error("The communities parameter must be a list of strings!")
return False
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rule_model = __utils__['azurearm.create_object_model'](
- 'network',
- 'RouteFilterRule',
+ rule_model = __utils__["azurearm.create_object_model"](
+ "network",
+ "RouteFilterRule",
access=access,
communities=communities,
**kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -2149,25 +2184,27 @@ def route_filter_rule_create_or_update(name, access, communities, route_filter,
resource_group_name=resource_group,
route_filter_name=route_filter,
rule_name=name,
- route_filter_rule_parameters=rule_model
+ route_filter_rule_parameters=rule_model,
)
rule.wait()
rule_result = rule.result()
result = rule_result.as_dict()
except CloudError as exc:
message = str(exc)
- if kwargs.get('subscription_id') == str(message).strip():
- message = 'Subscription not authorized for this operation!'
- __utils__['azurearm.log_cloud_error']('network', message, **kwargs)
- result = {'error': message}
+ if kwargs.get("subscription_id") == str(message).strip():
+ message = "Subscription not authorized for this operation!"
+ __utils__["azurearm.log_cloud_error"]("network", message, **kwargs)
+ result = {"error": message}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def route_filter_rules_list(route_filter, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all routes within a route filter.
@@ -2183,28 +2220,27 @@ def route_filter_rules_list(route_filter, resource_group, **kwargs):
salt-call azurearm_network.route_filter_rules_list test-filter testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rules = __utils__['azurearm.paged_object_to_list'](
+ rules = __utils__["azurearm.paged_object_to_list"](
netconn.route_filter_rules.list_by_route_filter(
- resource_group_name=resource_group,
- route_filter_name=route_filter
+ resource_group_name=resource_group, route_filter_name=route_filter
)
)
for rule in rules:
- result[rule['name']] = rule
+ result[rule["name"]] = rule
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_filter_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a route filter.
@@ -2220,24 +2256,23 @@ def route_filter_delete(name, resource_group, **kwargs):
salt-call azurearm_network.route_filter_delete test-filter testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
route_filter = netconn.route_filters.delete(
- route_filter_name=name,
- resource_group_name=resource_group
+ route_filter_name=name, resource_group_name=resource_group
)
route_filter.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def route_filter_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific route filter.
@@ -2253,27 +2288,25 @@ def route_filter_get(name, resource_group, **kwargs):
salt-call azurearm_network.route_filter_get test-filter testgroup
- '''
- expand = kwargs.get('expand')
+ """
+ expand = kwargs.get("expand")
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
route_filter = netconn.route_filters.get(
- route_filter_name=name,
- resource_group_name=resource_group,
- expand=expand
+ route_filter_name=name, resource_group_name=resource_group, expand=expand
)
result = route_filter.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_filter_create_or_update(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Create or update a route filter within a specified resource group.
@@ -2289,47 +2322,51 @@ def route_filter_create_or_update(name, resource_group, **kwargs):
salt-call azurearm_network.route_filter_create_or_update test-filter testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rt_filter_model = __utils__['azurearm.create_object_model']('network', 'RouteFilter', **kwargs)
+ rt_filter_model = __utils__["azurearm.create_object_model"](
+ "network", "RouteFilter", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
rt_filter = netconn.route_filters.create_or_update(
resource_group_name=resource_group,
route_filter_name=name,
- route_filter_parameters=rt_filter_model
+ route_filter_parameters=rt_filter_model,
)
rt_filter.wait()
rt_result = rt_filter.result()
result = rt_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def route_filters_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all route filters within a resource group.
@@ -2343,27 +2380,27 @@ def route_filters_list(resource_group, **kwargs):
salt-call azurearm_network.route_filters_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- filters = __utils__['azurearm.paged_object_to_list'](
+ filters = __utils__["azurearm.paged_object_to_list"](
netconn.route_filters.list_by_resource_group(
resource_group_name=resource_group
)
)
for route_filter in filters:
- result[route_filter['name']] = route_filter
+ result[route_filter["name"]] = route_filter
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_filters_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all route filters within a subscription.
@@ -2374,23 +2411,25 @@ def route_filters_list_all(**kwargs):
salt-call azurearm_network.route_filters_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- filters = __utils__['azurearm.paged_object_to_list'](netconn.route_filters.list())
+ filters = __utils__["azurearm.paged_object_to_list"](
+ netconn.route_filters.list()
+ )
for route_filter in filters:
- result[route_filter['name']] = route_filter
+ result[route_filter["name"]] = route_filter
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_delete(name, route_table, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a route from a route table.
@@ -2408,25 +2447,25 @@ def route_delete(name, route_table, resource_group, **kwargs):
salt-call azurearm_network.route_delete test-rt test-rt-table testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
route = netconn.routes.delete(
resource_group_name=resource_group,
route_table_name=route_table,
- route_name=name
+ route_name=name,
)
route.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def route_get(name, route_table, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific route.
@@ -2444,27 +2483,34 @@ def route_get(name, route_table, resource_group, **kwargs):
salt-call azurearm_network.route_get test-rt test-rt-table testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
route = netconn.routes.get(
resource_group_name=resource_group,
route_table_name=route_table,
- route_name=name
+ route_name=name,
)
result = route.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def route_create_or_update(name, address_prefix, next_hop_type, route_table, resource_group,
- next_hop_ip_address=None, **kwargs):
- '''
+def route_create_or_update(
+ name,
+ address_prefix,
+ next_hop_type,
+ route_table,
+ resource_group,
+ next_hop_ip_address=None,
+ **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Create or update a route within a specified route table.
@@ -2490,20 +2536,22 @@ def route_create_or_update(name, address_prefix, next_hop_type, route_table, res
        salt-call azurearm_network.route_create_or_update test-rt '10.0.0.0/8' VnetLocal test-rt-table testgroup
- '''
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ """
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rt_model = __utils__['azurearm.create_object_model'](
- 'network',
- 'Route',
+ rt_model = __utils__["azurearm.create_object_model"](
+ "network",
+ "Route",
address_prefix=address_prefix,
next_hop_type=next_hop_type,
next_hop_ip_address=next_hop_ip_address,
**kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -2511,22 +2559,24 @@ def route_create_or_update(name, address_prefix, next_hop_type, route_table, res
resource_group_name=resource_group,
route_table_name=route_table,
route_name=name,
- route_parameters=rt_model
+ route_parameters=rt_model,
)
route.wait()
rt_result = route.result()
result = rt_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
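route_create_or_update passes address_prefix, next_hop_type, and the
optional next_hop_ip_address straight into the Route model. A hedged usage
sketch (values are illustrative; next_hop_ip_address is only meaningful for
VirtualAppliance hops):

    route = route_create_or_update(
        name="test-rt",
        address_prefix="10.0.0.0/8",
        next_hop_type="VirtualAppliance",
        next_hop_ip_address="10.0.0.4",
        route_table="test-rt-table",
        resource_group="testgroup",
    )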
def routes_list(route_table, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all routes within a route table.
@@ -2542,28 +2592,27 @@ def routes_list(route_table, resource_group, **kwargs):
salt-call azurearm_network.routes_list test-rt-table testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- routes = __utils__['azurearm.paged_object_to_list'](
+ routes = __utils__["azurearm.paged_object_to_list"](
netconn.routes.list(
- resource_group_name=resource_group,
- route_table_name=route_table
+ resource_group_name=resource_group, route_table_name=route_table
)
)
for route in routes:
- result[route['name']] = route
+ result[route["name"]] = route
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_table_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a route table.
@@ -2579,24 +2628,23 @@ def route_table_delete(name, resource_group, **kwargs):
salt-call azurearm_network.route_table_delete test-rt-table testgroup
- '''
+ """
result = False
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
table = netconn.route_tables.delete(
- route_table_name=name,
- resource_group_name=resource_group
+ route_table_name=name, resource_group_name=resource_group
)
table.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
return result
def route_table_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific route table.
@@ -2612,27 +2660,25 @@ def route_table_get(name, resource_group, **kwargs):
salt-call azurearm_network.route_table_get test-rt-table testgroup
- '''
- expand = kwargs.get('expand')
+ """
+ expand = kwargs.get("expand")
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
table = netconn.route_tables.get(
- route_table_name=name,
- resource_group_name=resource_group,
- expand=expand
+ route_table_name=name, resource_group_name=resource_group, expand=expand
)
result = table.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_table_create_or_update(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Create or update a route table within a specified resource group.
@@ -2648,47 +2694,51 @@ def route_table_create_or_update(name, resource_group, **kwargs):
salt-call azurearm_network.route_table_create_or_update test-rt-table testgroup
- '''
- if 'location' not in kwargs:
- rg_props = __salt__['azurearm_resource.resource_group_get'](
+ """
+ if "location" not in kwargs:
+ rg_props = __salt__["azurearm_resource.resource_group_get"](
resource_group, **kwargs
)
- if 'error' in rg_props:
- log.error(
- 'Unable to determine location from resource group specified.'
- )
+ if "error" in rg_props:
+ log.error("Unable to determine location from resource group specified.")
return False
- kwargs['location'] = rg_props['location']
+ kwargs["location"] = rg_props["location"]
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- rt_tbl_model = __utils__['azurearm.create_object_model']('network', 'RouteTable', **kwargs)
+ rt_tbl_model = __utils__["azurearm.create_object_model"](
+ "network", "RouteTable", **kwargs
+ )
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
table = netconn.route_tables.create_or_update(
resource_group_name=resource_group,
route_table_name=name,
- parameters=rt_tbl_model
+ parameters=rt_tbl_model,
)
table.wait()
tbl_result = table.result()
result = tbl_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def route_tables_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all route tables within a resource group.
@@ -2702,27 +2752,25 @@ def route_tables_list(resource_group, **kwargs):
salt-call azurearm_network.route_tables_list testgroup
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- tables = __utils__['azurearm.paged_object_to_list'](
- netconn.route_tables.list(
- resource_group_name=resource_group
- )
+ tables = __utils__["azurearm.paged_object_to_list"](
+ netconn.route_tables.list(resource_group_name=resource_group)
)
for table in tables:
- result[table['name']] = table
+ result[table["name"]] = table
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def route_tables_list_all(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all route tables within a subscription.
@@ -2733,16 +2781,18 @@ def route_tables_list_all(**kwargs):
salt-call azurearm_network.route_tables_list_all
- '''
+ """
result = {}
- netconn = __utils__['azurearm.get_client']('network', **kwargs)
+ netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
- tables = __utils__['azurearm.paged_object_to_list'](netconn.route_tables.list_all())
+ tables = __utils__["azurearm.paged_object_to_list"](
+ netconn.route_tables.list_all()
+ )
for table in tables:
- result[table['name']] = table
+ result[table["name"]] = table
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
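
Every function in this network module follows the same return convention after the reformat: a dict keyed by resource name on success, or a dict whose only key is "error" on failure. A minimal consumer sketch, assuming it runs inside Salt where __salt__ exposes execution modules (the helper below is hypothetical, shown only to illustrate the convention, not part of this patch):

    def route_tables_report(resource_group):
        # Hypothetical consumer of the functions reformatted above.
        tables = __salt__["azurearm_network.route_tables_list"](resource_group)
        if "error" in tables:
            # Failures surface as {"error": "..."} rather than raising.
            return {"result": False, "comment": tables["error"]}
        # On success the result is keyed by route table name.
        return {"result": True, "comment": ", ".join(sorted(tables))}
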
diff --git a/salt/modules/azurearm_resource.py b/salt/modules/azurearm_resource.py
index 6716d508d1b..2b45ebfd4ce 100644
--- a/salt/modules/azurearm_resource.py
+++ b/salt/modules/azurearm_resource.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Azure (ARM) Resource Execution Module
.. versionadded:: 2019.2.0
@@ -44,10 +44,11 @@ Azure (ARM) Resource Execution Module
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
-'''
+"""
# Python libs
from __future__ import absolute_import
+
import logging
# Salt Libs
@@ -59,11 +60,12 @@ try:
import azure.mgmt.resource.resources.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
+
HAS_LIBS = True
except ImportError:
pass
-__virtualname__ = 'azurearm_resource'
+__virtualname__ = "azurearm_resource"
log = logging.getLogger(__name__)
@@ -72,16 +74,16 @@ def __virtual__():
if not HAS_LIBS:
return (
False,
- 'The following dependencies are required to use the AzureARM modules: '
- 'Microsoft Azure SDK for Python >= 2.0rc6, '
- 'MS REST Azure (msrestazure) >= 0.4'
+ "The following dependencies are required to use the AzureARM modules: "
+ "Microsoft Azure SDK for Python >= 2.0rc6, "
+ "MS REST Azure (msrestazure) >= 0.4",
)
return __virtualname__
def resource_groups_list(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all resource groups within a subscription.
@@ -92,23 +94,25 @@ def resource_groups_list(**kwargs):
salt-call azurearm_resource.resource_groups_list
- '''
+ """
result = {}
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
- groups = __utils__['azurearm.paged_object_to_list'](resconn.resource_groups.list())
+ groups = __utils__["azurearm.paged_object_to_list"](
+ resconn.resource_groups.list()
+ )
for group in groups:
- result[group['name']] = group
+ result[group["name"]] = group
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def resource_group_check_existence(name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Check for the existence of a named resource group in the current subscription.
@@ -121,20 +125,20 @@ def resource_group_check_existence(name, **kwargs):
salt-call azurearm_resource.resource_group_check_existence testgroup
- '''
+ """
result = False
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
result = resconn.resource_groups.check_existence(name)
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
def resource_group_get(name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get a dictionary representing a resource group's properties.
@@ -147,22 +151,24 @@ def resource_group_get(name, **kwargs):
salt-call azurearm_resource.resource_group_get testgroup
- '''
+ """
result = {}
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
group = resconn.resource_groups.get(name)
result = group.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def resource_group_create_or_update(name, location, **kwargs): # pylint: disable=invalid-name
- '''
+def resource_group_create_or_update(
+ name, location, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Create or update a resource group in a given location.
@@ -178,26 +184,26 @@ def resource_group_create_or_update(name, location, **kwargs): # pylint: disabl
salt-call azurearm_resource.resource_group_create_or_update testgroup westus
- '''
+ """
result = {}
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
resource_group_params = {
- 'location': location,
- 'managed_by': kwargs.get('managed_by'),
- 'tags': kwargs.get('tags'),
+ "location": location,
+ "managed_by": kwargs.get("managed_by"),
+ "tags": kwargs.get("tags"),
}
try:
group = resconn.resource_groups.create_or_update(name, resource_group_params)
result = group.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def resource_group_delete(name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a resource group from the subscription.
@@ -210,21 +216,21 @@ def resource_group_delete(name, **kwargs):
salt-call azurearm_resource.resource_group_delete testgroup
- '''
+ """
result = False
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
group = resconn.resource_groups.delete(name)
group.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
def deployment_operation_get(operation, deployment, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get a deployment operation within a deployment.
@@ -242,25 +248,25 @@ def deployment_operation_get(operation, deployment, resource_group, **kwargs):
salt-call azurearm_resource.deployment_operation_get XXXXX testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
operation = resconn.deployment_operations.get(
resource_group_name=resource_group,
deployment_name=deployment,
- operation_id=operation
+ operation_id=operation,
)
result = operation.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def deployment_operations_list(name, resource_group, result_limit=10, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all deployment operations within a deployment.
@@ -279,29 +285,29 @@ def deployment_operations_list(name, resource_group, result_limit=10, **kwargs):
salt-call azurearm_resource.deployment_operations_list testdeploy testgroup
- '''
+ """
result = {}
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
- operations = __utils__['azurearm.paged_object_to_list'](
+ operations = __utils__["azurearm.paged_object_to_list"](
resconn.deployment_operations.list(
resource_group_name=resource_group,
deployment_name=name,
- top=result_limit
+ top=result_limit,
)
)
for oper in operations:
- result[oper['operation_id']] = oper
+ result[oper["operation_id"]] = oper
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def deployment_delete(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a deployment.
@@ -317,24 +323,23 @@ def deployment_delete(name, resource_group, **kwargs):
salt-call azurearm_resource.deployment_delete testdeploy testgroup
- '''
+ """
result = False
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
deploy = resconn.deployments.delete(
- deployment_name=name,
- resource_group_name=resource_group
+ deployment_name=name, resource_group_name=resource_group
)
deploy.wait()
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
def deployment_check_existence(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Check the existence of a deployment.
@@ -350,25 +355,31 @@ def deployment_check_existence(name, resource_group, **kwargs):
salt-call azurearm_resource.deployment_check_existence testdeploy testgroup
- '''
+ """
result = False
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
result = resconn.deployments.check_existence(
- deployment_name=name,
- resource_group_name=resource_group
+ deployment_name=name, resource_group_name=resource_group
)
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
-def deployment_create_or_update(name, resource_group, deploy_mode='incremental',
- debug_setting='none', deploy_params=None,
- parameters_link=None, deploy_template=None,
- template_link=None, **kwargs):
- '''
+def deployment_create_or_update(
+ name,
+ resource_group,
+ deploy_mode="incremental",
+ debug_setting="none",
+ deploy_params=None,
+ parameters_link=None,
+ deploy_template=None,
+ template_link=None,
+ **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Deploys resources to a resource group.
@@ -411,69 +422,69 @@ def deployment_create_or_update(name, resource_group, deploy_mode='incremental',
salt-call azurearm_resource.deployment_create_or_update testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
- prop_kwargs = {'mode': deploy_mode}
- prop_kwargs['debug_setting'] = {'detail_level': debug_setting}
+ prop_kwargs = {"mode": deploy_mode}
+ prop_kwargs["debug_setting"] = {"detail_level": debug_setting}
if deploy_params:
- prop_kwargs['parameters'] = deploy_params
+ prop_kwargs["parameters"] = deploy_params
else:
if isinstance(parameters_link, dict):
- prop_kwargs['parameters_link'] = parameters_link
+ prop_kwargs["parameters_link"] = parameters_link
else:
- prop_kwargs['parameters_link'] = {'uri': parameters_link}
+ prop_kwargs["parameters_link"] = {"uri": parameters_link}
if deploy_template:
- prop_kwargs['template'] = deploy_template
+ prop_kwargs["template"] = deploy_template
else:
if isinstance(template_link, dict):
- prop_kwargs['template_link'] = template_link
+ prop_kwargs["template_link"] = template_link
else:
- prop_kwargs['template_link'] = {'uri': template_link}
+ prop_kwargs["template_link"] = {"uri": template_link}
deploy_kwargs = kwargs.copy()
deploy_kwargs.update(prop_kwargs)
try:
- deploy_model = __utils__['azurearm.create_object_model'](
- 'resource',
- 'DeploymentProperties',
- **deploy_kwargs
+ deploy_model = __utils__["azurearm.create_object_model"](
+ "resource", "DeploymentProperties", **deploy_kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
validate = deployment_validate(
- name=name,
- resource_group=resource_group,
- **deploy_kwargs
+ name=name, resource_group=resource_group, **deploy_kwargs
)
- if 'error' in validate:
+ if "error" in validate:
result = validate
else:
deploy = resconn.deployments.create_or_update(
deployment_name=name,
resource_group_name=resource_group,
- properties=deploy_model
+ properties=deploy_model,
)
deploy.wait()
deploy_result = deploy.result()
result = deploy_result.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
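
As a worked call of the function above, a minimal sketch with an inline template (the template stub is illustrative and not taken from this patch; the parameter names come from the signature shown):

    # Minimal, empty ARM template: validates but deploys nothing.
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [],
    }
    result = deployment_create_or_update(
        name="testdeploy",
        resource_group="testgroup",
        deploy_mode="incremental",
        deploy_template=template,
    )
    if "error" in result:
        log.error("Deployment failed: %s", result["error"])
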
def deployment_get(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific deployment.
@@ -489,23 +500,22 @@ def deployment_get(name, resource_group, **kwargs):
salt-call azurearm_resource.deployment_get testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
deploy = resconn.deployments.get(
- deployment_name=name,
- resource_group_name=resource_group
+ deployment_name=name, resource_group_name=resource_group
)
result = deploy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def deployment_cancel(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Cancel a deployment if in 'Accepted' or 'Running' state.
@@ -521,29 +531,32 @@ def deployment_cancel(name, resource_group, **kwargs):
salt-call azurearm_resource.deployment_cancel testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
resconn.deployments.cancel(
- deployment_name=name,
- resource_group_name=resource_group
+ deployment_name=name, resource_group_name=resource_group
)
- result = {'result': True}
+ result = {"result": True}
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {
- 'error': str(exc),
- 'result': False
- }
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc), "result": False}
return result
-def deployment_validate(name, resource_group, deploy_mode=None,
- debug_setting=None, deploy_params=None,
- parameters_link=None, deploy_template=None,
- template_link=None, **kwargs):
- '''
+def deployment_validate(
+ name,
+ resource_group,
+ deploy_mode=None,
+ debug_setting=None,
+ deploy_params=None,
+ parameters_link=None,
+ deploy_template=None,
+ template_link=None,
+ **kwargs
+):
+ """
.. versionadded:: 2019.2.0
Validates whether the specified template is syntactically correct
@@ -587,39 +600,39 @@ def deployment_validate(name, resource_group, deploy_mode=None,
salt-call azurearm_resource.deployment_validate testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
- prop_kwargs = {'mode': deploy_mode}
- prop_kwargs['debug_setting'] = {'detail_level': debug_setting}
+ prop_kwargs = {"mode": deploy_mode}
+ prop_kwargs["debug_setting"] = {"detail_level": debug_setting}
if deploy_params:
- prop_kwargs['parameters'] = deploy_params
+ prop_kwargs["parameters"] = deploy_params
else:
if isinstance(parameters_link, dict):
- prop_kwargs['parameters_link'] = parameters_link
+ prop_kwargs["parameters_link"] = parameters_link
else:
- prop_kwargs['parameters_link'] = {'uri': parameters_link}
+ prop_kwargs["parameters_link"] = {"uri": parameters_link}
if deploy_template:
- prop_kwargs['template'] = deploy_template
+ prop_kwargs["template"] = deploy_template
else:
if isinstance(template_link, dict):
- prop_kwargs['template_link'] = template_link
+ prop_kwargs["template_link"] = template_link
else:
- prop_kwargs['template_link'] = {'uri': template_link}
+ prop_kwargs["template_link"] = {"uri": template_link}
deploy_kwargs = kwargs.copy()
deploy_kwargs.update(prop_kwargs)
try:
- deploy_model = __utils__['azurearm.create_object_model'](
- 'resource',
- 'DeploymentProperties',
- **deploy_kwargs
+ deploy_model = __utils__["azurearm.create_object_model"](
+ "resource", "DeploymentProperties", **deploy_kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
@@ -630,20 +643,22 @@ def deployment_validate(name, resource_group, deploy_mode=None,
deploy = resconn.deployments.validate(
deployment_name=name,
resource_group_name=resource_group,
- properties=deploy_model
+ properties=deploy_model,
)
result = deploy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
def deployment_export_template(name, resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Exports the template used for the specified deployment.
@@ -659,23 +674,22 @@ def deployment_export_template(name, resource_group, **kwargs):
salt-call azurearm_resource.deployment_export_template testdeploy testgroup
- '''
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ """
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
deploy = resconn.deployments.export_template(
- deployment_name=name,
- resource_group_name=resource_group
+ deployment_name=name, resource_group_name=resource_group
)
result = deploy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def deployments_list(resource_group, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all deployments within a resource group.
@@ -686,27 +700,27 @@ def deployments_list(resource_group, **kwargs):
salt-call azurearm_resource.deployments_list testgroup
- '''
+ """
result = {}
- resconn = __utils__['azurearm.get_client']('resource', **kwargs)
+ resconn = __utils__["azurearm.get_client"]("resource", **kwargs)
try:
- deployments = __utils__['azurearm.paged_object_to_list'](
+ deployments = __utils__["azurearm.paged_object_to_list"](
resconn.deployments.list_by_resource_group(
resource_group_name=resource_group
)
)
for deploy in deployments:
- result[deploy['name']] = deploy
+ result[deploy["name"]] = deploy
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def subscriptions_list_locations(subscription_id=None, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all locations for a subscription.
@@ -719,33 +733,33 @@ def subscriptions_list_locations(subscription_id=None, **kwargs):
salt-call azurearm_resource.subscriptions_list_locations XXXXXXXX
- '''
+ """
result = {}
if not subscription_id:
- subscription_id = kwargs.get('subscription_id')
- elif not kwargs.get('subscription_id'):
- kwargs['subscription_id'] = subscription_id
+ subscription_id = kwargs.get("subscription_id")
+ elif not kwargs.get("subscription_id"):
+ kwargs["subscription_id"] = subscription_id
- subconn = __utils__['azurearm.get_client']('subscription', **kwargs)
+ subconn = __utils__["azurearm.get_client"]("subscription", **kwargs)
try:
- locations = __utils__['azurearm.paged_object_to_list'](
+ locations = __utils__["azurearm.paged_object_to_list"](
subconn.subscriptions.list_locations(
- subscription_id=kwargs['subscription_id']
+ subscription_id=kwargs["subscription_id"]
)
)
for loc in locations:
- result[loc['name']] = loc
+ result[loc["name"]] = loc
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def subscription_get(subscription_id=None, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a subscription.
@@ -758,30 +772,30 @@ def subscription_get(subscription_id=None, **kwargs):
salt-call azurearm_resource.subscription_get XXXXXXXX
- '''
+ """
result = {}
if not subscription_id:
- subscription_id = kwargs.get('subscription_id')
- elif not kwargs.get('subscription_id'):
- kwargs['subscription_id'] = subscription_id
+ subscription_id = kwargs.get("subscription_id")
+ elif not kwargs.get("subscription_id"):
+ kwargs["subscription_id"] = subscription_id
- subconn = __utils__['azurearm.get_client']('subscription', **kwargs)
+ subconn = __utils__["azurearm.get_client"]("subscription", **kwargs)
try:
subscription = subconn.subscriptions.get(
- subscription_id=kwargs.get('subscription_id')
+ subscription_id=kwargs.get("subscription_id")
)
result = subscription.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def subscriptions_list(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all subscriptions for a tenant.
@@ -792,23 +806,23 @@ def subscriptions_list(**kwargs):
salt-call azurearm_resource.subscriptions_list
- '''
+ """
result = {}
- subconn = __utils__['azurearm.get_client']('subscription', **kwargs)
+ subconn = __utils__["azurearm.get_client"]("subscription", **kwargs)
try:
- subs = __utils__['azurearm.paged_object_to_list'](subconn.subscriptions.list())
+ subs = __utils__["azurearm.paged_object_to_list"](subconn.subscriptions.list())
for sub in subs:
- result[sub['subscription_id']] = sub
+ result[sub["subscription_id"]] = sub
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def tenants_list(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all tenants for your account.
@@ -819,23 +833,23 @@ def tenants_list(**kwargs):
salt-call azurearm_resource.tenants_list
- '''
+ """
result = {}
- subconn = __utils__['azurearm.get_client']('subscription', **kwargs)
+ subconn = __utils__["azurearm.get_client"]("subscription", **kwargs)
try:
- tenants = __utils__['azurearm.paged_object_to_list'](subconn.tenants.list())
+ tenants = __utils__["azurearm.paged_object_to_list"](subconn.tenants.list())
for tenant in tenants:
- result[tenant['tenant_id']] = tenant
+ result[tenant["tenant_id"]] = tenant
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def policy_assignment_delete(name, scope, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a policy assignment.
@@ -851,24 +865,23 @@ def policy_assignment_delete(name, scope, **kwargs):
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
- '''
+ """
result = False
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_assignments.delete(
- policy_assignment_name=name,
- scope=scope
+ policy_assignment_name=name, scope=scope
)
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
def policy_assignment_create(name, scope, definition_name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Create a policy assignment.
@@ -886,8 +899,8 @@ def policy_assignment_create(name, scope, definition_name, **kwargs):
salt-call azurearm_resource.policy_assignment_create testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852 testpolicy
- '''
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ """
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
# "get" doesn't work for built-in policies per https://github.com/Azure/azure-cli/issues/692
# Uncomment this section when the ticket above is resolved.
@@ -900,53 +913,59 @@ def policy_assignment_create(name, scope, definition_name, **kwargs):
# Delete this section when the ticket above is resolved.
# BEGIN
- definition_list = policy_definitions_list(
- **kwargs
- )
+ definition_list = policy_definitions_list(**kwargs)
if definition_name in definition_list:
definition = definition_list[definition_name]
else:
- definition = {'error': 'The policy definition named "{0}" could not be found.'.format(definition_name)}
+ definition = {
+ "error": 'The policy definition named "{0}" could not be found.'.format(
+ definition_name
+ )
+ }
# END
- if 'error' not in definition:
- definition_id = str(definition['id'])
+ if "error" not in definition:
+ definition_id = str(definition["id"])
- prop_kwargs = {'policy_definition_id': definition_id}
+ prop_kwargs = {"policy_definition_id": definition_id}
policy_kwargs = kwargs.copy()
policy_kwargs.update(prop_kwargs)
try:
- policy_model = __utils__['azurearm.create_object_model'](
- 'resource.policy',
- 'PolicyAssignment',
- **policy_kwargs
+ policy_model = __utils__["azurearm.create_object_model"](
+ "resource.policy", "PolicyAssignment", **policy_kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
policy = polconn.policy_assignments.create(
- scope=scope,
- policy_assignment_name=name,
- parameters=policy_model
+ scope=scope, policy_assignment_name=name, parameters=policy_model
)
result = policy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
else:
- result = {'error': 'The policy definition named "{0}" could not be found.'.format(definition_name)}
+ result = {
+ "error": 'The policy definition named "{0}" could not be found.'.format(
+ definition_name
+ )
+ }
return result
def policy_assignment_get(name, scope, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific policy assignment.
@@ -962,23 +981,24 @@ def policy_assignment_get(name, scope, **kwargs):
salt-call azurearm_resource.policy_assignment_get testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
- '''
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ """
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
policy = polconn.policy_assignments.get(
- policy_assignment_name=name,
- scope=scope
+ policy_assignment_name=name, scope=scope
)
result = policy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def policy_assignments_list_for_resource_group(resource_group, **kwargs): # pylint: disable=invalid-name
- '''
+def policy_assignments_list_for_resource_group(
+ resource_group, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
List all policy assignments for a resource group.
@@ -991,28 +1011,27 @@ def policy_assignments_list_for_resource_group(resource_group, **kwargs): # pyl
salt-call azurearm_resource.policy_assignments_list_for_resource_group testgroup
- '''
+ """
result = {}
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
- policy_assign = __utils__['azurearm.paged_object_to_list'](
+ policy_assign = __utils__["azurearm.paged_object_to_list"](
polconn.policy_assignments.list_for_resource_group(
- resource_group_name=resource_group,
- filter=kwargs.get('filter')
+ resource_group_name=resource_group, filter=kwargs.get("filter")
)
)
for assign in policy_assign:
- result[assign['name']] = assign
+ result[assign["name"]] = assign
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def policy_assignments_list(**kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all policy assignments for a subscription.
@@ -1023,23 +1042,27 @@ def policy_assignments_list(**kwargs):
salt-call azurearm_resource.policy_assignments_list
- '''
+ """
result = {}
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
- policy_assign = __utils__['azurearm.paged_object_to_list'](polconn.policy_assignments.list())
+ policy_assign = __utils__["azurearm.paged_object_to_list"](
+ polconn.policy_assignments.list()
+ )
for assign in policy_assign:
- result[assign['name']] = assign
+ result[assign["name"]] = assign
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
-def policy_definition_create_or_update(name, policy_rule, **kwargs): # pylint: disable=invalid-name
- '''
+def policy_definition_create_or_update(
+ name, policy_rule, **kwargs
+): # pylint: disable=invalid-name
+ """
.. versionadded:: 2019.2.0
Create or update a policy definition.
@@ -1055,46 +1078,49 @@ def policy_definition_create_or_update(name, policy_rule, **kwargs): # pylint:
salt-call azurearm_resource.policy_definition_create_or_update testpolicy '{...rule definition..}'
- '''
+ """
if not isinstance(policy_rule, dict):
- result = {'error': 'The policy rule must be a dictionary!'}
+ result = {"error": "The policy rule must be a dictionary!"}
return result
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
# Convert OrderedDict to dict
- prop_kwargs = {'policy_rule': salt.utils.json.loads(salt.utils.json.dumps(policy_rule))}
+ prop_kwargs = {
+ "policy_rule": salt.utils.json.loads(salt.utils.json.dumps(policy_rule))
+ }
policy_kwargs = kwargs.copy()
policy_kwargs.update(prop_kwargs)
try:
- policy_model = __utils__['azurearm.create_object_model'](
- 'resource.policy',
- 'PolicyDefinition',
- **policy_kwargs
+ policy_model = __utils__["azurearm.create_object_model"](
+ "resource.policy", "PolicyDefinition", **policy_kwargs
)
except TypeError as exc:
- result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be built. ({0})".format(str(exc))
+ }
return result
try:
policy = polconn.policy_definitions.create_or_update(
- policy_definition_name=name,
- parameters=policy_model
+ policy_definition_name=name, parameters=policy_model
)
result = policy.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
except SerializationError as exc:
- result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
+ result = {
+ "error": "The object model could not be parsed. ({0})".format(str(exc))
+ }
return result
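
The CLI example above elides the rule body. For illustration, a rule of the standard Azure Policy if/then shape, which the JSON round-trip in the function accepts (the specific field and effect are a plausible example, not taken from this patch):

    # Illustrative rule: deny any resource created outside westus.
    rule = {
        "if": {"not": {"field": "location", "equals": "westus"}},
        "then": {"effect": "deny"},
    }
    result = policy_definition_create_or_update("testpolicy", rule)
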
def policy_definition_delete(name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Delete a policy definition.
@@ -1107,23 +1133,21 @@ def policy_definition_delete(name, **kwargs):
salt-call azurearm_resource.policy_definition_delete testpolicy
- '''
+ """
result = False
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
# pylint: disable=unused-variable
- policy = polconn.policy_definitions.delete(
- policy_definition_name=name
- )
+ policy = polconn.policy_definitions.delete(policy_definition_name=name)
result = True
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
return result
def policy_definition_get(name, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
Get details about a specific policy definition.
@@ -1136,22 +1160,20 @@ def policy_definition_get(name, **kwargs):
salt-call azurearm_resource.policy_definition_get testpolicy
- '''
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ """
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
- policy_def = polconn.policy_definitions.get(
- policy_definition_name=name
- )
+ policy_def = polconn.policy_definitions.get(policy_definition_name=name)
result = policy_def.as_dict()
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
def policy_definitions_list(hide_builtin=False, **kwargs):
- '''
+ """
.. versionadded:: 2019.2.0
List all policy definitions for a subscription.
@@ -1164,17 +1186,19 @@ def policy_definitions_list(hide_builtin=False, **kwargs):
salt-call azurearm_resource.policy_definitions_list
- '''
+ """
result = {}
- polconn = __utils__['azurearm.get_client']('policy', **kwargs)
+ polconn = __utils__["azurearm.get_client"]("policy", **kwargs)
try:
- policy_defs = __utils__['azurearm.paged_object_to_list'](polconn.policy_definitions.list())
+ policy_defs = __utils__["azurearm.paged_object_to_list"](
+ polconn.policy_definitions.list()
+ )
for policy in policy_defs:
- if not (hide_builtin and policy['policy_type'] == 'BuiltIn'):
- result[policy['name']] = policy
+ if not (hide_builtin and policy["policy_type"] == "BuiltIn"):
+ result[policy["name"]] = policy
except CloudError as exc:
- __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
- result = {'error': str(exc)}
+ __utils__["azurearm.log_cloud_error"]("resource", str(exc), **kwargs)
+ result = {"error": str(exc)}
return result
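
Nearly every listing function above funnels its SDK results through __utils__["azurearm.paged_object_to_list"], whose implementation is outside this diff. A rough sketch of a helper satisfying the contract the callers rely on (an assumption, not the code from salt/utils/azurearm.py):

    def paged_object_to_list(paged_object):
        # Flatten an Azure SDK paged iterator into plain dicts so callers
        # can index results by name. Sketch only; the real helper may differ.
        return [item.as_dict() for item in paged_object]
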
diff --git a/salt/modules/bamboohr.py b/salt/modules/bamboohr.py
index 2817722f1ae..87854fafc95 100644
--- a/salt/modules/bamboohr.py
+++ b/salt/modules/bamboohr.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for BambooHR
.. versionadded:: 2015.8.0
@@ -11,39 +11,43 @@ Requires a ``subdomain`` and an ``apikey`` in ``/etc/salt/minion``:
bamboohr:
apikey: 012345678901234567890
subdomain: mycompany
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import salt libs
import salt.utils.http
import salt.utils.yaml
-from salt.ext import six
from salt._compat import ElementTree as ET
+from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load the module if an apikey is configured
- '''
+ """
if _apikey():
return True
- return (False, 'The API key was not specified. Please specify it using the "apikey" config.')
+ return (
+ False,
+ 'The API key was not specified. Please specify it using the "apikey" config.',
+ )
def _apikey():
- '''
+ """
Get the API key
- '''
- return __opts__.get('bamboohr', {}).get('apikey', None)
+ """
+ return __opts__.get("bamboohr", {}).get("apikey", None)
-def list_employees(order_by='id'):
- '''
+def list_employees(order_by="id"):
+ """
Show all employees for this company.
CLI Example:
@@ -62,17 +66,17 @@ def list_employees(order_by='id'):
salt myminion bamboohr.list_employees order_by=id
salt myminion bamboohr.list_employees order_by=displayName
salt myminion bamboohr.list_employees order_by=workEmail
- '''
+ """
ret = {}
- status, result = _query(action='employees', command='directory')
+ status, result = _query(action="employees", command="directory")
root = ET.fromstring(result)
directory = root.getchildren()
for cat in directory:
- if cat.tag != 'employees':
+ if cat.tag != "employees":
continue
for item in cat:
emp_id = item.items()[0][1]
- emp_ret = {'id': emp_id}
+ emp_ret = {"id": emp_id}
for details in item.getchildren():
emp_ret[details.items()[0][1]] = details.text
ret[emp_ret[order_by]] = emp_ret
@@ -80,7 +84,7 @@ def list_employees(order_by='id'):
def show_employee(emp_id, fields=None):
- '''
+ """
Show details for a single employee of this company.
CLI Example:
@@ -114,44 +118,42 @@ def show_employee(emp_id, fields=None):
A list of available fields can be found at
http://www.bamboohr.com/api/documentation/employees.php
- '''
+ """
ret = {}
if fields is None:
- fields = ','.join((
- 'canUploadPhoto',
- 'department',
- 'displayName',
- 'firstName',
- 'id',
- 'jobTitle',
- 'lastName',
- 'location',
- 'mobilePhone',
- 'nickname',
- 'photoUploaded',
- 'photoUrl',
- 'workEmail',
- 'workPhone',
- 'workPhoneExtension',
- ))
+ fields = ",".join(
+ (
+ "canUploadPhoto",
+ "department",
+ "displayName",
+ "firstName",
+ "id",
+ "jobTitle",
+ "lastName",
+ "location",
+ "mobilePhone",
+ "nickname",
+ "photoUploaded",
+ "photoUrl",
+ "workEmail",
+ "workPhone",
+ "workPhoneExtension",
+ )
+ )
- status, result = _query(
- action='employees',
- command=emp_id,
- args={'fields': fields}
- )
+ status, result = _query(action="employees", command=emp_id, args={"fields": fields})
root = ET.fromstring(result)
items = root.getchildren()
- ret = {'id': emp_id}
+ ret = {"id": emp_id}
for item in items:
ret[item.items()[0][1]] = item.text
return ret
def update_employee(emp_id, key=None, value=None, items=None):
- '''
+ """
Update one or more items for this employee. Specifying an empty value will
clear it for that employee.
@@ -161,31 +163,28 @@ def update_employee(emp_id, key=None, value=None, items=None):
salt myminion bamboohr.update_employee 1138 nickname ''
salt myminion bamboohr.update_employee 1138 items='{"nickname": "Curly"}'
salt myminion bamboohr.update_employee 1138 items='{"nickname": ""}'
- '''
+ """
if items is None:
if key is None or value is None:
- return {'Error': 'At least one key/value pair is required'}
+ return {"Error": "At least one key/value pair is required"}
items = {key: value}
elif isinstance(items, six.string_types):
items = salt.utils.yaml.safe_load(items)
- xml_items = ''
+ xml_items = ""
for pair in items:
xml_items += '<field id="{0}">{1}</field>'.format(pair, items[pair])
-    xml_items = '<employee>{0}</employee>'.format(xml_items)
+    xml_items = "<employee>{0}</employee>".format(xml_items)
status, result = _query(
- action='employees',
- command=emp_id,
- data=xml_items,
- method='POST',
+ action="employees", command=emp_id, data=xml_items, method="POST",
)
- return show_employee(emp_id, ','.join(items.keys()))
+ return show_employee(emp_id, ",".join(items.keys()))
-def list_users(order_by='id'):
- '''
+def list_users(order_by="id"):
+ """
Show all users for this company.
CLI Example:
@@ -203,9 +202,9 @@ def list_users(order_by='id'):
salt myminion bamboohr.list_users order_by=id
salt myminion bamboohr.list_users order_by=email
- '''
+ """
ret = {}
- status, result = _query(action='meta', command='users')
+ status, result = _query(action="meta", command="users")
root = ET.fromstring(result)
users = root.getchildren()
for user in users:
@@ -213,7 +212,7 @@ def list_users(order_by='id'):
user_ret = {}
for item in user.items():
user_ret[item[0]] = item[1]
- if item[0] == 'id':
+ if item[0] == "id":
user_id = item[1]
for item in user.getchildren():
user_ret[item.tag] = item.text
@@ -222,50 +221,44 @@ def list_users(order_by='id'):
def list_meta_fields():
- '''
+ """
Show all meta data fields for this company.
CLI Example:
salt myminion bamboohr.list_meta_fields
- '''
+ """
ret = {}
- status, result = _query(action='meta', command='fields')
+ status, result = _query(action="meta", command="fields")
root = ET.fromstring(result)
fields = root.getchildren()
for field in fields:
field_id = None
- field_ret = {'name': field.text}
+ field_ret = {"name": field.text}
for item in field.items():
field_ret[item[0]] = item[1]
- if item[0] == 'id':
+ if item[0] == "id":
field_id = item[1]
ret[field_id] = field_ret
return ret
-def _query(action=None,
- command=None,
- args=None,
- method='GET',
- data=None):
- '''
+def _query(action=None, command=None, args=None, method="GET", data=None):
+ """
Make a web call to BambooHR
The password can be any random text, so we chose Salty text.
- '''
- subdomain = __opts__.get('bamboohr', {}).get('subdomain', None)
- path = 'https://api.bamboohr.com/api/gateway.php/{0}/v1/'.format(
- subdomain
- )
+ """
+ subdomain = __opts__.get("bamboohr", {}).get("subdomain", None)
+ path = "https://api.bamboohr.com/api/gateway.php/{0}/v1/".format(subdomain)
if action:
path += action
if command:
- path += '/{0}'.format(command)
+ path += "/{0}".format(command)
- log.debug('BambooHR URL: %s', path)
+ log.debug("BambooHR URL: %s", path)
if not isinstance(args, dict):
args = {}
@@ -275,7 +268,7 @@ def _query(action=None,
path,
method,
username=_apikey(),
- password='saltypork',
+ password="saltypork",
params=args,
data=data,
decode=False,
@@ -283,6 +276,6 @@ def _query(action=None,
status=True,
opts=__opts__,
)
- log.debug('BambooHR Response Status Code: %s', result['status'])
+ log.debug("BambooHR Response Status Code: %s", result["status"])
- return [result['status'], result['text']]
+ return [result["status"], result["text"]]
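
The BambooHR functions above all walk the XML that _query returns. A standalone sketch of the same walk over a made-up directory document (the sample XML is illustrative, not a captured API response):

    import xml.etree.ElementTree as ET

    # Shaped like the response list_employees() expects: an <employees>
    # category of <employee id=""> items with <field id=""> children.
    sample = (
        '<directory><employees>'
        '<employee id="1138"><field id="displayName">Curly</field></employee>'
        '</employees></directory>'
    )
    root = ET.fromstring(sample)
    ret = {}
    for cat in root:
        if cat.tag != "employees":
            continue
        for item in cat:
            emp = {"id": item.get("id")}
            for details in item:
                emp[details.get("id")] = details.text
            ret[emp["id"]] = emp
    print(ret)  # {'1138': {'id': '1138', 'displayName': 'Curly'}}
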
diff --git a/salt/modules/bcache.py b/salt/modules/bcache.py
index f487c745b9f..d545e1ad5d4 100644
--- a/salt/modules/bcache.py
+++ b/salt/modules/bcache.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module for managing BCache sets
BCache is a block-level caching mechanism similar to ZFS L2ARC/ZIL, dm-cache and fscache.
@@ -14,48 +14,48 @@ This module needs the bcache userspace tools to function.
.. versionadded: 2016.3.0
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import os
-import time
import re
-
-from salt.ext import six
+import time
# Import salt libs
import salt.utils.path
+from salt.ext import six
log = logging.getLogger(__name__)
LOG = {
- 'trace': logging.TRACE,
- 'debug': logging.DEBUG,
- 'info': logging.INFO,
- 'warn': logging.WARNING,
- 'error': logging.ERROR,
- 'crit': logging.CRITICAL,
+ "trace": logging.TRACE,
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "warn": logging.WARNING,
+ "error": logging.ERROR,
+ "crit": logging.CRITICAL,
}
__func_alias__ = {
- 'attach_': 'attach',
- 'config_': 'config',
- 'super_': 'super',
+ "attach_": "attach",
+ "config_": "config",
+ "super_": "super",
}
-HAS_BLKDISCARD = salt.utils.path.which('blkdiscard') is not None
+HAS_BLKDISCARD = salt.utils.path.which("blkdiscard") is not None
def __virtual__():
- '''
+ """
Only work when make-bcache is installed
- '''
- return salt.utils.path.which('make-bcache') is not None
+ """
+ return salt.utils.path.which("make-bcache") is not None
def uuid(dev=None):
- '''
+ """
Return the bcache UUID of a block device.
If no device is given, the Cache UUID is returned.
@@ -67,20 +67,20 @@ def uuid(dev=None):
salt '*' bcache.uuid /dev/sda
salt '*' bcache.uuid bcache0
- '''
+ """
try:
if dev is None:
# take the only directory in /sys/fs/bcache and return its basename
- return list(salt.utils.path.os_walk('/sys/fs/bcache/'))[0][1][0]
+ return list(salt.utils.path.os_walk("/sys/fs/bcache/"))[0][1][0]
else:
# basename of the /sys/block/{dev}/bcache/cache symlink target
- return os.path.basename(_bcsys(dev, 'cache'))
+ return os.path.basename(_bcsys(dev, "cache"))
except Exception: # pylint: disable=broad-except
return False
def attach_(dev=None):
- '''
+ """
Attach a backing device to a cache set
If no dev is given, all backing devices will be attached.
@@ -93,16 +93,16 @@ def attach_(dev=None):
:return: bool or None if nuttin' happened
- '''
+ """
cache = uuid()
if not cache:
- log.error('No cache to attach %s to', dev)
+ log.error("No cache to attach %s to", dev)
return False
if dev is None:
res = {}
for dev, data in status(alldevs=True).items():
- if 'cache' in data:
+ if "cache" in data:
res[dev] = attach_(dev)
if res:
@@ -113,23 +113,31 @@ def attach_(dev=None):
bcache = uuid(dev)
if bcache:
if bcache == cache:
- log.info('%s is already attached to bcache %s, doing nothing', dev, cache)
+ log.info("%s is already attached to bcache %s, doing nothing", dev, cache)
return None
elif not detach(dev):
return False
- log.debug('Attaching %s to bcache %s', dev, cache)
+ log.debug("Attaching %s to bcache %s", dev, cache)
- if not _bcsys(dev, 'attach', cache,
- 'error', 'Error attaching {0} to bcache {1}'.format(dev, cache)):
+ if not _bcsys(
+ dev,
+ "attach",
+ cache,
+ "error",
+ "Error attaching {0} to bcache {1}".format(dev, cache),
+ ):
return False
- return _wait(lambda: uuid(dev) == cache,
- 'error', '{0} received attach to bcache {1}, but did not comply'.format(dev, cache))
+ return _wait(
+ lambda: uuid(dev) == cache,
+ "error",
+ "{0} received attach to bcache {1}, but did not comply".format(dev, cache),
+ )
def detach(dev=None):
- '''
+ """
Detach a backing device(s) from a cache set
If no dev is given, all backing devices will be detached.
@@ -143,11 +151,11 @@ def detach(dev=None):
salt '*' bcache.detach sdc
salt '*' bcache.detach bcache1
- '''
+ """
if dev is None:
res = {}
for dev, data in status(alldevs=True).items():
- if 'cache' in data:
+ if "cache" in data:
res[dev] = detach(dev)
if res:
@@ -155,14 +163,19 @@ def detach(dev=None):
else:
return None
- log.debug('Detaching %s', dev)
- if not _bcsys(dev, 'detach', 'goaway', 'error', 'Error detaching {0}'.format(dev)):
+ log.debug("Detaching %s", dev)
+ if not _bcsys(dev, "detach", "goaway", "error", "Error detaching {0}".format(dev)):
return False
- return _wait(lambda: uuid(dev) is False, 'error', '{0} received detach, but did not comply'.format(dev), 300)
+ return _wait(
+ lambda: uuid(dev) is False,
+ "error",
+ "{0} received detach, but did not comply".format(dev),
+ 300,
+ )
def start():
- '''
+ """
Trigger a start of the full bcache system through udev.
CLI example:
@@ -171,16 +184,20 @@ def start():
salt '*' bcache.start
- '''
- if not _run_all('udevadm trigger', 'error', 'Error starting bcache: %s'):
+ """
+ if not _run_all("udevadm trigger", "error", "Error starting bcache: %s"):
return False
- elif not _wait(lambda: uuid() is not False, 'warn', 'Bcache system started, but no active cache set found.'):
+ elif not _wait(
+ lambda: uuid() is not False,
+ "warn",
+ "Bcache system started, but no active cache set found.",
+ ):
return False
return True
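
start(), stop(), attach_() and detach() all poll through a private _wait(predicate, level, message, timeout) helper that these hunks only call. A rough sketch matching that call shape, reusing the LOG level map defined above (an assumption; the real helper elsewhere in bcache.py may differ in poll interval and defaults):

    import time

    def _wait(predicate, level="error", message="timed out", timeout=120):
        # Poll predicate until it holds or the timeout expires; on timeout,
        # log the message at the requested level and report failure.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(1)
        log.log(LOG[level], message)
        return False
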
def stop(dev=None):
- '''
+ """
Stop a bcache device
If no device is given, all backing devices will be detached from the cache, which will subsequently be stopped.
@@ -194,28 +211,33 @@ def stop(dev=None):
salt '*' bcache.stop
- '''
+ """
if dev is not None:
- log.warning('Stopping %s, device will only reappear after reregistering!', dev)
- if not _bcsys(dev, 'stop', 'goaway', 'error', 'Error stopping {0}'.format(dev)):
+ log.warning("Stopping %s, device will only reappear after reregistering!", dev)
+ if not _bcsys(dev, "stop", "goaway", "error", "Error stopping {0}".format(dev)):
return False
- return _wait(lambda: _sysfs_attr(_bcpath(dev)) is False, 'error', 'Device {0} did not stop'.format(dev), 300)
+ return _wait(
+ lambda: _sysfs_attr(_bcpath(dev)) is False,
+ "error",
+ "Device {0} did not stop".format(dev),
+ 300,
+ )
else:
cache = uuid()
if not cache:
- log.warning('bcache already stopped?')
+ log.warning("bcache already stopped?")
return None
if not _alltrue(detach()):
return False
- elif not _fssys('stop', 'goaway', 'error', 'Error stopping cache'):
+ elif not _fssys("stop", "goaway", "error", "Error stopping cache"):
return False
- return _wait(lambda: uuid() is False, 'error', 'Cache did not stop', 300)
+ return _wait(lambda: uuid() is False, "error", "Cache did not stop", 300)
-def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size=None):
- '''
+def back_make(dev, cache_mode="writeback", force=False, attach=True, bucket_size=None):
+ """
Create a backing device for attachment to a set.
Because the block size must be the same, a cache set already needs to exist.
@@ -230,16 +252,18 @@ def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size
:param force: Overwrite existing bcaches
:param attach: Immediately attach the backing device to the set
:param bucket_size: Size of a bucket (see kernel doc)
- '''
+ """
# pylint: disable=too-many-return-statements
cache = uuid()
if not cache:
- log.error('No bcache set found')
+ log.error("No bcache set found")
return False
elif _sysfs_attr(_bcpath(dev)):
if not force:
- log.error('%s already contains a bcache. Wipe it manually or use force', dev)
+ log.error(
+ "%s already contains a bcache. Wipe it manually or use force", dev
+ )
return False
elif uuid(dev) and not detach(dev):
return False
@@ -247,22 +271,31 @@ def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size
return False
dev = _devpath(dev)
- block_size = _size_map(_fssys('block_size'))
+ block_size = _size_map(_fssys("block_size"))
# You might want to override, we pick the cache set's as sane default
if bucket_size is None:
- bucket_size = _size_map(_fssys('bucket_size'))
+ bucket_size = _size_map(_fssys("bucket_size"))
- cmd = 'make-bcache --block {0} --bucket {1} --{2} --bdev {3}'.format(block_size, bucket_size, cache_mode, dev)
+ cmd = "make-bcache --block {0} --bucket {1} --{2} --bdev {3}".format(
+ block_size, bucket_size, cache_mode, dev
+ )
if force:
- cmd += ' --wipe-bcache'
+ cmd += " --wipe-bcache"
- if not _run_all(cmd, 'error', 'Error creating backing device {0}: %s'.format(dev)):
+ if not _run_all(cmd, "error", "Error creating backing device {0}: %s".format(dev)):
return False
- elif not _sysfs_attr('fs/bcache/register', _devpath(dev),
- 'error', 'Error registering backing device {0}'.format(dev)):
+ elif not _sysfs_attr(
+ "fs/bcache/register",
+ _devpath(dev),
+ "error",
+ "Error registering backing device {0}".format(dev),
+ ):
return False
- elif not _wait(lambda: _sysfs_attr(_bcpath(dev)) is not False,
- 'error', 'Backing device {0} did not register'.format(dev)):
+ elif not _wait(
+ lambda: _sysfs_attr(_bcpath(dev)) is not False,
+ "error",
+ "Backing device {0} did not register".format(dev),
+ ):
return False
elif attach:
return attach_(dev)
@@ -270,8 +303,10 @@ def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size
return True
-def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=None, attach=True):
- '''
+def cache_make(
+ dev, reserved=None, force=False, block_size=None, bucket_size=None, attach=True
+):
+ """
Create BCache cache on a block device.
If blkdiscard is available the entire device will be properly cleared in advance.
@@ -290,7 +325,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
:param block_size: Block size of the cache; defaults to devices' logical block size
:param force: Overwrite existing BCache sets
:param attach: Attach all existing backend devices immediately
- '''
+ """
# TODO: multiple devs == md jbod
# pylint: disable=too-many-return-statements
@@ -298,19 +333,21 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
cache = uuid()
if cache:
if not force:
- log.error('BCache cache %s is already on the system', cache)
+ log.error("BCache cache %s is already on the system", cache)
return False
cache = _bdev()
dev = _devbase(dev)
- udev = __salt__['udev.env'](dev)
+ udev = __salt__["udev.env"](dev)
- if ('ID_FS_TYPE' in udev or (udev.get('DEVTYPE', None) != 'partition' and 'ID_PART_TABLE_TYPE' in udev)) \
- and not force:
- log.error('%s already contains data, wipe first or force', dev)
+ if (
+ "ID_FS_TYPE" in udev
+ or (udev.get("DEVTYPE", None) != "partition" and "ID_PART_TABLE_TYPE" in udev)
+ ) and not force:
+ log.error("%s already contains data, wipe first or force", dev)
return False
- elif reserved is not None and udev.get('DEVTYPE', None) != 'disk':
- log.error('Need a partitionable blockdev for reserved to work')
+ elif reserved is not None and udev.get("DEVTYPE", None) != "disk":
+ log.error("Need a partitionable blockdev for reserved to work")
return False
_, block, bucket = _sizes(dev)
@@ -336,25 +373,36 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
return False
if reserved:
- cmd = 'parted -m -s -a optimal -- ' \
- '/dev/{0} mklabel gpt mkpart bcache-reserved 1M {1} mkpart bcache {1} 100%'.format(dev, reserved)
+ cmd = (
+ "parted -m -s -a optimal -- "
+ "/dev/{0} mklabel gpt mkpart bcache-reserved 1M {1} mkpart bcache {1} 100%".format(
+ dev, reserved
+ )
+ )
# if wipe was incomplete & part layout remains the same,
# this is one condition set where udev would make it accidentally pop up again
- if not _run_all(cmd, 'error', 'Error creating bcache partitions on {0}: %s'.format(dev)):
+ if not _run_all(
+ cmd, "error", "Error creating bcache partitions on {0}: %s".format(dev)
+ ):
return False
- dev = '{0}2'.format(dev)
+ dev = "{0}2".format(dev)
# ---------------- Finally, create a cache ----------------
- cmd = 'make-bcache --cache /dev/{0} --block {1} --wipe-bcache'.format(dev, block_size)
+ cmd = "make-bcache --cache /dev/{0} --block {1} --wipe-bcache".format(
+ dev, block_size
+ )
# Actually bucket_size should always have a value, but for testing 0 is possible as well
if bucket_size:
- cmd += ' --bucket {0}'.format(bucket_size)
+ cmd += " --bucket {0}".format(bucket_size)
- if not _run_all(cmd, 'error', 'Error creating cache {0}: %s'.format(dev)):
+ if not _run_all(cmd, "error", "Error creating cache {0}: %s".format(dev)):
return False
- elif not _wait(lambda: uuid() is not False,
- 'error', 'Cache {0} seemingly created OK, but FS did not activate'.format(dev)):
+ elif not _wait(
+ lambda: uuid() is not False,
+ "error",
+ "Cache {0} seemingly created OK, but FS did not activate".format(dev),
+ ):
return False
if attach:
@@ -364,7 +412,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
def config_(dev=None, **kwargs):
- '''
+ """
Show or update config of a bcache device.
If no device is given, operate on the cache set itself.
@@ -379,25 +427,31 @@ def config_(dev=None, **kwargs):
salt '*' bcache.config bcache1 cache_mode=writeback writeback_percent=15
:return: config or True/False
- '''
+ """
if dev is None:
spath = _fspath()
else:
spath = _bcpath(dev)
# filter out 'hidden' kwargs added by our favourite orchestration system
- updates = dict([(key, val) for key, val in kwargs.items() if not key.startswith('__')])
+ updates = dict(
+ [(key, val) for key, val in kwargs.items() if not key.startswith("__")]
+ )
if updates:
endres = 0
for key, val in updates.items():
- endres += _sysfs_attr([spath, key], val,
- 'warn', 'Failed to update {0} with {1}'.format(os.path.join(spath, key), val))
+ endres += _sysfs_attr(
+ [spath, key],
+ val,
+ "warn",
+ "Failed to update {0} with {1}".format(os.path.join(spath, key), val),
+ )
return endres > 0
else:
result = {}
data = _sysfs_parse(spath, config=True, internals=True, options=True)
- for key in ('other_ro', 'inter_ro'):
+ for key in ("other_ro", "inter_ro"):
if key in data:
del data[key]
@@ -408,7 +462,7 @@ def config_(dev=None, **kwargs):
def status(stats=False, config=False, internals=False, superblock=False, alldevs=False):
- '''
+ """
Show the full status of the BCache system and optionally all its involved devices
CLI example:
@@ -423,15 +477,17 @@ def status(stats=False, config=False, internals=False, superblock=False, alldevs
:param config: include settings
:param internals: include internals
:param superblock: include superblock
- '''
+ """
bdevs = []
- for _, links, _ in salt.utils.path.os_walk('/sys/block/'):
+ for _, links, _ in salt.utils.path.os_walk("/sys/block/"):
for block in links:
- if 'bcache' in block:
+ if "bcache" in block:
continue
- for spath, sdirs, _ in salt.utils.path.os_walk('/sys/block/{0}'.format(block), followlinks=False):
- if 'bcache' in sdirs:
+ for spath, sdirs, _ in salt.utils.path.os_walk(
+ "/sys/block/{0}".format(block), followlinks=False
+ ):
+ if "bcache" in sdirs:
bdevs.append(os.path.basename(spath))
statii = {}
for bcache in bdevs:
@@ -444,9 +500,9 @@ def status(stats=False, config=False, internals=False, superblock=False, alldevs
for dev in statii:
if dev != cdev:
# it's a backing dev
- if statii[dev]['cache'] == cuuid:
+ if statii[dev]["cache"] == cuuid:
count += 1
- statii[cdev]['attached_backing_devices'] = count
+ statii[cdev]["attached_backing_devices"] = count
if not alldevs:
statii = statii[cdev]
@@ -455,7 +511,7 @@ def status(stats=False, config=False, internals=False, superblock=False, alldevs
def device(dev, stats=False, config=False, internals=False, superblock=False):
- '''
+ """
Check the state of a single bcache device
CLI example:
@@ -469,106 +525,114 @@ def device(dev, stats=False, config=False, internals=False, superblock=False):
:param config: include all settings
:param internals: include all internals
:param superblock: include superblock info
- '''
+ """
result = {}
- if not _sysfs_attr(_bcpath(dev), None, 'error', '{0} is not a bcache fo any kind'.format(dev)):
+ if not _sysfs_attr(
+ _bcpath(dev), None, "error", "{0} is not a bcache of any kind".format(dev)
+ ):
return False
- elif _bcsys(dev, 'set'):
+ elif _bcsys(dev, "set"):
# ---------------- It's the cache itself ----------------
- result['uuid'] = uuid()
- base_attr = ['block_size', 'bucket_size', 'cache_available_percent', 'cache_replacement_policy', 'congested']
+ result["uuid"] = uuid()
+ base_attr = [
+ "block_size",
+ "bucket_size",
+ "cache_available_percent",
+ "cache_replacement_policy",
+ "congested",
+ ]
# ---------------- Parse through both the blockdev & the FS ----------------
result.update(_sysfs_parse(_bcpath(dev), base_attr, stats, config, internals))
result.update(_sysfs_parse(_fspath(), base_attr, stats, config, internals))
- result.update(result.pop('base'))
+ result.update(result.pop("base"))
else:
# ---------------- It's a backing device ----------------
back_uuid = uuid(dev)
if back_uuid is not None:
- result['cache'] = back_uuid
+ result["cache"] = back_uuid
try:
- result['dev'] = os.path.basename(_bcsys(dev, 'dev'))
+ result["dev"] = os.path.basename(_bcsys(dev, "dev"))
except Exception: # pylint: disable=broad-except
pass
- result['bdev'] = _bdev(dev)
+ result["bdev"] = _bdev(dev)
- base_attr = ['cache_mode', 'running', 'state', 'writeback_running']
+ base_attr = ["cache_mode", "running", "state", "writeback_running"]
base_path = _bcpath(dev)
result.update(_sysfs_parse(base_path, base_attr, stats, config, internals))
- result.update(result.pop('base'))
+ result.update(result.pop("base"))
# ---------------- Modifications ----------------
- state = [result['state']]
- if result.pop('running'):
- state.append('running')
+ state = [result["state"]]
+ if result.pop("running"):
+ state.append("running")
else:
- state.append('stopped')
- if 'writeback_running' in result:
- if result.pop('writeback_running'):
- state.append('writeback_running')
+ state.append("stopped")
+ if "writeback_running" in result:
+ if result.pop("writeback_running"):
+ state.append("writeback_running")
else:
- state.append('writeback_stopped')
- result['state'] = state
+ state.append("writeback_stopped")
+ result["state"] = state
# ---------------- Statistics ----------------
- if 'stats' in result:
- replre = r'(stats|cache)_'
- statres = result['stats']
- for attr in result['stats']:
- if '/' not in attr:
- key = re.sub(replre, '', attr)
+ if "stats" in result:
+ replre = r"(stats|cache)_"
+ statres = result["stats"]
+ for attr in result["stats"]:
+ if "/" not in attr:
+ key = re.sub(replre, "", attr)
statres[key] = statres.pop(attr)
else:
- stat, key = attr.split('/', 1)
- stat = re.sub(replre, '', stat)
- key = re.sub(replre, '', key)
+ stat, key = attr.split("/", 1)
+ stat = re.sub(replre, "", stat)
+ key = re.sub(replre, "", key)
if stat not in statres:
statres[stat] = {}
statres[stat][key] = statres.pop(attr)
- result['stats'] = statres
+ result["stats"] = statres
# ---------------- Internals ----------------
if internals:
- interres = result.pop('inter_ro', {})
- interres.update(result.pop('inter_rw', {}))
+ interres = result.pop("inter_ro", {})
+ interres.update(result.pop("inter_rw", {}))
if interres:
for key in interres:
- if key.startswith('internal'):
- nkey = re.sub(r'internal[s/]*', '', key)
+ if key.startswith("internal"):
+ nkey = re.sub(r"internal[s/]*", "", key)
interres[nkey] = interres.pop(key)
key = nkey
- if key.startswith(('btree', 'writeback')):
- mkey, skey = re.split(r'_', key, maxsplit=1)
+ if key.startswith(("btree", "writeback")):
+ mkey, skey = re.split(r"_", key, maxsplit=1)
if mkey not in interres:
interres[mkey] = {}
interres[mkey][skey] = interres.pop(key)
- result['internals'] = interres
+ result["internals"] = interres
# ---------------- Config ----------------
if config:
- configres = result['config']
+ configres = result["config"]
for key in configres:
- if key.startswith('writeback'):
- mkey, skey = re.split(r'_', key, maxsplit=1)
+ if key.startswith("writeback"):
+ mkey, skey = re.split(r"_", key, maxsplit=1)
if mkey not in configres:
configres[mkey] = {}
configres[mkey][skey] = configres.pop(key)
- result['config'] = configres
+ result["config"] = configres
# ---------------- Superblock ----------------
if superblock:
- result['superblock'] = super_(dev)
+ result["superblock"] = super_(dev)
return result
def super_(dev):
- '''
+ """
Read out BCache SuperBlock
CLI example:
@@ -578,11 +642,15 @@ def super_(dev):
salt '*' bcache.super bcache0
salt '*' bcache.super /dev/sdc
- '''
+ """
dev = _devpath(dev)
ret = {}
- res = _run_all('bcache-super-show {0}'.format(dev), 'error', 'Error reading superblock on {0}: %s'.format(dev))
+ res = _run_all(
+ "bcache-super-show {0}".format(dev),
+ "error",
+ "Error reading superblock on {0}: %s".format(dev),
+ )
if not res:
return False
@@ -591,13 +659,13 @@ def super_(dev):
if not line:
continue
- key, val = [val.strip() for val in re.split(r'[\s]+', line, maxsplit=1)]
+ key, val = [val.strip() for val in re.split(r"[\s]+", line, maxsplit=1)]
if not (key and val):
continue
mval = None
- if ' ' in val:
- rval, mval = [val.strip() for val in re.split(r'[\s]+', val, maxsplit=1)]
+ if " " in val:
+ rval, mval = [val.strip() for val in re.split(r"[\s]+", val, maxsplit=1)]
mval = mval[1:-1]
else:
rval = val
@@ -608,12 +676,12 @@ def super_(dev):
try:
rval = float(rval)
except Exception: # pylint: disable=broad-except
- if rval == 'yes':
+ if rval == "yes":
rval = True
- elif rval == 'no':
+ elif rval == "no":
rval = False
- pkey, key = re.split(r'\.', key, maxsplit=1)
+ pkey, key = re.split(r"\.", key, maxsplit=1)
if pkey not in ret:
ret[pkey] = {}
@@ -624,75 +692,76 @@ def super_(dev):
return ret
+
# -------------------------------- HELPER FUNCTIONS --------------------------------
def _devbase(dev):
- '''
+ """
Basename of just about any dev
- '''
+ """
dev = os.path.realpath(os.path.expandvars(dev))
dev = os.path.basename(dev)
return dev
def _devpath(dev):
- '''
+ """
Return /dev name of just about any dev
:return: /dev/devicename
- '''
- return os.path.join('/dev', _devbase(dev))
+ """
+ return os.path.join("/dev", _devbase(dev))
def _syspath(dev):
- '''
+ """
Full SysFS path of a device
- '''
+ """
dev = _devbase(dev)
- dev = re.sub(r'^([vhs][a-z]+)([0-9]+)', r'\1/\1\2', dev)
+ dev = re.sub(r"^([vhs][a-z]+)([0-9]+)", r"\1/\1\2", dev)
# (GPT) writes stuff at the end of a dev as well
- cmd += ' seek={0}'.format((size/1024**2) - blocks)
- endres += _run_all(cmd, 'warn', wipe_failmsg)
+ cmd += " seek={0}".format((size / 1024 ** 2) - blocks)
+ endres += _run_all(cmd, "warn", wipe_failmsg)
- elif wiper == 'blkdiscard':
- cmd = 'blkdiscard /dev/{0}'.format(dev)
- endres += _run_all(cmd, 'warn', wipe_failmsg)
+ elif wiper == "blkdiscard":
+ cmd = "blkdiscard /dev/{0}".format(dev)
+ endres += _run_all(cmd, "warn", wipe_failmsg)
# TODO: fix annoying bug failing blkdiscard by trying to discard 1 sector past blkdev
endres = 1
@@ -925,10 +1026,10 @@ def _wipe(dev):
def _wait(lfunc, log_lvl=None, log_msg=None, tries=10):
- '''
+ """
Wait for lfunc to be True
:return: True if lfunc succeeded within tries, False if it didn't
- '''
+ """
i = 0
while i < tries:
time.sleep(1)
@@ -943,20 +1044,20 @@ def _wait(lfunc, log_lvl=None, log_msg=None, tries=10):
def _run_all(cmd, log_lvl=None, log_msg=None, exitcode=0):
- '''
+ """
Simple wrapper around cmd.run_all
log_msg can contain {0} for stderr
:return: True or stdout, False if retcode wasn't exitcode
- '''
- res = __salt__['cmd.run_all'](cmd)
- if res['retcode'] == exitcode:
- if res['stdout']:
- return res['stdout']
+ """
+ res = __salt__["cmd.run_all"](cmd)
+ if res["retcode"] == exitcode:
+ if res["stdout"]:
+ return res["stdout"]
else:
return True
if log_lvl is not None:
- log.log(LOG[log_lvl], log_msg, res['stderr'])
+ log.log(LOG[log_lvl], log_msg, res["stderr"])
return False
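Every shell-out in the bcache module above funnels through the _run_all wrapper, which normalizes the cmd.run_all result dict into "stdout, True, or False" and routes failures through the module's LOG level map. A minimal standalone sketch of the same contract, with subprocess standing in for __salt__['cmd.run_all'] (the run_all name and the reduced LOG map here are illustrative, not part of the module):

    import logging
    import subprocess

    log = logging.getLogger(__name__)
    LOG = {"warn": logging.WARNING, "error": logging.ERROR}  # reduced stand-in

    def run_all(cmd, log_lvl=None, log_msg=None, exitcode=0):
        # Run the command and capture output, like __salt__['cmd.run_all'].
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        if proc.returncode == exitcode:
            # Same contract as _run_all above: stdout if any, else True.
            return proc.stdout or True
        if log_lvl is not None:
            # log_msg carries a %s placeholder that receives stderr.
            log.log(LOG[log_lvl], log_msg, proc.stderr)
        return False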
diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py
index 461901d56db..24a241d825d 100644
--- a/salt/modules/beacons.py
+++ b/salt/modules/beacons.py
@@ -1,13 +1,14 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module for managing the Salt beacons on a minion
.. versionadded:: 2015.8.0
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import difflib
import logging
import os
@@ -23,17 +24,11 @@ from salt.ext.six.moves import map
log = logging.getLogger(__name__)
default_event_wait = 60
-__func_alias__ = {
- 'list_': 'list',
- 'reload_': 'reload'
-}
+__func_alias__ = {"list_": "list", "reload_": "reload"}
-def list_(return_yaml=True,
- include_pillar=True,
- include_opts=True,
- **kwargs):
- '''
+def list_(return_yaml=True, include_pillar=True, include_opts=True, **kwargs):
+ """
List the beacons currently configured on the minion
:param return_yaml: Whether to return YAML formatted output,
@@ -53,41 +48,48 @@ def list_(return_yaml=True,
salt '*' beacons.list
- '''
+ """
beacons = None
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'list',
- 'include_pillar': include_pillar,
- 'include_opts': include_opts},
- 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {
+ "func": "list",
+ "include_pillar": include_pillar,
+ "include_opts": include_opts,
+ },
+ "manage_beacons",
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacons_list_complete',
- wait=kwargs.get('timeout', default_event_wait))
- log.debug('event_ret %s', event_ret)
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacons_list_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ log.debug("event_ret %s", event_ret)
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
- ret['result'] = False
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["result"] = False
+ ret["comment"] = "Event module not available. Beacon add failed."
return ret
if beacons:
if return_yaml:
- tmp = {'beacons': beacons}
+ tmp = {"beacons": beacons}
return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
return beacons
else:
- return {'beacons': {}}
+ return {"beacons": {}}
def list_available(return_yaml=True, **kwargs):
- '''
+ """
List the beacons currently available on the minion
:param return_yaml: Whether to return YAML formatted output, default
@@ -100,37 +102,40 @@ def list_available(return_yaml=True, **kwargs):
salt '*' beacons.list_available
- '''
+ """
beacons = None
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"]({"func": "list_available"}, "manage_beacons")
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacons_list_available_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacons_list_available_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
- ret['result'] = False
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["result"] = False
+ ret["comment"] = "Event module not available. Beacon add failed."
return ret
if beacons:
if return_yaml:
- tmp = {'beacons': beacons}
+ tmp = {"beacons": beacons}
return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
return beacons
else:
- return {'beacons': {}}
+ return {"beacons": {}}
def add(name, beacon_data, **kwargs):
- '''
+ """
Add a beacon on the minion
:param name: Name of the beacon to configure
@@ -143,84 +148,98 @@ def add(name, beacon_data, **kwargs):
salt '*' beacons.add ps "[{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]"
- '''
- ret = {'comment': 'Failed to add beacon {0}.'.format(name),
- 'result': False}
+ """
+ ret = {"comment": "Failed to add beacon {0}.".format(name), "result": False}
if name in list_(return_yaml=False, **kwargs):
- ret['comment'] = 'Beacon {0} is already configured.'.format(name)
+ ret["comment"] = "Beacon {0} is already configured.".format(name)
return ret
# Check to see if a beacon_module is specified; if so, verify it is a
# valid and available beacon type.
- if any('beacon_module' in key for key in beacon_data):
- res = next(value for value in beacon_data if 'beacon_module' in value)
- beacon_name = res['beacon_module']
+ if any("beacon_module" in key for key in beacon_data):
+ res = next(value for value in beacon_data if "beacon_module" in value)
+ beacon_name = res["beacon_module"]
else:
beacon_name = name
if beacon_name not in list_available(return_yaml=False):
- ret['comment'] = 'Beacon "{0}" is not available.'.format(beacon_name)
+ ret["comment"] = 'Beacon "{0}" is not available.'.format(beacon_name)
return ret
- if 'test' in kwargs and kwargs['test']:
- ret['result'] = True
- ret['comment'] = 'Beacon: {0} would be added.'.format(name)
+ if "test" in kwargs and kwargs["test"]:
+ ret["result"] = True
+ ret["comment"] = "Beacon: {0} would be added.".format(name)
else:
try:
# Attempt to load the beacon module so we have access to the validate function
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'name': name,
- 'beacon_data': beacon_data,
- 'func': 'validate_beacon'},
- 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {
+ "name": name,
+ "beacon_data": beacon_data,
+ "func": "validate_beacon",
+ },
+ "manage_beacons",
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_validation_complete',
- wait=kwargs.get('timeout', default_event_wait))
- valid = event_ret['valid']
- vcomment = event_ret['vcomment']
+ tag="/salt/minion/minion_beacon_validation_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ valid = event_ret["valid"]
+ vcomment = event_ret["vcomment"]
if not valid:
- ret['result'] = False
- ret['comment'] = ('Beacon {0} configuration invalid, '
- 'not adding.\n{1}'.format(name, vcomment))
+ ret["result"] = False
+ ret["comment"] = (
+ "Beacon {0} configuration invalid, "
+ "not adding.\n{1}".format(name, vcomment)
+ )
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["comment"] = "Event module not available. Beacon add failed."
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'name': name,
- 'beacon_data': beacon_data,
- 'func': 'add'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"name": name, "beacon_data": beacon_data, "func": "add"},
+ "manage_beacons",
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_add_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacon_add_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
if name in beacons and beacons[name] == beacon_data:
- ret['result'] = True
- ret['comment'] = 'Added beacon: {0}.'.format(name)
+ ret["result"] = True
+ ret["comment"] = "Added beacon: {0}.".format(name)
elif event_ret:
- ret['result'] = False
- ret['comment'] = event_ret['comment']
+ ret["result"] = False
+ ret["comment"] = event_ret["comment"]
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon add complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon add complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["comment"] = "Event module not available. Beacon add failed."
return ret
def modify(name, beacon_data, **kwargs):
- '''
+ """
Modify an existing beacon
:param name: Name of the beacon to configure
@@ -232,99 +251,125 @@ def modify(name, beacon_data, **kwargs):
.. code-block:: bash
salt '*' beacons.modify ps "[{'salt-master': 'stopped'}, {'apache2': 'stopped'}]"
- '''
+ """
- ret = {'comment': '',
- 'result': True}
+ ret = {"comment": "", "result": True}
current_beacons = list_(return_yaml=False, **kwargs)
if name not in current_beacons:
- ret['comment'] = 'Beacon {0} is not configured.'.format(name)
+ ret["comment"] = "Beacon {0} is not configured.".format(name)
return ret
- if 'test' in kwargs and kwargs['test']:
- ret['result'] = True
- ret['comment'] = 'Beacon: {0} would be added.'.format(name)
+ if "test" in kwargs and kwargs["test"]:
+ ret["result"] = True
+ ret["comment"] = "Beacon: {0} would be added.".format(name)
else:
try:
# Attempt to load the beacon module so we have access to the validate function
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'name': name,
- 'beacon_data': beacon_data,
- 'func': 'validate_beacon'},
- 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {
+ "name": name,
+ "beacon_data": beacon_data,
+ "func": "validate_beacon",
+ },
+ "manage_beacons",
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_validation_complete',
- wait=kwargs.get('timeout', default_event_wait))
- valid = event_ret['valid']
- vcomment = event_ret['vcomment']
+ tag="/salt/minion/minion_beacon_validation_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ valid = event_ret["valid"]
+ vcomment = event_ret["vcomment"]
if not valid:
- ret['result'] = False
- ret['comment'] = ('Beacon {0} configuration invalid, '
- 'not adding.\n{1}'.format(name, vcomment))
+ ret["result"] = False
+ ret["comment"] = (
+ "Beacon {0} configuration invalid, "
+ "not adding.\n{1}".format(name, vcomment)
+ )
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon modify failed.'
+ ret["comment"] = "Event module not available. Beacon modify failed."
if not valid:
- ret['result'] = False
- ret['comment'] = ('Beacon {0} configuration invalid, '
- 'not modifying.\n{1}'.format(name, vcomment))
+ ret["result"] = False
+ ret["comment"] = (
+ "Beacon {0} configuration invalid, "
+ "not modifying.\n{1}".format(name, vcomment)
+ )
return ret
_current = current_beacons[name]
_new = beacon_data
if _new == _current:
- ret['comment'] = 'Job {0} in correct state'.format(name)
+ ret["comment"] = "Job {0} in correct state".format(name)
return ret
_current_lines = []
for _item in _current:
- _current_lines.extend(['{0}:{1}\n'.format(key, value)
- for (key, value) in six.iteritems(_item)])
+ _current_lines.extend(
+ [
+ "{0}:{1}\n".format(key, value)
+ for (key, value) in six.iteritems(_item)
+ ]
+ )
_new_lines = []
for _item in _new:
- _new_lines.extend(['{0}:{1}\n'.format(key, value)
- for (key, value) in six.iteritems(_item)])
+ _new_lines.extend(
+ [
+ "{0}:{1}\n".format(key, value)
+ for (key, value) in six.iteritems(_item)
+ ]
+ )
_diff = difflib.unified_diff(_current_lines, _new_lines)
- ret['changes'] = {}
- ret['changes']['diff'] = ''.join(_diff)
+ ret["changes"] = {}
+ ret["changes"]["diff"] = "".join(_diff)
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'modify'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"name": name, "beacon_data": beacon_data, "func": "modify"},
+ "manage_beacons",
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_modify_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacon_modify_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
if name in beacons and beacons[name] == beacon_data:
- ret['result'] = True
- ret['comment'] = 'Modified beacon: {0}.'.format(name)
+ ret["result"] = True
+ ret["comment"] = "Modified beacon: {0}.".format(name)
elif event_ret:
- ret['result'] = False
- ret['comment'] = event_ret['comment']
+ ret["result"] = False
+ ret["comment"] = event_ret["comment"]
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon modify complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon modify complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["comment"] = "Event module not available. Beacon add failed."
return ret
def delete(name, **kwargs):
- '''
+ """
Delete a beacon item
:param name: Name of the beacon to delete
@@ -338,44 +383,50 @@ def delete(name, **kwargs):
salt '*' beacons.delete load
- '''
+ """
- ret = {'comment': 'Failed to delete beacon {0}.'.format(name),
- 'result': False}
+ ret = {"comment": "Failed to delete beacon {0}.".format(name), "result": False}
- if 'test' in kwargs and kwargs['test']:
- ret['result'] = True
- ret['comment'] = 'Beacon: {0} would be deleted.'.format(name)
+ if "test" in kwargs and kwargs["test"]:
+ ret["result"] = True
+ ret["comment"] = "Beacon: {0} would be deleted.".format(name)
else:
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'name': name, 'func': 'delete'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"name": name, "func": "delete"}, "manage_beacons"
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_delete_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacon_delete_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
if name not in beacons:
- ret['result'] = True
- ret['comment'] = 'Deleted beacon: {0}.'.format(name)
+ ret["result"] = True
+ ret["comment"] = "Deleted beacon: {0}.".format(name)
return ret
elif event_ret:
- ret['result'] = False
- ret['comment'] = event_ret['comment']
+ ret["result"] = False
+ ret["comment"] = event_ret["comment"]
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon delete complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon delete complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon add failed.'
+ ret["comment"] = "Event module not available. Beacon add failed."
return ret
def save(**kwargs):
- '''
+ """
Save all beacons on the minion
:return: Boolean and status message on success or failure of save.
@@ -385,36 +436,38 @@ def save(**kwargs):
.. code-block:: bash
salt '*' beacons.save
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
beacons = list_(return_yaml=False, include_pillar=False, **kwargs)
# move this file into a configurable opt
- sfn = os.path.join(os.path.dirname(__opts__['conf_file']),
- os.path.dirname(__opts__['default_include']),
- 'beacons.conf')
+ sfn = os.path.join(
+ os.path.dirname(__opts__["conf_file"]),
+ os.path.dirname(__opts__["default_include"]),
+ "beacons.conf",
+ )
if beacons:
- tmp = {'beacons': beacons}
+ tmp = {"beacons": beacons}
yaml_out = salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
- yaml_out = ''
+ yaml_out = ""
try:
- with salt.utils.files.fopen(sfn, 'w+') as fp_:
+ with salt.utils.files.fopen(sfn, "w+") as fp_:
fp_.write(yaml_out)
- ret['comment'] = 'Beacons saved to {0}.'.format(sfn)
+ ret["comment"] = "Beacons saved to {0}.".format(sfn)
except (IOError, OSError):
- ret['comment'] = 'Unable to write to beacons file at {0}. Check ' \
- 'permissions.'.format(sfn)
- ret['result'] = False
+ ret[
+ "comment"
+ ] = "Unable to write to beacons file at {0}. Check " "permissions.".format(sfn)
+ ret["result"] = False
return ret
def enable(**kwargs):
- '''
+ """
Enable all beacons on the minion
Returns:
@@ -425,43 +478,47 @@ def enable(**kwargs):
.. code-block:: bash
salt '*' beacons.enable
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
- if 'test' in kwargs and kwargs['test']:
- ret['comment'] = 'Beacons would be enabled.'
+ if "test" in kwargs and kwargs["test"]:
+ ret["comment"] = "Beacons would be enabled."
else:
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'enable'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"]({"func": "enable"}, "manage_beacons")
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacons_enabled_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
- if 'enabled' in beacons and beacons['enabled']:
- ret['result'] = True
- ret['comment'] = 'Enabled beacons on minion.'
+ tag="/salt/minion/minion_beacons_enabled_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
+ if "enabled" in beacons and beacons["enabled"]:
+ ret["result"] = True
+ ret["comment"] = "Enabled beacons on minion."
elif event_ret:
- ret['result'] = False
- ret['comment'] = 'Failed to enable beacons on minion.'
+ ret["result"] = False
+ ret["comment"] = "Failed to enable beacons on minion."
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon enabled complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon enabled complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacons enable job failed.'
+ ret["comment"] = "Event module not available. Beacons enable job failed."
return ret
def disable(**kwargs):
- '''
+ """
Disable all beacons jobs on the minion
:return: Boolean and status message on success or failure of disable.
@@ -471,39 +528,43 @@ def disable(**kwargs):
.. code-block:: bash
salt '*' beacons.disable
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
- if 'test' in kwargs and kwargs['test']:
- ret['comment'] = 'Beacons would be disabled.'
+ if "test" in kwargs and kwargs["test"]:
+ ret["comment"] = "Beacons would be disabled."
else:
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'disable'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"]({"func": "disable"}, "manage_beacons")
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacons_disabled_complete',
- wait=kwargs.get('timeout', default_event_wait))
- log.debug('event_ret %s', event_ret)
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
- if 'enabled' in beacons and not beacons['enabled']:
- ret['result'] = True
- ret['comment'] = 'Disabled beacons on minion.'
+ tag="/salt/minion/minion_beacons_disabled_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ log.debug("event_ret %s", event_ret)
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
+ if "enabled" in beacons and not beacons["enabled"]:
+ ret["result"] = True
+ ret["comment"] = "Disabled beacons on minion."
elif event_ret:
- ret['result'] = False
- ret['comment'] = 'Failed to disable beacons on minion.'
+ ret["result"] = False
+ ret["comment"] = "Failed to disable beacons on minion."
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon disabled complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon disabled complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacons enable job failed.'
+ ret["comment"] = "Event module not available. Beacons enable job failed."
return ret
@@ -518,7 +579,7 @@ def _get_beacon_config_dict(beacon_config):
def enable_beacon(name, **kwargs):
- '''
+ """
Enable beacon on the minion
:name: Name of the beacon to enable.
@@ -529,59 +590,72 @@ def enable_beacon(name, **kwargs):
.. code-block:: bash
salt '*' beacons.enable_beacon ps
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
if not name:
- ret['comment'] = 'Beacon name is required.'
- ret['result'] = False
+ ret["comment"] = "Beacon name is required."
+ ret["result"] = False
return ret
- if 'test' in kwargs and kwargs['test']:
- ret['comment'] = 'Beacon {0} would be enabled.'.format(name)
+ if "test" in kwargs and kwargs["test"]:
+ ret["comment"] = "Beacon {0} would be enabled.".format(name)
else:
_beacons = list_(return_yaml=False, **kwargs)
if name not in _beacons:
- ret['comment'] = 'Beacon {0} is not currently configured.'.format(name)
- ret['result'] = False
+ ret["comment"] = "Beacon {0} is not currently configured.".format(name)
+ ret["result"] = False
return ret
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'enable_beacon', 'name': name}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"func": "enable_beacon", "name": name}, "manage_beacons"
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_enabled_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacon_enabled_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
beacon_config_dict = _get_beacon_config_dict(beacons[name])
- if 'enabled' in beacon_config_dict and beacon_config_dict['enabled']:
- ret['result'] = True
- ret['comment'] = 'Enabled beacon {0} on minion.'.format(name)
+ if (
+ "enabled" in beacon_config_dict
+ and beacon_config_dict["enabled"]
+ ):
+ ret["result"] = True
+ ret["comment"] = "Enabled beacon {0} on minion.".format(
+ name
+ )
else:
- ret['result'] = False
- ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Failed to enable beacon {0} on minion.".format(name)
elif event_ret:
- ret['result'] = False
- ret['comment'] = event_ret['comment']
+ ret["result"] = False
+ ret["comment"] = event_ret["comment"]
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon enabled complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon enabled complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon enable job failed.'
+ ret["comment"] = "Event module not available. Beacon enable job failed."
return ret
def disable_beacon(name, **kwargs):
- '''
+ """
Disable beacon on the minion
:name: Name of the beacon to disable.
@@ -592,59 +666,70 @@ def disable_beacon(name, **kwargs):
.. code-block:: bash
salt '*' beacons.disable_beacon ps
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
if not name:
- ret['comment'] = 'Beacon name is required.'
- ret['result'] = False
+ ret["comment"] = "Beacon name is required."
+ ret["result"] = False
return ret
- if 'test' in kwargs and kwargs['test']:
- ret['comment'] = 'Beacons would be enabled.'
+ if "test" in kwargs and kwargs["test"]:
+ ret["comment"] = "Beacons would be enabled."
else:
_beacons = list_(return_yaml=False, **kwargs)
if name not in _beacons:
- ret['comment'] = 'Beacon {0} is not currently configured.'.format(name)
- ret['result'] = False
+ ret["comment"] = "Beacon {0} is not currently configured.".format(name)
+ ret["result"] = False
return ret
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'disable_beacon', 'name': name}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"](
+ {"func": "disable_beacon", "name": name}, "manage_beacons"
+ )
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_disabled_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- beacons = event_ret['beacons']
+ tag="/salt/minion/minion_beacon_disabled_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ beacons = event_ret["beacons"]
beacon_config_dict = _get_beacon_config_dict(beacons[name])
- if 'enabled' in beacon_config_dict and not beacon_config_dict['enabled']:
- ret['result'] = True
- ret['comment'] = 'Disabled beacon {0} on minion.'.format(name)
+ if (
+ "enabled" in beacon_config_dict
+ and not beacon_config_dict["enabled"]
+ ):
+ ret["result"] = True
+ ret["comment"] = "Disabled beacon {0} on minion.".format(
+ name
+ )
else:
- ret['result'] = False
- ret['comment'] = 'Failed to disable beacon on minion.'
+ ret["result"] = False
+ ret["comment"] = "Failed to disable beacon on minion."
elif event_ret:
- ret['result'] = False
- ret['comment'] = event_ret['comment']
+ ret["result"] = False
+ ret["comment"] = event_ret["comment"]
else:
- ret['result'] = False
- ret['comment'] = 'Did not receive the beacon disabled complete event before the timeout of {}s'.format(
- kwargs.get('timeout', default_event_wait)
+ ret["result"] = False
+ ret[
+ "comment"
+ ] = "Did not receive the beacon disabled complete event before the timeout of {}s".format(
+ kwargs.get("timeout", default_event_wait)
)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon disable job failed.'
+ ret["comment"] = "Event module not available. Beacon disable job failed."
return ret
def reset(**kwargs):
- '''
+ """
Reset beacon configuration on the minion
CLI Example:
@@ -652,32 +737,34 @@ def reset(**kwargs):
.. code-block:: bash
salt '*' beacons.reset
- '''
+ """
- ret = {'comment': [],
- 'result': True}
+ ret = {"comment": [], "result": True}
- if kwargs.get('test'):
- ret['comment'] = 'Beacons would be reset.'
+ if kwargs.get("test"):
+ ret["comment"] = "Beacons would be reset."
else:
try:
- with salt.utils.event.get_event('minion', opts=__opts__, listen=True) as event_bus:
- res = __salt__['event.fire']({'func': 'reset'}, 'manage_beacons')
+ with salt.utils.event.get_event(
+ "minion", opts=__opts__, listen=True
+ ) as event_bus:
+ res = __salt__["event.fire"]({"func": "reset"}, "manage_beacons")
if res:
event_ret = event_bus.get_event(
- tag='/salt/minion/minion_beacon_reset_complete',
- wait=kwargs.get('timeout', default_event_wait))
- if event_ret and event_ret['complete']:
- ret['result'] = True
- ret['comment'] = 'Beacon configuration reset.'
+ tag="/salt/minion/minion_beacon_reset_complete",
+ wait=kwargs.get("timeout", default_event_wait),
+ )
+ if event_ret and event_ret["complete"]:
+ ret["result"] = True
+ ret["comment"] = "Beacon configuration reset."
else:
- ret['result'] = False
+ ret["result"] = False
if event_ret is not None:
- ret['comment'] = event_ret['comment']
+ ret["comment"] = event_ret["comment"]
else:
- ret['comment'] = 'Beacon reset event never received'
+ ret["comment"] = "Beacon reset event never received"
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
- ret['comment'] = 'Event module not available. Beacon disable job failed.'
+ ret["comment"] = "Event module not available. Beacon disable job failed."
return ret
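Every public function in beacons.py follows the same round trip: open the minion event bus, fire a payload at the manage_beacons tag, then block on the matching /salt/minion/*_complete tag. A hedged sketch of that shape as a single helper (manage_beacons_call is a hypothetical consolidation, not in the module; opts and fire stand in for the loader-injected __opts__ and __salt__['event.fire']):

    import salt.utils.event

    def manage_beacons_call(opts, fire, payload, complete_tag, timeout=60):
        # Fire a request at the minion's beacon manager...
        with salt.utils.event.get_event("minion", opts=opts, listen=True) as bus:
            if not fire(payload, "manage_beacons"):
                return None
            # ...then wait for the reply on the matching *_complete tag.
            event_ret = bus.get_event(tag=complete_tag, wait=timeout)
            if event_ret and event_ret.get("complete"):
                return event_ret
        return None

Under this shape, list_ is essentially manage_beacons_call(__opts__, __salt__["event.fire"], {"func": "list", "include_pillar": True, "include_opts": True}, "/salt/minion/minion_beacons_list_complete") plus the YAML formatting.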
diff --git a/salt/modules/bigip.py b/salt/modules/bigip.py
index 3737bf2ad95..2b54e4d27c5 100644
--- a/salt/modules/bigip.py
+++ b/salt/modules/bigip.py
@@ -1,102 +1,114 @@
# -*- coding: utf-8 -*-
-'''
+"""
An execution module which can manipulate an f5 bigip via iControl REST
:maturity: develop
:platform: f5_bigip_11.6
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+# Import salt libs
+import salt.exceptions
import salt.utils.json
+# Import 3rd-party libs
+from salt.ext import six
+
# Import third party libs
try:
import requests
import requests.exceptions
+
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
-# Import 3rd-party libs
-from salt.ext import six
-
-# Import salt libs
-import salt.exceptions
# Define the module's virtual name
-__virtualname__ = 'bigip'
+__virtualname__ = "bigip"
def __virtual__():
- '''
+ """
Only return if requests is installed
- '''
+ """
if HAS_LIBS:
return __virtualname__
- return (False, 'The bigip execution module cannot be loaded: '
- 'python requests library not available.')
+ return (
+ False,
+ "The bigip execution module cannot be loaded: "
+ "python requests library not available.",
+ )
-BIG_IP_URL_BASE = 'https://{host}/mgmt/tm'
+BIG_IP_URL_BASE = "https://{host}/mgmt/tm"
def _build_session(username, password, trans_label=None):
- '''
+ """
Create a session to be used when connecting to iControl REST.
- '''
+ """
bigip = requests.session()
bigip.auth = (username, password)
bigip.verify = False
- bigip.headers.update({'Content-Type': 'application/json'})
+ bigip.headers.update({"Content-Type": "application/json"})
if trans_label:
- #pull the trans id from the grain
- trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=trans_label))
+ # pull the trans id from the grain
+ trans_id = __salt__["grains.get"](
+ "bigip_f5_trans:{label}".format(label=trans_label)
+ )
if trans_id:
- bigip.headers.update({'X-F5-REST-Coordination-Id': trans_id})
+ bigip.headers.update({"X-F5-REST-Coordination-Id": trans_id})
else:
- bigip.headers.update({'X-F5-REST-Coordination-Id': None})
+ bigip.headers.update({"X-F5-REST-Coordination-Id": None})
return bigip
def _load_response(response):
- '''
+ """
Load the response from json data, return the dictionary or raw text
- '''
+ """
try:
data = salt.utils.json.loads(response.text)
except ValueError:
data = response.text
- ret = {'code': response.status_code, 'content': data}
+ ret = {"code": response.status_code, "content": data}
return ret
def _load_connection_error(hostname, error):
- '''
+ """
Format and Return a connection error
- '''
+ """
- ret = {'code': None, 'content': 'Error: Unable to connect to the bigip device: {host}\n{error}'.format(host=hostname, error=error)}
+ ret = {
+ "code": None,
+ "content": "Error: Unable to connect to the bigip device: {host}\n{error}".format(
+ host=hostname, error=error
+ ),
+ }
return ret
def _loop_payload(params):
- '''
+ """
Pass in a dictionary of parameters, loop through them and build a payload containing
the parameters whose values are not None.
- '''
+ """
- #construct the payload
+ # construct the payload
payload = {}
- #set the payload
+ # set the payload
for param, value in six.iteritems(params):
if value is not None:
payload[param] = value
@@ -105,22 +117,22 @@ def _loop_payload(params):
def _build_list(option_value, item_kind):
- '''
+ """
pass in an option to check for a list of items, create a list of dictionaries of items to set
for this option
- '''
- #specify profiles if provided
+ """
+ # specify profiles if provided
if option_value is not None:
items = []
- #if user specified none, return an empty list
- if option_value == 'none':
+ # if user specified none, return an empty list
+ if option_value == "none":
return items
- #was a list already passed in?
+ # was a list already passed in?
if not isinstance(option_value, list):
- values = option_value.split(',')
+ values = option_value.split(",")
else:
values = option_value
@@ -130,97 +142,105 @@ def _build_list(option_value, item_kind):
items.append(value)
# other times it's picky and likes key value pairs...
else:
- items.append({'kind': item_kind, 'name': value})
+ items.append({"kind": item_kind, "name": value})
return items
return None
def _determine_toggles(payload, toggles):
- '''
+ """
BigIP can't make up its mind if it likes yes / no or true or false.
Figure out what it likes to hear without confusing the user.
- '''
+ """
for toggle, definition in six.iteritems(toggles):
- #did the user specify anything?
- if definition['value'] is not None:
- #test for yes_no toggle
- if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'yes_no':
- payload[toggle] = 'yes'
- elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'yes_no':
- payload[toggle] = 'no'
+ # did the user specify anything?
+ if definition["value"] is not None:
+ # test for yes_no toggle
+ if (
+ definition["value"] is True or definition["value"] == "yes"
+ ) and definition["type"] == "yes_no":
+ payload[toggle] = "yes"
+ elif (
+ definition["value"] is False or definition["value"] == "no"
+ ) and definition["type"] == "yes_no":
+ payload[toggle] = "no"
- #test for true_false toggle
- if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'true_false':
+ # test for true_false toggle
+ if (
+ definition["value"] is True or definition["value"] == "yes"
+ ) and definition["type"] == "true_false":
payload[toggle] = True
- elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'true_false':
+ elif (
+ definition["value"] is False or definition["value"] == "no"
+ ) and definition["type"] == "true_false":
payload[toggle] = False
return payload
def _set_value(value):
- '''
+ """
A function to detect if the user is trying to pass a dictionary or a list, parse it, and return a
dictionary, a list, or a string
- '''
- #don't continue if already an acceptable data-type
+ """
+ # don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value
- #check if json
- if value.startswith('j{') and value.endswith('}j'):
+ # check if json
+ if value.startswith("j{") and value.endswith("}j"):
- value = value.replace('j{', '{')
- value = value.replace('}j', '}')
+ value = value.replace("j{", "{")
+ value = value.replace("}j", "}")
try:
return salt.utils.json.loads(value)
except Exception: # pylint: disable=broad-except
raise salt.exceptions.CommandExecutionError
- #detect list of dictionaries
- if '|' in value and r'\|' not in value:
- values = value.split('|')
+ # detect list of dictionaries
+ if "|" in value and r"\|" not in value:
+ values = value.split("|")
items = []
for value in values:
items.append(_set_value(value))
return items
- #parse out dictionary if detected
- if ':' in value and r'\:' not in value:
+ # parse out dictionary if detected
+ if ":" in value and r"\:" not in value:
options = {}
- #split out pairs
- key_pairs = value.split(',')
+ # split out pairs
+ key_pairs = value.split(",")
for key_pair in key_pairs:
- k = key_pair.split(':')[0]
- v = key_pair.split(':')[1]
+ k = key_pair.split(":")[0]
+ v = key_pair.split(":")[1]
options[k] = v
return options
- #try making a list
- elif ',' in value and r'\,' not in value:
- value_items = value.split(',')
+ # try making a list
+ elif "," in value and r"\," not in value:
+ value_items = value.split(",")
return value_items
- #just return a string
+ # just return a string
else:
- #remove escape chars if added
- if r'\|' in value:
- value = value.replace(r'\|', '|')
+ # remove escape chars if added
+ if r"\|" in value:
+ value = value.replace(r"\|", "|")
- if r'\:' in value:
- value = value.replace(r'\:', ':')
+ if r"\:" in value:
+ value = value.replace(r"\:", ":")
- if r'\,' in value:
- value = value.replace(r'\,', ',')
+ if r"\," in value:
+ value = value.replace(r"\,", ",")
return value
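_set_value above implements a small CLI-side mini-grammar: j{...}j is a JSON escape hatch, '|' builds a list of parsed items, ':' with ',' builds a dict, a bare ',' builds a list, and backslash-escaped separators are unescaped and left in plain strings. A few illustrative calls, assuming salt is importable so the function above can be exercised directly:

    from salt.modules.bigip import _set_value

    assert _set_value('j{"a": 1}j') == {"a": 1}              # JSON escape hatch
    assert _set_value("name:web1,port:80") == {"name": "web1", "port": "80"}
    assert _set_value("a:1|b:2") == [{"a": "1"}, {"b": "2"}] # list of parsed items
    assert _set_value("one,two") == ["one", "two"]           # bare ',' makes a list
    assert _set_value(r"literal\,comma") == "literal,comma"  # escaped ',' stays a string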
def start_transaction(hostname, username, password, label):
- '''
+ """
A function to connect to a bigip device and start a new transaction.
hostname
@@ -237,39 +257,40 @@ def start_transaction(hostname, username, password, label):
salt '*' bigip.start_transaction bigip admin admin my_transaction
- '''
+ """
- #build the session
+ # build the session
bigip_session = _build_session(username, password)
payload = {}
- #post to REST to get trans id
+ # post to REST to get trans id
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/transaction',
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname) + "/transaction",
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- #extract the trans_id
+ # extract the trans_id
data = _load_response(response)
- if data['code'] == 200:
+ if data["code"] == 200:
- trans_id = data['content']['transId']
+ trans_id = data["content"]["transId"]
- __salt__['grains.setval']('bigip_f5_trans', {label: trans_id})
+ __salt__["grains.setval"]("bigip_f5_trans", {label: trans_id})
- return 'Transaction: {trans_id} - has successfully been stored in the grain: bigip_f5_trans:{label}'.format(trans_id=trans_id,
- label=label)
+ return "Transaction: {trans_id} - has successfully been stored in the grain: bigip_f5_trans:{label}".format(
+ trans_id=trans_id, label=label
+ )
else:
return data
def list_transaction(hostname, username, password, label):
- '''
+ """
A function to connect to a bigip device and list an existing transaction.
hostname
@@ -286,29 +307,34 @@ def list_transaction(hostname, username, password, label):
salt '*' bigip.list_transaction bigip admin admin my_transaction
- '''
+ """
- #build the session
+ # build the session
bigip_session = _build_session(username, password)
- #pull the trans id from the grain
- trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
+ # pull the trans id from the grain
+ trans_id = __salt__["grains.get"]("bigip_f5_trans:{label}".format(label=label))
if trans_id:
- #post to REST to get trans id
+ # post to REST to get trans id
try:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}/commands'.format(trans_id=trans_id))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/transaction/{trans_id}/commands".format(trans_id=trans_id)
+ )
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
- return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
- ' bigip.start_transaction function'
+ return (
+ "Error: the label for this transaction was not defined as a grain. Begin a new transaction using the"
+ " bigip.start_transaction function"
+ )
def commit_transaction(hostname, username, password, label):
- '''
+ """
A function to connect to a bigip device and commit an existing transaction.
hostname
@@ -324,35 +350,38 @@ def commit_transaction(hostname, username, password, label):
CLI Example::
salt '*' bigip.commit_transaction bigip admin admin my_transaction
- '''
+ """
- #build the session
+ # build the session
bigip_session = _build_session(username, password)
- #pull the trans id from the grain
- trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
+ # pull the trans id from the grain
+ trans_id = __salt__["grains.get"]("bigip_f5_trans:{label}".format(label=label))
if trans_id:
payload = {}
- payload['state'] = 'VALIDATING'
+ payload["state"] = "VALIDATING"
- #patch to REST to get trans id
+ # patch to REST to get trans id
try:
response = bigip_session.patch(
- BIG_IP_URL_BASE.format(host=hostname) + '/transaction/{trans_id}'.format(trans_id=trans_id),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/transaction/{trans_id}".format(trans_id=trans_id),
+ data=salt.utils.json.dumps(payload),
)
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
- return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
- ' bigip.start_transaction function'
+ return (
+ "Error: the label for this transaction was not defined as a grain. Begin a new transaction using the"
+ " bigip.start_transaction function"
+ )
def delete_transaction(hostname, username, password, label):
- '''
+ """
A function to connect to a bigip device and delete an existing transaction.
hostname
@@ -368,29 +397,34 @@ def delete_transaction(hostname, username, password, label):
CLI Example::
salt '*' bigip.delete_transaction bigip admin admin my_transaction
- '''
+ """
- #build the session
+ # build the session
bigip_session = _build_session(username, password)
- #pull the trans id from the grain
- trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
+ # pull the trans id from the grain
+ trans_id = __salt__["grains.get"]("bigip_f5_trans:{label}".format(label=label))
if trans_id:
- #patch to REST to get trans id
+ # patch to REST to get trans id
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}'.format(trans_id=trans_id))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/transaction/{trans_id}".format(trans_id=trans_id)
+ )
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
- return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
- ' bigip.start_transaction function'
+ return (
+ "Error: the label for this transaction was not defined as a grain. Begin a new transaction using the"
+ " bigip.start_transaction function"
+ )
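The four transaction functions above share one convention: start_transaction stores the iControl transId in the bigip_f5_trans:<label> grain, and _build_session replays it as the X-F5-REST-Coordination-Id header so later calls join the pending transaction. A minimal sketch of the same handshake outside Salt, with a plain dict standing in for the grain (the names here are illustrative):

    import requests

    BIG_IP_URL_BASE = "https://{host}/mgmt/tm"
    _trans_ids = {}  # stand-in for the bigip_f5_trans grain

    def start_transaction(host, auth, label):
        # POST /transaction opens a transaction; the reply carries its transId.
        resp = requests.post(
            BIG_IP_URL_BASE.format(host=host) + "/transaction",
            auth=auth,
            verify=False,
            json={},
        )
        _trans_ids[label] = resp.json()["transId"]

    def session_for(auth, label=None):
        # Build a session like _build_session above; replaying the stored
        # transId queues every request on it into the pending transaction.
        sess = requests.session()
        sess.auth = auth
        sess.verify = False
        sess.headers.update({"Content-Type": "application/json"})
        if label in _trans_ids:
            sess.headers.update({"X-F5-REST-Coordination-Id": str(_trans_ids[label])})
        return sess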
def list_node(hostname, username, password, name=None, trans_label=None):
- '''
+ """
A function to connect to a bigip device and list all nodes or a specific node.
@@ -410,17 +444,22 @@ def list_node(hostname, username, password, name=None, trans_label=None):
CLI Example::
salt '*' bigip.list_node bigip admin admin my-node
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password, trans_label)
- #get to REST
+ # get to REST
try:
if name:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/node/{name}".format(name=name)
+ )
else:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node')
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/node"
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -428,7 +467,7 @@ def list_node(hostname, username, password, name=None, trans_label=None):
def create_node(hostname, username, password, name, address, trans_label=None):
- '''
+ """
A function to connect to a bigip device and create a node.
hostname
@@ -448,39 +487,45 @@ def create_node(hostname, username, password, name, address, trans_label=None):
CLI Example::
salt '*' bigip.create_node bigip admin admin my-node 10.1.1.2
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password, trans_label)
- #construct the payload
+ # construct the payload
payload = {}
- payload['name'] = name
- payload['address'] = address
+ payload["name"] = name
+ payload["address"] = address
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node',
- data=salt.utils.json.dumps(payload))
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/node",
+ data=salt.utils.json.dumps(payload),
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
-def modify_node(hostname, username, password, name,
- connection_limit=None,
- description=None,
- dynamic_ratio=None,
- logging=None,
- monitor=None,
- rate_limit=None,
- ratio=None,
- session=None,
- state=None,
- trans_label=None):
- '''
+def modify_node(
+ hostname,
+ username,
+ password,
+ name,
+ connection_limit=None,
+ description=None,
+ dynamic_ratio=None,
+ logging=None,
+ monitor=None,
+ rate_limit=None,
+ ratio=None,
+ session=None,
+ state=None,
+ trans_label=None,
+):
+ """
A function to connect to a bigip device and modify an existing node.
hostname
@@ -516,32 +561,33 @@ def modify_node(hostname, username, password, name,
CLI Example::
salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
- '''
+ """
params = {
- 'connection-limit': connection_limit,
- 'description': description,
- 'dynamic-ratio': dynamic_ratio,
- 'logging': logging,
- 'monitor': monitor,
- 'rate-limit': rate_limit,
- 'ratio': ratio,
- 'session': session,
- 'state': state,
+ "connection-limit": connection_limit,
+ "description": description,
+ "dynamic-ratio": dynamic_ratio,
+ "logging": logging,
+ "monitor": monitor,
+ "rate-limit": rate_limit,
+ "ratio": ratio,
+ "session": session,
+ "state": state,
}
- #build session
+ # build session
bigip_session = _build_session(username, password, trans_label)
- #build payload
+ # build payload
payload = _loop_payload(params)
- payload['name'] = name
+ payload["name"] = name
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node/{name}'.format(name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/node/{name}".format(name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
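# Editor's note: _loop_payload is not shown in this diff. Judging from how it
# is called in modify_node above, a plausible sketch is below -- it drops every
# parameter still at its default of None so that only explicitly-set options
# reach the REST payload. Treat it as illustrative, not the verbatim helper.
def _loop_payload(params):
    payload = {}
    for param, value in params.items():
        if value is not None:
            payload[param] = value
    return payload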
@@ -550,7 +596,7 @@ def modify_node(hostname, username, password, name,
def delete_node(hostname, username, password, name, trans_label=None):
- '''
+ """
A function to connect to a bigip device and delete a specific node.
hostname
@@ -568,25 +614,27 @@ def delete_node(hostname, username, password, name, trans_label=None):
CLI Example::
salt '*' bigip.delete_node bigip admin admin my-node
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password, trans_label)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/node/{name}".format(name=name)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
def list_pool(hostname, username, password, name=None):
- '''
+ """
A function to connect to a bigip device and list all pools or a specific pool.
hostname
@@ -602,47 +650,58 @@ def list_pool(hostname, username, password, name=None):
CLI Example::
salt '*' bigip.list_pool bigip admin admin my-pool
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #get to REST
+ # get to REST
try:
if name:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/?expandSubcollections=true'.format(name=name))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}/?expandSubcollections=true".format(name=name)
+ )
else:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool')
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/pool"
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
-def create_pool(hostname, username, password, name, members=None,
- allow_nat=None,
- allow_snat=None,
- description=None,
- gateway_failsafe_device=None,
- ignore_persisted_weight=None,
- ip_tos_to_client=None,
- ip_tos_to_server=None,
- link_qos_to_client=None,
- link_qos_to_server=None,
- load_balancing_mode=None,
- min_active_members=None,
- min_up_members=None,
- min_up_members_action=None,
- min_up_members_checking=None,
- monitor=None,
- profiles=None,
- queue_depth_limit=None,
- queue_on_connection_limit=None,
- queue_time_limit=None,
- reselect_tries=None,
- service_down_action=None,
- slow_ramp_time=None):
- '''
+def create_pool(
+ hostname,
+ username,
+ password,
+ name,
+ members=None,
+ allow_nat=None,
+ allow_snat=None,
+ description=None,
+ gateway_failsafe_device=None,
+ ignore_persisted_weight=None,
+ ip_tos_to_client=None,
+ ip_tos_to_server=None,
+ link_qos_to_client=None,
+ link_qos_to_server=None,
+ load_balancing_mode=None,
+ min_active_members=None,
+ min_up_members=None,
+ min_up_members_action=None,
+ min_up_members_checking=None,
+ monitor=None,
+ profiles=None,
+ queue_depth_limit=None,
+ queue_on_connection_limit=None,
+ queue_time_limit=None,
+ reselect_tries=None,
+ service_down_action=None,
+ slow_ramp_time=None,
+):
+ """
A function to connect to a bigip device and create a pool.
hostname
@@ -715,57 +774,57 @@ def create_pool(hostname, username, password, name, members=None,
CLI Example::
salt '*' bigip.create_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 monitor=http
- '''
+ """
params = {
- 'description': description,
- 'gateway-failsafe-device': gateway_failsafe_device,
- 'ignore-persisted-weight': ignore_persisted_weight,
- 'ip-tos-to-client': ip_tos_to_client,
- 'ip-tos-to-server': ip_tos_to_server,
- 'link-qos-to-client': link_qos_to_client,
- 'link-qos-to-server': link_qos_to_server,
- 'load-balancing-mode': load_balancing_mode,
- 'min-active-members': min_active_members,
- 'min-up-members': min_up_members,
- 'min-up-members-action': min_up_members_action,
- 'min-up-members-checking': min_up_members_checking,
- 'monitor': monitor,
- 'profiles': profiles,
- 'queue-on-connection-limit': queue_on_connection_limit,
- 'queue-depth-limit': queue_depth_limit,
- 'queue-time-limit': queue_time_limit,
- 'reselect-tries': reselect_tries,
- 'service-down-action': service_down_action,
- 'slow-ramp-time': slow_ramp_time
+ "description": description,
+ "gateway-failsafe-device": gateway_failsafe_device,
+ "ignore-persisted-weight": ignore_persisted_weight,
+ "ip-tos-to-client": ip_tos_to_client,
+ "ip-tos-to-server": ip_tos_to_server,
+ "link-qos-to-client": link_qos_to_client,
+ "link-qos-to-server": link_qos_to_server,
+ "load-balancing-mode": load_balancing_mode,
+ "min-active-members": min_active_members,
+ "min-up-members": min_up_members,
+ "min-up-members-action": min_up_members_action,
+ "min-up-members-checking": min_up_members_checking,
+ "monitor": monitor,
+ "profiles": profiles,
+ "queue-on-connection-limit": queue_on_connection_limit,
+ "queue-depth-limit": queue_depth_limit,
+ "queue-time-limit": queue_time_limit,
+ "reselect-tries": reselect_tries,
+ "service-down-action": service_down_action,
+ "slow-ramp-time": slow_ramp_time,
}
# some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
- 'allow-nat': {'type': 'yes_no', 'value': allow_nat},
- 'allow-snat': {'type': 'yes_no', 'value': allow_snat}
+ "allow-nat": {"type": "yes_no", "value": allow_nat},
+ "allow-snat": {"type": "yes_no", "value": allow_snat},
}
- #build payload
+ # build payload
payload = _loop_payload(params)
- payload['name'] = name
+ payload["name"] = name
- #determine toggles
+ # determine toggles
payload = _determine_toggles(payload, toggles)
- #specify members if provided
+ # specify members if provided
if members is not None:
- payload['members'] = _build_list(members, 'ltm:pool:members')
+ payload["members"] = _build_list(members, "ltm:pool:members")
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool',
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/pool",
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -773,30 +832,35 @@ def create_pool(hostname, username, password, name, members=None,
return _load_response(response)
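# Editor's note: _determine_toggles is likewise out of view. The "yes_no" /
# "true_false" comment in create_pool suggests something like the sketch
# below, which adds each toggle only when explicitly set and translates it
# into the vocabulary its iControl option expects; this is an assumption, not
# the verbatim helper.
def _determine_toggles(payload, toggles):
    for toggle, definition in toggles.items():
        value = definition["value"]
        if value is None:
            continue
        # normalise truthy inputs ("yes", True, ...) to a boolean first
        enabled = value in (True, "yes", "true")
        if definition["type"] == "yes_no":
            payload[toggle] = "yes" if enabled else "no"
        else:  # "true_false"
            payload[toggle] = enabled
    return payload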
-def modify_pool(hostname, username, password, name,
- allow_nat=None,
- allow_snat=None,
- description=None,
- gateway_failsafe_device=None,
- ignore_persisted_weight=None,
- ip_tos_to_client=None,
- ip_tos_to_server=None,
- link_qos_to_client=None,
- link_qos_to_server=None,
- load_balancing_mode=None,
- min_active_members=None,
- min_up_members=None,
- min_up_members_action=None,
- min_up_members_checking=None,
- monitor=None,
- profiles=None,
- queue_depth_limit=None,
- queue_on_connection_limit=None,
- queue_time_limit=None,
- reselect_tries=None,
- service_down_action=None,
- slow_ramp_time=None):
- '''
+def modify_pool(
+ hostname,
+ username,
+ password,
+ name,
+ allow_nat=None,
+ allow_snat=None,
+ description=None,
+ gateway_failsafe_device=None,
+ ignore_persisted_weight=None,
+ ip_tos_to_client=None,
+ ip_tos_to_server=None,
+ link_qos_to_client=None,
+ link_qos_to_server=None,
+ load_balancing_mode=None,
+ min_active_members=None,
+ min_up_members=None,
+ min_up_members_action=None,
+ min_up_members_checking=None,
+ monitor=None,
+ profiles=None,
+ queue_depth_limit=None,
+ queue_on_connection_limit=None,
+ queue_time_limit=None,
+ reselect_tries=None,
+ service_down_action=None,
+ slow_ramp_time=None,
+):
+ """
A function to connect to a bigip device and modify an existing pool.
hostname
@@ -866,53 +930,54 @@ def modify_pool(hostname, username, password, name,
CLI Example::
salt '*' bigip.modify_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 min_active_members=1
- '''
+ """
params = {
- 'description': description,
- 'gateway-failsafe-device': gateway_failsafe_device,
- 'ignore-persisted-weight': ignore_persisted_weight,
- 'ip-tos-to-client': ip_tos_to_client,
- 'ip-tos-to-server': ip_tos_to_server,
- 'link-qos-to-client': link_qos_to_client,
- 'link-qos-to-server': link_qos_to_server,
- 'load-balancing-mode': load_balancing_mode,
- 'min-active-members': min_active_members,
- 'min-up-members': min_up_members,
- 'min-up_members-action': min_up_members_action,
- 'min-up-members-checking': min_up_members_checking,
- 'monitor': monitor,
- 'profiles': profiles,
- 'queue-on-connection-limit': queue_on_connection_limit,
- 'queue-depth-limit': queue_depth_limit,
- 'queue-time-limit': queue_time_limit,
- 'reselect-tries': reselect_tries,
- 'service-down-action': service_down_action,
- 'slow-ramp-time': slow_ramp_time
+ "description": description,
+ "gateway-failsafe-device": gateway_failsafe_device,
+ "ignore-persisted-weight": ignore_persisted_weight,
+ "ip-tos-to-client": ip_tos_to_client,
+ "ip-tos-to-server": ip_tos_to_server,
+ "link-qos-to-client": link_qos_to_client,
+ "link-qos-to-server": link_qos_to_server,
+ "load-balancing-mode": load_balancing_mode,
+ "min-active-members": min_active_members,
+ "min-up-members": min_up_members,
+ "min-up_members-action": min_up_members_action,
+ "min-up-members-checking": min_up_members_checking,
+ "monitor": monitor,
+ "profiles": profiles,
+ "queue-on-connection-limit": queue_on_connection_limit,
+ "queue-depth-limit": queue_depth_limit,
+ "queue-time-limit": queue_time_limit,
+ "reselect-tries": reselect_tries,
+ "service-down-action": service_down_action,
+ "slow-ramp-time": slow_ramp_time,
}
# some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
- 'allow-nat': {'type': 'yes_no', 'value': allow_nat},
- 'allow-snat': {'type': 'yes_no', 'value': allow_snat}
+ "allow-nat": {"type": "yes_no", "value": allow_nat},
+ "allow-snat": {"type": "yes_no", "value": allow_snat},
}
- #build payload
+ # build payload
payload = _loop_payload(params)
- payload['name'] = name
+ payload["name"] = name
- #determine toggles
+ # determine toggles
payload = _determine_toggles(payload, toggles)
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #post to REST
+ # post to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}'.format(name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}".format(name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -921,7 +986,7 @@ def modify_pool(hostname, username, password, name,
def delete_pool(hostname, username, password, name):
- '''
+ """
A function to connect to a bigip device and delete a specific pool.
hostname
@@ -936,25 +1001,27 @@ def delete_pool(hostname, username, password, name):
CLI Example::
salt '*' bigip.delete_pool bigip admin admin my-pool
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/pool/{name}".format(name=name)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
def replace_pool_members(hostname, username, password, name, members):
- '''
+ """
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
@@ -972,47 +1039,48 @@ def replace_pool_members(hostname, username, password, name, members):
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
- '''
+ """
payload = {}
- payload['name'] = name
- #specify members if provided
+ payload["name"] = name
+ # specify members if provided
if members is not None:
if isinstance(members, six.string_types):
- members = members.split(',')
+ members = members.split(",")
pool_members = []
for member in members:
- #check to see if already a dictionary ( for states)
+            # check to see if this is already a dictionary (for states)
if isinstance(member, dict):
- #check for state alternative name 'member_state', replace with state
- if 'member_state' in member.keys():
- member['state'] = member.pop('member_state')
+ # check for state alternative name 'member_state', replace with state
+ if "member_state" in member.keys():
+ member["state"] = member.pop("member_state")
- #replace underscore with dash
+ # replace underscore with dash
for key in member:
- new_key = key.replace('_', '-')
+ new_key = key.replace("_", "-")
member[new_key] = member.pop(key)
pool_members.append(member)
- #parse string passed via execution command (for executions)
+ # parse string passed via execution command (for executions)
else:
- pool_members.append({'name': member, 'address': member.split(':')[0]})
+ pool_members.append({"name": member, "address": member.split(":")[0]})
- payload['members'] = pool_members
+ payload["members"] = pool_members
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}'.format(name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}".format(name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
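# Editor's note: a compact restatement of the member normalisation performed
# in replace_pool_members above (and again in add_pool_member below). The
# helper name is ours. Iterating over list(member) avoids mutating the dict
# while it is being iterated, which raises RuntimeError on Python 3.
def _normalize_member(member):
    # states may pass "member_state" where the REST API expects "state"
    if "member_state" in member:
        member["state"] = member.pop("member_state")
    # the REST API uses dashes where Salt kwargs use underscores
    for key in list(member):
        member[key.replace("_", "-")] = member.pop(key)
    return member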
@@ -1021,7 +1089,7 @@ def replace_pool_members(hostname, username, password, name, members):
def add_pool_member(hostname, username, password, name, member):
- '''
+ """
A function to connect to a bigip device and add a new member to an existing pool.
hostname
@@ -1041,34 +1109,35 @@ def add_pool_member(hostname, username, password, name, member):
.. code-block:: bash
salt '*' bigip.add_pool_member bigip admin admin my-pool 10.2.2.1:80
- '''
+ """
# for states
if isinstance(member, dict):
- #check for state alternative name 'member_state', replace with state
- if 'member_state' in member.keys():
- member['state'] = member.pop('member_state')
+ # check for state alternative name 'member_state', replace with state
+ if "member_state" in member.keys():
+ member["state"] = member.pop("member_state")
- #replace underscore with dash
+ # replace underscore with dash
for key in member:
- new_key = key.replace('_', '-')
+ new_key = key.replace("_", "-")
member[new_key] = member.pop(key)
payload = member
# for execution
else:
- payload = {'name': member, 'address': member.split(':')[0]}
+ payload = {"name": member, "address": member.split(":")[0]}
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}/members'.format(name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}/members".format(name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -1076,20 +1145,26 @@ def add_pool_member(hostname, username, password, name, member):
return _load_response(response)
-def modify_pool_member(hostname, username, password, name, member,
- connection_limit=None,
- description=None,
- dynamic_ratio=None,
- inherit_profile=None,
- logging=None,
- monitor=None,
- priority_group=None,
- profiles=None,
- rate_limit=None,
- ratio=None,
- session=None,
- state=None):
- '''
+def modify_pool_member(
+ hostname,
+ username,
+ password,
+ name,
+ member,
+ connection_limit=None,
+ description=None,
+ dynamic_ratio=None,
+ inherit_profile=None,
+ logging=None,
+ monitor=None,
+ priority_group=None,
+ profiles=None,
+ rate_limit=None,
+ ratio=None,
+ session=None,
+ state=None,
+):
+ """
A function to connect to a bigip device and modify an existing member of a pool.
hostname
@@ -1130,34 +1205,35 @@ def modify_pool_member(hostname, username, password, name, member,
CLI Example::
salt '*' bigip.modify_pool_member bigip admin admin my-pool 10.2.2.1:80 state=user-down session=user-disabled
- '''
+ """
params = {
- 'connection-limit': connection_limit,
- 'description': description,
- 'dynamic-ratio': dynamic_ratio,
- 'inherit-profile': inherit_profile,
- 'logging': logging,
- 'monitor': monitor,
- 'priority-group': priority_group,
- 'profiles': profiles,
- 'rate-limit': rate_limit,
- 'ratio': ratio,
- 'session': session,
- 'state': state
+ "connection-limit": connection_limit,
+ "description": description,
+ "dynamic-ratio": dynamic_ratio,
+ "inherit-profile": inherit_profile,
+ "logging": logging,
+ "monitor": monitor,
+ "priority-group": priority_group,
+ "profiles": profiles,
+ "rate-limit": rate_limit,
+ "ratio": ratio,
+ "session": session,
+ "state": state,
}
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #build payload
+ # build payload
payload = _loop_payload(params)
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}/members/{member}'.format(name=name, member=member),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}/members/{member}".format(name=name, member=member),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -1166,7 +1242,7 @@ def modify_pool_member(hostname, username, password, name, member,
def delete_pool_member(hostname, username, password, name, member):
- '''
+ """
A function to connect to a bigip device and delete a specific pool member.
hostname
@@ -1183,25 +1259,28 @@ def delete_pool_member(hostname, username, password, name, member):
CLI Example::
salt '*' bigip.delete_pool_member bigip admin admin my-pool 10.2.2.2:80
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members/{member}'.format(name=name, member=member))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/pool/{name}/members/{member}".format(name=name, member=member)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
def list_virtual(hostname, username, password, name=None):
- '''
+ """
A function to connect to a bigip device and list all virtuals or a specific virtual.
hostname
@@ -1217,63 +1296,74 @@ def list_virtual(hostname, username, password, name=None):
CLI Example::
salt '*' bigip.list_virtual bigip admin admin my-virtual
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #get to REST
+ # get to REST
try:
if name:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}/?expandSubcollections=true'.format(name=name))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/virtual/{name}/?expandSubcollections=true".format(name=name)
+ )
else:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual')
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/virtual"
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
-def create_virtual(hostname, username, password, name, destination,
- pool=None,
- address_status=None,
- auto_lasthop=None,
- bwc_policy=None,
- cmp_enabled=None,
- connection_limit=None,
- dhcp_relay=None,
- description=None,
- fallback_persistence=None,
- flow_eviction_policy=None,
- gtm_score=None,
- ip_forward=None,
- ip_protocol=None,
- internal=None,
- twelve_forward=None,
- last_hop_pool=None,
- mask=None,
- mirror=None,
- nat64=None,
- persist=None,
- profiles=None,
- policies=None,
- rate_class=None,
- rate_limit=None,
- rate_limit_mode=None,
- rate_limit_dst=None,
- rate_limit_src=None,
- rules=None,
- related_rules=None,
- reject=None,
- source=None,
- source_address_translation=None,
- source_port=None,
- state=None,
- traffic_classes=None,
- translate_address=None,
- translate_port=None,
- vlans=None):
- r'''
+def create_virtual(
+ hostname,
+ username,
+ password,
+ name,
+ destination,
+ pool=None,
+ address_status=None,
+ auto_lasthop=None,
+ bwc_policy=None,
+ cmp_enabled=None,
+ connection_limit=None,
+ dhcp_relay=None,
+ description=None,
+ fallback_persistence=None,
+ flow_eviction_policy=None,
+ gtm_score=None,
+ ip_forward=None,
+ ip_protocol=None,
+ internal=None,
+ twelve_forward=None,
+ last_hop_pool=None,
+ mask=None,
+ mirror=None,
+ nat64=None,
+ persist=None,
+ profiles=None,
+ policies=None,
+ rate_class=None,
+ rate_limit=None,
+ rate_limit_mode=None,
+ rate_limit_dst=None,
+ rate_limit_src=None,
+ rules=None,
+ related_rules=None,
+ reject=None,
+ source=None,
+ source_address_translation=None,
+ source_port=None,
+ state=None,
+ traffic_classes=None,
+ translate_address=None,
+ translate_port=None,
+ vlans=None,
+):
+ r"""
A function to connect to a bigip device and create a virtual server.
hostname
@@ -1383,139 +1473,145 @@ def create_virtual(hostname, username, password, name, destination,
traffic_classes=my-class,other-class \
vlans=enabled:external,internal
- '''
+ """
params = {
- 'pool': pool,
- 'auto-lasthop': auto_lasthop,
- 'bwc-policy': bwc_policy,
- 'connection-limit': connection_limit,
- 'description': description,
- 'fallback-persistence': fallback_persistence,
- 'flow-eviction-policy': flow_eviction_policy,
- 'gtm-score': gtm_score,
- 'ip-protocol': ip_protocol,
- 'last-hop-pool': last_hop_pool,
- 'mask': mask,
- 'mirror': mirror,
- 'nat64': nat64,
- 'persist': persist,
- 'rate-class': rate_class,
- 'rate-limit': rate_limit,
- 'rate-limit-mode': rate_limit_mode,
- 'rate-limit-dst': rate_limit_dst,
- 'rate-limit-src': rate_limit_src,
- 'source': source,
- 'source-port': source_port,
- 'translate-address': translate_address,
- 'translate-port': translate_port
+ "pool": pool,
+ "auto-lasthop": auto_lasthop,
+ "bwc-policy": bwc_policy,
+ "connection-limit": connection_limit,
+ "description": description,
+ "fallback-persistence": fallback_persistence,
+ "flow-eviction-policy": flow_eviction_policy,
+ "gtm-score": gtm_score,
+ "ip-protocol": ip_protocol,
+ "last-hop-pool": last_hop_pool,
+ "mask": mask,
+ "mirror": mirror,
+ "nat64": nat64,
+ "persist": persist,
+ "rate-class": rate_class,
+ "rate-limit": rate_limit,
+ "rate-limit-mode": rate_limit_mode,
+ "rate-limit-dst": rate_limit_dst,
+ "rate-limit-src": rate_limit_src,
+ "source": source,
+ "source-port": source_port,
+ "translate-address": translate_address,
+ "translate-port": translate_port,
}
# some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
- 'address-status': {'type': 'yes_no', 'value': address_status},
- 'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
- 'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
- 'reject': {'type': 'true_false', 'value': reject},
- '12-forward': {'type': 'true_false', 'value': twelve_forward},
- 'internal': {'type': 'true_false', 'value': internal},
- 'ip-forward': {'type': 'true_false', 'value': ip_forward}
+ "address-status": {"type": "yes_no", "value": address_status},
+ "cmp-enabled": {"type": "yes_no", "value": cmp_enabled},
+ "dhcp-relay": {"type": "true_false", "value": dhcp_relay},
+ "reject": {"type": "true_false", "value": reject},
+ "12-forward": {"type": "true_false", "value": twelve_forward},
+ "internal": {"type": "true_false", "value": internal},
+ "ip-forward": {"type": "true_false", "value": ip_forward},
}
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #build payload
+ # build payload
payload = _loop_payload(params)
- payload['name'] = name
- payload['destination'] = destination
+ payload["name"] = name
+ payload["destination"] = destination
- #determine toggles
+ # determine toggles
payload = _determine_toggles(payload, toggles)
- #specify profiles if provided
+ # specify profiles if provided
if profiles is not None:
- payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
+ payload["profiles"] = _build_list(profiles, "ltm:virtual:profile")
- #specify persist if provided
+ # specify persist if provided
if persist is not None:
- payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
+ payload["persist"] = _build_list(persist, "ltm:virtual:persist")
- #specify policies if provided
+ # specify policies if provided
if policies is not None:
- payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
+ payload["policies"] = _build_list(policies, "ltm:virtual:policy")
- #specify rules if provided
+ # specify rules if provided
if rules is not None:
- payload['rules'] = _build_list(rules, None)
+ payload["rules"] = _build_list(rules, None)
- #specify related-rules if provided
+ # specify related-rules if provided
if related_rules is not None:
- payload['related-rules'] = _build_list(related_rules, None)
+ payload["related-rules"] = _build_list(related_rules, None)
- #handle source-address-translation
+ # handle source-address-translation
if source_address_translation is not None:
- #check to see if this is already a dictionary first
+ # check to see if this is already a dictionary first
if isinstance(source_address_translation, dict):
- payload['source-address-translation'] = source_address_translation
- elif source_address_translation == 'none':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
- elif source_address_translation == 'automap':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
- elif source_address_translation == 'lsn':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
- elif source_address_translation.startswith('snat'):
- snat_pool = source_address_translation.split(':')[1]
- payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
+ payload["source-address-translation"] = source_address_translation
+ elif source_address_translation == "none":
+ payload["source-address-translation"] = {"pool": "none", "type": "none"}
+ elif source_address_translation == "automap":
+ payload["source-address-translation"] = {"pool": "none", "type": "automap"}
+ elif source_address_translation == "lsn":
+ payload["source-address-translation"] = {"pool": "none", "type": "lsn"}
+ elif source_address_translation.startswith("snat"):
+ snat_pool = source_address_translation.split(":")[1]
+ payload["source-address-translation"] = {"pool": snat_pool, "type": "snat"}
- #specify related-rules if provided
+    # specify traffic-classes if provided
if traffic_classes is not None:
- payload['traffic-classes'] = _build_list(traffic_classes, None)
+ payload["traffic-classes"] = _build_list(traffic_classes, None)
- #handle vlans
+ # handle vlans
if vlans is not None:
- #ceck to see if vlans is a dictionary (used when state makes use of function)
+        # check to see if vlans is a dictionary (used when state makes use of function)
if isinstance(vlans, dict):
try:
- payload['vlans'] = vlans['vlan_ids']
- if vlans['enabled']:
- payload['vlans-enabled'] = True
- elif vlans['disabled']:
- payload['vlans-disabled'] = True
+ payload["vlans"] = vlans["vlan_ids"]
+ if vlans["enabled"]:
+ payload["vlans-enabled"] = True
+ elif vlans["disabled"]:
+ payload["vlans-disabled"] = True
except Exception: # pylint: disable=broad-except
- return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
- elif vlans == 'none':
- payload['vlans'] = 'none'
- elif vlans == 'default':
- payload['vlans'] = 'default'
- elif isinstance(vlans, six.string_types) and (vlans.startswith('enabled') or vlans.startswith('disabled')):
+ return "Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
+ vlans=vlans
+ )
+ elif vlans == "none":
+ payload["vlans"] = "none"
+ elif vlans == "default":
+ payload["vlans"] = "default"
+ elif isinstance(vlans, six.string_types) and (
+ vlans.startswith("enabled") or vlans.startswith("disabled")
+ ):
try:
- vlans_setting = vlans.split(':')[0]
- payload['vlans'] = vlans.split(':')[1].split(',')
- if vlans_setting == 'disabled':
- payload['vlans-disabled'] = True
- elif vlans_setting == 'enabled':
- payload['vlans-enabled'] = True
+ vlans_setting = vlans.split(":")[0]
+ payload["vlans"] = vlans.split(":")[1].split(",")
+ if vlans_setting == "disabled":
+ payload["vlans-disabled"] = True
+ elif vlans_setting == "enabled":
+ payload["vlans-enabled"] = True
except Exception: # pylint: disable=broad-except
- return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
+ return "Error: Unable to Parse vlans option: \n\tvlans={vlans}".format(
+ vlans=vlans
+ )
else:
- return 'Error: vlans must be a dictionary or string.'
+ return "Error: vlans must be a dictionary or string."
- #determine state
+ # determine state
if state is not None:
- if state == 'enabled':
- payload['enabled'] = True
- elif state == 'disabled':
- payload['disabled'] = True
+ if state == "enabled":
+ payload["enabled"] = True
+ elif state == "disabled":
+ payload["disabled"] = True
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/virtual',
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname) + "/ltm/virtual",
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -1523,47 +1619,52 @@ def create_virtual(hostname, username, password, name, destination,
return _load_response(response)
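# Editor's note: the vlans handling above accepts either a dict (used by
# states) or a shorthand string such as "enabled:external,internal". A minimal
# sketch of the string form, with the function name ours:
def _parse_vlans(vlans):
    setting, _, ids = vlans.partition(":")  # "enabled", "external,internal"
    payload = {"vlans": ids.split(",")}
    payload["vlans-enabled" if setting == "enabled" else "vlans-disabled"] = True
    return payload

# _parse_vlans("enabled:external,internal")
# -> {"vlans": ["external", "internal"], "vlans-enabled": True}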
-def modify_virtual(hostname, username, password, name,
- destination=None,
- pool=None,
- address_status=None,
- auto_lasthop=None,
- bwc_policy=None,
- cmp_enabled=None,
- connection_limit=None,
- dhcp_relay=None,
- description=None,
- fallback_persistence=None,
- flow_eviction_policy=None,
- gtm_score=None,
- ip_forward=None,
- ip_protocol=None,
- internal=None,
- twelve_forward=None,
- last_hop_pool=None,
- mask=None,
- mirror=None,
- nat64=None,
- persist=None,
- profiles=None,
- policies=None,
- rate_class=None,
- rate_limit=None,
- rate_limit_mode=None,
- rate_limit_dst=None,
- rate_limit_src=None,
- rules=None,
- related_rules=None,
- reject=None,
- source=None,
- source_address_translation=None,
- source_port=None,
- state=None,
- traffic_classes=None,
- translate_address=None,
- translate_port=None,
- vlans=None):
- '''
+def modify_virtual(
+ hostname,
+ username,
+ password,
+ name,
+ destination=None,
+ pool=None,
+ address_status=None,
+ auto_lasthop=None,
+ bwc_policy=None,
+ cmp_enabled=None,
+ connection_limit=None,
+ dhcp_relay=None,
+ description=None,
+ fallback_persistence=None,
+ flow_eviction_policy=None,
+ gtm_score=None,
+ ip_forward=None,
+ ip_protocol=None,
+ internal=None,
+ twelve_forward=None,
+ last_hop_pool=None,
+ mask=None,
+ mirror=None,
+ nat64=None,
+ persist=None,
+ profiles=None,
+ policies=None,
+ rate_class=None,
+ rate_limit=None,
+ rate_limit_mode=None,
+ rate_limit_dst=None,
+ rate_limit_src=None,
+ rules=None,
+ related_rules=None,
+ reject=None,
+ source=None,
+ source_address_translation=None,
+ source_port=None,
+ state=None,
+ traffic_classes=None,
+ translate_address=None,
+ translate_port=None,
+ vlans=None,
+):
+ """
A function to connect to a bigip device and modify an existing virtual server.
hostname
@@ -1662,132 +1763,137 @@ def modify_virtual(hostname, username, password, name,
salt '*' bigip.modify_virtual bigip admin admin my-virtual source_address_translation=none
salt '*' bigip.modify_virtual bigip admin admin my-virtual rules=my-rule,my-other-rule
- '''
+ """
params = {
- 'destination': destination,
- 'pool': pool,
- 'auto-lasthop': auto_lasthop,
- 'bwc-policy': bwc_policy,
- 'connection-limit': connection_limit,
- 'description': description,
- 'fallback-persistence': fallback_persistence,
- 'flow-eviction-policy': flow_eviction_policy,
- 'gtm-score': gtm_score,
- 'ip-protocol': ip_protocol,
- 'last-hop-pool': last_hop_pool,
- 'mask': mask,
- 'mirror': mirror,
- 'nat64': nat64,
- 'persist': persist,
- 'rate-class': rate_class,
- 'rate-limit': rate_limit,
- 'rate-limit-mode': rate_limit_mode,
- 'rate-limit-dst': rate_limit_dst,
- 'rate-limit-src': rate_limit_src,
- 'source': source,
- 'source-port': source_port,
- 'translate-address': translate_address,
- 'translate-port': translate_port
+ "destination": destination,
+ "pool": pool,
+ "auto-lasthop": auto_lasthop,
+ "bwc-policy": bwc_policy,
+ "connection-limit": connection_limit,
+ "description": description,
+ "fallback-persistence": fallback_persistence,
+ "flow-eviction-policy": flow_eviction_policy,
+ "gtm-score": gtm_score,
+ "ip-protocol": ip_protocol,
+ "last-hop-pool": last_hop_pool,
+ "mask": mask,
+ "mirror": mirror,
+ "nat64": nat64,
+ "persist": persist,
+ "rate-class": rate_class,
+ "rate-limit": rate_limit,
+ "rate-limit-mode": rate_limit_mode,
+ "rate-limit-dst": rate_limit_dst,
+ "rate-limit-src": rate_limit_src,
+ "source": source,
+ "source-port": source_port,
+ "translate-address": translate_address,
+ "translate-port": translate_port,
}
# some options take yes/no, others take true/false. Figure out when to use which without
# confusing the end user
toggles = {
- 'address-status': {'type': 'yes_no', 'value': address_status},
- 'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
- 'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
- 'reject': {'type': 'true_false', 'value': reject},
- '12-forward': {'type': 'true_false', 'value': twelve_forward},
- 'internal': {'type': 'true_false', 'value': internal},
- 'ip-forward': {'type': 'true_false', 'value': ip_forward}
+ "address-status": {"type": "yes_no", "value": address_status},
+ "cmp-enabled": {"type": "yes_no", "value": cmp_enabled},
+ "dhcp-relay": {"type": "true_false", "value": dhcp_relay},
+ "reject": {"type": "true_false", "value": reject},
+ "12-forward": {"type": "true_false", "value": twelve_forward},
+ "internal": {"type": "true_false", "value": internal},
+ "ip-forward": {"type": "true_false", "value": ip_forward},
}
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #build payload
+ # build payload
payload = _loop_payload(params)
- payload['name'] = name
+ payload["name"] = name
- #determine toggles
+ # determine toggles
payload = _determine_toggles(payload, toggles)
- #specify profiles if provided
+ # specify profiles if provided
if profiles is not None:
- payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
+ payload["profiles"] = _build_list(profiles, "ltm:virtual:profile")
- #specify persist if provided
+ # specify persist if provided
if persist is not None:
- payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
+ payload["persist"] = _build_list(persist, "ltm:virtual:persist")
- #specify policies if provided
+ # specify policies if provided
if policies is not None:
- payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
+ payload["policies"] = _build_list(policies, "ltm:virtual:policy")
- #specify rules if provided
+ # specify rules if provided
if rules is not None:
- payload['rules'] = _build_list(rules, None)
+ payload["rules"] = _build_list(rules, None)
- #specify related-rules if provided
+ # specify related-rules if provided
if related_rules is not None:
- payload['related-rules'] = _build_list(related_rules, None)
+ payload["related-rules"] = _build_list(related_rules, None)
- #handle source-address-translation
+ # handle source-address-translation
if source_address_translation is not None:
- if source_address_translation == 'none':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
- elif source_address_translation == 'automap':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
- elif source_address_translation == 'lsn':
- payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
- elif source_address_translation.startswith('snat'):
- snat_pool = source_address_translation.split(':')[1]
- payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
+ if source_address_translation == "none":
+ payload["source-address-translation"] = {"pool": "none", "type": "none"}
+ elif source_address_translation == "automap":
+ payload["source-address-translation"] = {"pool": "none", "type": "automap"}
+ elif source_address_translation == "lsn":
+ payload["source-address-translation"] = {"pool": "none", "type": "lsn"}
+ elif source_address_translation.startswith("snat"):
+ snat_pool = source_address_translation.split(":")[1]
+ payload["source-address-translation"] = {"pool": snat_pool, "type": "snat"}
- #specify related-rules if provided
+    # specify traffic-classes if provided
if traffic_classes is not None:
- payload['traffic-classes'] = _build_list(traffic_classes, None)
+ payload["traffic-classes"] = _build_list(traffic_classes, None)
- #handle vlans
+ # handle vlans
if vlans is not None:
- #ceck to see if vlans is a dictionary (used when state makes use of function)
+        # check to see if vlans is a dictionary (used when state makes use of function)
if isinstance(vlans, dict):
try:
- payload['vlans'] = vlans['vlan_ids']
- if vlans['enabled']:
- payload['vlans-enabled'] = True
- elif vlans['disabled']:
- payload['vlans-disabled'] = True
+ payload["vlans"] = vlans["vlan_ids"]
+ if vlans["enabled"]:
+ payload["vlans-enabled"] = True
+ elif vlans["disabled"]:
+ payload["vlans-disabled"] = True
except Exception: # pylint: disable=broad-except
- return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
- elif vlans == 'none':
- payload['vlans'] = 'none'
- elif vlans == 'default':
- payload['vlans'] = 'default'
- elif vlans.startswith('enabled') or vlans.startswith('disabled'):
+ return "Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}".format(
+ vlans=vlans
+ )
+ elif vlans == "none":
+ payload["vlans"] = "none"
+ elif vlans == "default":
+ payload["vlans"] = "default"
+ elif vlans.startswith("enabled") or vlans.startswith("disabled"):
try:
- vlans_setting = vlans.split(':')[0]
- payload['vlans'] = vlans.split(':')[1].split(',')
- if vlans_setting == 'disabled':
- payload['vlans-disabled'] = True
- elif vlans_setting == 'enabled':
- payload['vlans-enabled'] = True
+ vlans_setting = vlans.split(":")[0]
+ payload["vlans"] = vlans.split(":")[1].split(",")
+ if vlans_setting == "disabled":
+ payload["vlans-disabled"] = True
+ elif vlans_setting == "enabled":
+ payload["vlans-enabled"] = True
except Exception: # pylint: disable=broad-except
- return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
+ return "Error: Unable to Parse vlans option: \n\tvlans={vlans}".format(
+ vlans=vlans
+ )
- #determine state
+ # determine state
if state is not None:
- if state == 'enabled':
- payload['enabled'] = True
- elif state == 'disabled':
- payload['disabled'] = True
+ if state == "enabled":
+ payload["enabled"] = True
+ elif state == "disabled":
+ payload["disabled"] = True
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/virtual/{name}'.format(name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/virtual/{name}".format(name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
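# Editor's note: create_virtual and modify_virtual both expand the
# source_address_translation shorthand into the dict iControl expects. A
# table-driven equivalent (illustrative only):
def _sat_payload(value):
    simple = {
        "none": {"pool": "none", "type": "none"},
        "automap": {"pool": "none", "type": "automap"},
        "lsn": {"pool": "none", "type": "lsn"},
    }
    if value in simple:
        return simple[value]
    if value.startswith("snat"):
        # "snat:<pool-name>" selects an explicit SNAT pool
        return {"pool": value.split(":")[1], "type": "snat"}
    return None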
@@ -1796,7 +1902,7 @@ def modify_virtual(hostname, username, password, name,
def delete_virtual(hostname, username, password, name):
- '''
+ """
A function to connect to a bigip device and delete a specific virtual.
hostname
@@ -1811,25 +1917,30 @@ def delete_virtual(hostname, username, password, name):
CLI Example::
salt '*' bigip.delete_virtual bigip admin admin my-virtual
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}'.format(name=name))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/virtual/{name}".format(name=name)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
-def list_monitor(hostname, username, password, monitor_type, name=None, ):
- '''
+def list_monitor(
+ hostname, username, password, monitor_type, name=None,
+):
+ """
A function to connect to a bigip device and list an existing monitor. If no name is provided then all
monitors of the specified type will be listed.
@@ -1848,17 +1959,25 @@ def list_monitor(hostname, username, password, monitor_type, name=None, ):
salt '*' bigip.list_monitor bigip admin admin http my-http-monitor
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #get to REST
+ # get to REST
try:
if name:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}?expandSubcollections=true'.format(type=monitor_type, name=name))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/monitor/{type}/{name}?expandSubcollections=true".format(
+ type=monitor_type, name=name
+ )
+ )
else:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}'.format(type=monitor_type))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/monitor/{type}".format(type=monitor_type)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -1866,7 +1985,7 @@ def list_monitor(hostname, username, password, monitor_type, name=None, ):
def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
- '''
+ """
A function to connect to a bigip device and create a monitor.
hostname
@@ -1886,28 +2005,29 @@ def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
CLI Example::
salt '*' bigip.create_monitor bigip admin admin http my-http-monitor timeout=10 interval=5
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #construct the payload
+ # construct the payload
payload = {}
- payload['name'] = name
+ payload["name"] = name
- #there's a ton of different monitors and a ton of options for each type of monitor.
- #this logic relies that the end user knows which options are meant for which monitor types
+ # there's a ton of different monitors and a ton of options for each type of monitor.
+    # this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
- if not key.startswith('__'):
- if key not in ['hostname', 'username', 'password', 'type']:
- key = key.replace('_', '-')
+ if not key.startswith("__"):
+ if key not in ["hostname", "username", "password", "type"]:
+ key = key.replace("_", "-")
payload[key] = value
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/monitor/{type}'.format(type=monitor_type),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/monitor/{type}".format(type=monitor_type),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
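# Editor's note: create_monitor and modify_monitor forward arbitrary kwargs
# into the REST payload after filtering. A sketch of that filter, using the
# reserved-key list from create_monitor above:
def _monitor_payload(name, **kwargs):
    payload = {"name": name}
    for key, value in kwargs.items():
        # skip Salt-injected dunder kwargs and the function's own parameters
        if key.startswith("__") or key in ("hostname", "username", "password", "type"):
            continue
        payload[key.replace("_", "-")] = value
    return payload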
@@ -1916,7 +2036,7 @@ def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
- '''
+ """
A function to connect to a bigip device and modify an existing monitor.
hostname
@@ -1937,27 +2057,28 @@ def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
salt '*' bigip.modify_monitor bigip admin admin http my-http-monitor timeout=16 interval=6
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #construct the payload
+ # construct the payload
payload = {}
- #there's a ton of different monitors and a ton of options for each type of monitor.
- #this logic relies that the end user knows which options are meant for which monitor types
+ # there's a ton of different monitors and a ton of options for each type of monitor.
+    # this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
- if not key.startswith('__'):
- if key not in ['hostname', 'username', 'password', 'type', 'name']:
- key = key.replace('_', '-')
+ if not key.startswith("__"):
+ if key not in ["hostname", "username", "password", "type", "name"]:
+ key = key.replace("_", "-")
payload[key] = value
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/monitor/{type}/{name}".format(type=monitor_type, name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -1966,7 +2087,7 @@ def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
def delete_monitor(hostname, username, password, monitor_type, name):
- '''
+ """
A function to connect to a bigip device and delete an existing monitor.
hostname
@@ -1984,25 +2105,30 @@ def delete_monitor(hostname, username, password, monitor_type, name):
salt '*' bigip.delete_monitor bigip admin admin http my-http-monitor
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/monitor/{type}/{name}".format(type=monitor_type, name=name)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
-def list_profile(hostname, username, password, profile_type, name=None, ):
- '''
+def list_profile(
+ hostname, username, password, profile_type, name=None,
+):
+ """
A function to connect to a bigip device and list an existing profile. If no name is provided then all
profiles of the specified type will be listed.
@@ -2021,17 +2147,25 @@ def list_profile(hostname, username, password, profile_type, name=None, ):
salt '*' bigip.list_profile bigip admin admin http my-http-profile
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #get to REST
+ # get to REST
try:
if name:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}?expandSubcollections=true'.format(type=profile_type, name=name))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/profile/{type}/{name}?expandSubcollections=true".format(
+ type=profile_type, name=name
+ )
+ )
else:
- response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}'.format(type=profile_type))
+ response = bigip_session.get(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/profile/{type}".format(type=profile_type)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -2039,7 +2173,7 @@ def list_profile(hostname, username, password, profile_type, name=None, ):
def create_profile(hostname, username, password, profile_type, name, **kwargs):
- r'''
+ r"""
A function to connect to a bigip device and create a profile.
hostname
@@ -2086,32 +2220,35 @@ def create_profile(hostname, username, password, profile_type, name, **kwargs):
salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
enforcement=maxHeaderCount:3200,maxRequests:10
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #construct the payload
+ # construct the payload
payload = {}
- payload['name'] = name
+ payload["name"] = name
- #there's a ton of different profiles and a ton of options for each type of profile.
- #this logic relies that the end user knows which options are meant for which profile types
+ # there's a ton of different profiles and a ton of options for each type of profile.
+    # this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
- if not key.startswith('__'):
- if key not in ['hostname', 'username', 'password', 'profile_type']:
- key = key.replace('_', '-')
+ if not key.startswith("__"):
+ if key not in ["hostname", "username", "password", "profile_type"]:
+ key = key.replace("_", "-")
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
- return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
+ return "Error: Unable to Parse JSON data for parameter: {key}\n{value}".format(
+ key=key, value=value
+ )
- #post to REST
+ # post to REST
try:
response = bigip_session.post(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/profile/{type}'.format(type=profile_type),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/profile/{type}".format(type=profile_type),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -2120,7 +2257,7 @@ def create_profile(hostname, username, password, profile_type, name, **kwargs):
def modify_profile(hostname, username, password, profile_type, name, **kwargs):
- r'''
+ r"""
A function to connect to a bigip device and modify an existing profile.
@@ -2174,32 +2311,35 @@ def modify_profile(hostname, username, password, profile_type, name, **kwargs):
salt '*' bigip.modify_profile bigip admin admin client-ssl my-client-ssl-1 retainCertificate=false \
ciphers='DEFAULT\:!SSLv3'
cert_key_chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'
- '''
+ """
- #build session
+ # build session
bigip_session = _build_session(username, password)
- #construct the payload
+ # construct the payload
payload = {}
- payload['name'] = name
+ payload["name"] = name
- #there's a ton of different profiles and a ton of options for each type of profile.
- #this logic relies that the end user knows which options are meant for which profile types
+ # there's a ton of different profiles and a ton of options for each type of profile.
+    # this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
- if not key.startswith('__'):
- if key not in ['hostname', 'username', 'password', 'profile_type']:
- key = key.replace('_', '-')
+ if not key.startswith("__"):
+ if key not in ["hostname", "username", "password", "profile_type"]:
+ key = key.replace("_", "-")
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
- return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
+ return "Error: Unable to Parse JSON data for parameter: {key}\n{value}".format(
+ key=key, value=value
+ )
- #put to REST
+ # put to REST
try:
response = bigip_session.put(
- BIG_IP_URL_BASE.format(host=hostname) + '/ltm/profile/{type}/{name}'.format(type=profile_type, name=name),
- data=salt.utils.json.dumps(payload)
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/profile/{type}/{name}".format(type=profile_type, name=name),
+ data=salt.utils.json.dumps(payload),
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
@@ -2208,7 +2348,7 @@ def modify_profile(hostname, username, password, profile_type, name, **kwargs):
def delete_profile(hostname, username, password, profile_type, name):
- '''
+ """
A function to connect to a bigip device and delete an existing profile.
hostname
@@ -2226,18 +2366,21 @@ def delete_profile(hostname, username, password, profile_type, name):
salt '*' bigip.delete_profile bigip admin admin http my-http-profile
- '''
+ """
- #build sessions
+ # build sessions
bigip_session = _build_session(username, password)
- #delete to REST
+ # delete to REST
try:
- response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}'.format(type=profile_type, name=name))
+ response = bigip_session.delete(
+ BIG_IP_URL_BASE.format(host=hostname)
+ + "/ltm/profile/{type}/{name}".format(type=profile_type, name=name)
+ )
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
- if _load_response(response) == '':
+ if _load_response(response) == "":
return True
else:
return _load_response(response)
diff --git a/salt/modules/bluez_bluetooth.py b/salt/modules/bluez_bluetooth.py
index 7f10425fefe..ac1d2a29008 100644
--- a/salt/modules/bluez_bluetooth.py
+++ b/salt/modules/bluez_bluetooth.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for Bluetooth (using BlueZ in Linux).
The following packages are required for this module:
@@ -8,45 +8,48 @@ The following packages are required packages for this module:
bluez-libs >= 5.7
bluez-utils >= 5.7
pybluez >= 0.18
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-# Import 3rd-party libs
-# pylint: disable=import-error
-from salt.ext.six.moves import shlex_quote as _cmd_quote
-# pylint: enable=import-error
-
# Import salt libs
import salt.utils.validate.net
from salt.exceptions import CommandExecutionError
+# Import 3rd-party libs
+# pylint: disable=import-error
+from salt.ext.six.moves import shlex_quote as _cmd_quote
+
+# pylint: enable=import-error
+
HAS_PYBLUEZ = False
try:
import bluetooth # pylint: disable=import-error
+
HAS_PYBLUEZ = True
except ImportError:
pass
-__func_alias__ = {
- 'address_': 'address'
-}
+__func_alias__ = {"address_": "address"}
# Define the module's virtual name
-__virtualname__ = 'bluetooth'
+__virtualname__ = "bluetooth"
def __virtual__():
- '''
+ """
Only load the module if bluetooth is installed
- '''
+ """
if HAS_PYBLUEZ:
return __virtualname__
- return (False, 'The bluetooth execution module cannot be loaded: bluetooth not installed.')
+ return (
+ False,
+ "The bluetooth execution module cannot be loaded: bluetooth not installed.",
+ )
def version():
- '''
+ """
Return Bluez version from bluetoothctl -v
CLI Example:
@@ -54,20 +57,20 @@ def version():
.. code-block:: bash
salt '*' bluetooth.version
- '''
- cmd = 'bluetoothctl -v'
- out = __salt__['cmd.run'](cmd).splitlines()
+ """
+ cmd = "bluetoothctl -v"
+ out = __salt__["cmd.run"](cmd).splitlines()
bluez_version = out[0]
- pybluez_version = '<= 0.18 (Unknown, but installed)'
+ pybluez_version = "<= 0.18 (Unknown, but installed)"
try:
pybluez_version = bluetooth.__version__
except Exception as exc: # pylint: disable=broad-except
pass
- return {'Bluez': bluez_version, 'PyBluez': pybluez_version}
+ return {"Bluez": bluez_version, "PyBluez": pybluez_version}
def address_():
- '''
+ """
Get the many addresses of the Bluetooth adapter
CLI Example:
@@ -75,31 +78,31 @@ def address_():
.. code-block:: bash
salt '*' bluetooth.address
- '''
+ """
ret = {}
- cmd = 'hciconfig'
- out = __salt__['cmd.run'](cmd).splitlines()
- dev = ''
+ cmd = "hciconfig"
+ out = __salt__["cmd.run"](cmd).splitlines()
+ dev = ""
for line in out:
- if line.startswith('hci'):
- comps = line.split(':')
+ if line.startswith("hci"):
+ comps = line.split(":")
dev = comps[0]
ret[dev] = {
- 'device': dev,
- 'path': '/sys/class/bluetooth/{0}'.format(dev),
+ "device": dev,
+ "path": "/sys/class/bluetooth/{0}".format(dev),
}
- if 'BD Address' in line:
+ if "BD Address" in line:
comps = line.split()
- ret[dev]['address'] = comps[2]
- if 'DOWN' in line:
- ret[dev]['power'] = 'off'
- if 'UP RUNNING' in line:
- ret[dev]['power'] = 'on'
+ ret[dev]["address"] = comps[2]
+ if "DOWN" in line:
+ ret[dev]["power"] = "off"
+ if "UP RUNNING" in line:
+ ret[dev]["power"] = "on"
return ret
def power(dev, mode):
- '''
+ """
Power a bluetooth device on or off
CLI Examples:
@@ -108,26 +111,26 @@ def power(dev, mode):
salt '*' bluetooth.power hci0 on
salt '*' bluetooth.power hci0 off
- '''
+ """
if dev not in address_():
- raise CommandExecutionError('Invalid dev passed to bluetooth.power')
+ raise CommandExecutionError("Invalid dev passed to bluetooth.power")
- if mode == 'on' or mode is True:
- state = 'up'
- mode = 'on'
+ if mode == "on" or mode is True:
+ state = "up"
+ mode = "on"
else:
- state = 'down'
- mode = 'off'
- cmd = 'hciconfig {0} {1}'.format(dev, state)
- __salt__['cmd.run'](cmd).splitlines()
+ state = "down"
+ mode = "off"
+ cmd = "hciconfig {0} {1}".format(dev, state)
+ __salt__["cmd.run"](cmd).splitlines()
info = address_()
- if info[dev]['power'] == mode:
+ if info[dev]["power"] == mode:
return True
return False
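
power() accepts either the strings "on"/"off" or a boolean (as YAML would deliver it),
maps that onto an hciconfig state, then confirms the change by re-reading the adapter via
address_() rather than trusting the command's exit status. The coercion step in isolation:

.. code-block:: python

    def coerce_mode(mode):
        # Returns (hciconfig state, expected 'power' value from address_()).
        if mode == "on" or mode is True:
            return "up", "on"
        return "down", "off"

    # coerce_mode(True)  -> ("up", "on")
    # coerce_mode("off") -> ("down", "off")
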
def discoverable(dev):
- '''
+ """
Enable this bluetooth device to be discoverable.
CLI Example:
@@ -135,23 +138,21 @@ def discoverable(dev):
.. code-block:: bash
salt '*' bluetooth.discoverable hci0
- '''
+ """
if dev not in address_():
- raise CommandExecutionError(
- 'Invalid dev passed to bluetooth.discoverable'
- )
+ raise CommandExecutionError("Invalid dev passed to bluetooth.discoverable")
- cmd = 'hciconfig {0} iscan'.format(dev)
- __salt__['cmd.run'](cmd).splitlines()
- cmd = 'hciconfig {0}'.format(dev)
- out = __salt__['cmd.run'](cmd)
- if 'UP RUNNING ISCAN' in out:
+ cmd = "hciconfig {0} iscan".format(dev)
+ __salt__["cmd.run"](cmd).splitlines()
+ cmd = "hciconfig {0}".format(dev)
+ out = __salt__["cmd.run"](cmd)
+ if "UP RUNNING ISCAN" in out:
return True
return False
def noscan(dev):
- '''
+ """
Turn off scanning modes on this device.
CLI Example:
@@ -159,21 +160,21 @@ def noscan(dev):
.. code-block:: bash
salt '*' bluetooth.noscan hci0
- '''
+ """
if dev not in address_():
- raise CommandExecutionError('Invalid dev passed to bluetooth.noscan')
+ raise CommandExecutionError("Invalid dev passed to bluetooth.noscan")
- cmd = 'hciconfig {0} noscan'.format(dev)
- __salt__['cmd.run'](cmd).splitlines()
- cmd = 'hciconfig {0}'.format(dev)
- out = __salt__['cmd.run'](cmd)
- if 'SCAN' in out:
+ cmd = "hciconfig {0} noscan".format(dev)
+ __salt__["cmd.run"](cmd).splitlines()
+ cmd = "hciconfig {0}".format(dev)
+ out = __salt__["cmd.run"](cmd)
+ if "SCAN" in out:
return False
return True
def scan():
- '''
+ """
Scan for bluetooth devices in the area
CLI Example:
@@ -181,7 +182,7 @@ def scan():
.. code-block:: bash
salt '*' bluetooth.scan
- '''
+ """
ret = []
devices = bluetooth.discover_devices(lookup_names=True)
for device in devices:
@@ -190,7 +191,7 @@ def scan():
def block(bdaddr):
- '''
+ """
Block a specific bluetooth device by BD Address
CLI Example:
@@ -198,18 +199,16 @@ def block(bdaddr):
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
- '''
+ """
if not salt.utils.validate.net.mac(bdaddr):
- raise CommandExecutionError(
- 'Invalid BD address passed to bluetooth.block'
- )
+ raise CommandExecutionError("Invalid BD address passed to bluetooth.block")
- cmd = 'hciconfig {0} block'.format(bdaddr)
- __salt__['cmd.run'](cmd).splitlines()
+ cmd = "hciconfig {0} block".format(bdaddr)
+ __salt__["cmd.run"](cmd).splitlines()
def unblock(bdaddr):
- '''
+ """
Unblock a specific bluetooth device by BD Address
CLI Example:
@@ -217,18 +216,16 @@ def unblock(bdaddr):
.. code-block:: bash
salt '*' bluetooth.unblock DE:AD:BE:EF:CA:FE
- '''
+ """
if not salt.utils.validate.net.mac(bdaddr):
- raise CommandExecutionError(
- 'Invalid BD address passed to bluetooth.unblock'
- )
+ raise CommandExecutionError("Invalid BD address passed to bluetooth.unblock")
- cmd = 'hciconfig {0} unblock'.format(bdaddr)
- __salt__['cmd.run'](cmd).splitlines()
+ cmd = "hciconfig {0} unblock".format(bdaddr)
+ __salt__["cmd.run"](cmd).splitlines()
def pair(address, key):
- '''
+ """
Pair the bluetooth adapter with a device
CLI Example:
@@ -242,29 +239,27 @@ def pair(address, key):
TODO: This function is currently broken, as the bluez-simple-agent program
no longer ships with BlueZ >= 5.0. It needs to be refactored.
- '''
+ """
if not salt.utils.validate.net.mac(address):
- raise CommandExecutionError(
- 'Invalid BD address passed to bluetooth.pair'
- )
+ raise CommandExecutionError("Invalid BD address passed to bluetooth.pair")
try:
int(key)
except Exception: # pylint: disable=broad-except
raise CommandExecutionError(
- 'bluetooth.pair requires a numerical key to be used'
+ "bluetooth.pair requires a numerical key to be used"
)
addy = address_()
- cmd = 'echo {0} | bluez-simple-agent {1} {2}'.format(
- _cmd_quote(addy['device']), _cmd_quote(address), _cmd_quote(key)
+ cmd = "echo {0} | bluez-simple-agent {1} {2}".format(
+ _cmd_quote(addy["device"]), _cmd_quote(address), _cmd_quote(key)
)
- out = __salt__['cmd.run'](cmd, python_shell=True).splitlines()
+ out = __salt__["cmd.run"](cmd, python_shell=True).splitlines()
return out
def unpair(address):
- '''
+ """
Unpair the bluetooth adapter from a device
CLI Example:
@@ -277,19 +272,17 @@ def unpair(address):
TODO: This function is currently broken, as the bluez-simple-agent program
no longer ships with BlueZ >= 5.0. It needs to be refactored.
- '''
+ """
if not salt.utils.validate.net.mac(address):
- raise CommandExecutionError(
- 'Invalid BD address passed to bluetooth.unpair'
- )
+ raise CommandExecutionError("Invalid BD address passed to bluetooth.unpair")
- cmd = 'bluez-test-device remove {0}'.format(address)
- out = __salt__['cmd.run'](cmd).splitlines()
+ cmd = "bluez-test-device remove {0}".format(address)
+ out = __salt__["cmd.run"](cmd).splitlines()
return out
def start():
- '''
+ """
Start the bluetooth service.
CLI Example:
@@ -297,13 +290,13 @@ def start():
.. code-block:: bash
salt '*' bluetooth.start
- '''
- out = __salt__['service.start']('bluetooth')
+ """
+ out = __salt__["service.start"]("bluetooth")
return out
def stop():
- '''
+ """
Stop the bluetooth service.
CLI Example:
@@ -311,6 +304,6 @@ def stop():
.. code-block:: bash
salt '*' bluetooth.stop
- '''
- out = __salt__['service.stop']('bluetooth')
+ """
+ out = __salt__["service.stop"]("bluetooth")
return out
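
address_() above is a line-oriented parse of hciconfig output. The same logic, run
against canned text instead of __salt__["cmd.run"] (the sample output is illustrative,
not captured from a real adapter):

.. code-block:: python

    SAMPLE = (
        "hci0:   Type: Primary  Bus: USB\n"
        "        BD Address: 00:1A:7D:DA:71:13  ACL MTU: 310:10  SCO MTU: 64:8\n"
        "        UP RUNNING\n"
    )

    def parse_hciconfig(out):
        ret, dev = {}, ""
        for line in out.splitlines():
            if line.startswith("hci"):
                dev = line.split(":")[0]  # e.g. 'hci0'
                ret[dev] = {"device": dev, "path": "/sys/class/bluetooth/{0}".format(dev)}
            if "BD Address" in line:
                ret[dev]["address"] = line.split()[2]
            if "DOWN" in line:
                ret[dev]["power"] = "off"
            if "UP RUNNING" in line:
                ret[dev]["power"] = "on"
        return ret

    # parse_hciconfig(SAMPLE)["hci0"]["address"] -> '00:1A:7D:DA:71:13'
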
diff --git a/salt/modules/boto3_elasticache.py b/salt/modules/boto3_elasticache.py
index 0cd48f5b273..8d20a9c6ee8 100644
--- a/salt/modules/boto3_elasticache.py
+++ b/salt/modules/boto3_elasticache.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Execution module for Amazon Elasticache using boto3
===================================================
@@ -41,56 +41,61 @@ Execution module for Amazon Elasticache using boto3
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
-# Import Salt libs
-from salt.exceptions import SaltInvocationError, CommandExecutionError
import salt.utils.compat
import salt.utils.versions
+# Import Salt libs
+from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Import third party libs
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import botocore
import boto3
- #pylint: enable=unused-import
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ # pylint: enable=unused-import
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
return salt.utils.versions.check_boto_reqs()
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO3:
- __utils__['boto3.assign_funcs'](__name__, 'elasticache',
- get_conn_funcname='_get_conn',
- cache_id_funcname='_cache_id',
- exactly_one_funcname=None)
+ __utils__["boto3.assign_funcs"](
+ __name__,
+ "elasticache",
+ get_conn_funcname="_get_conn",
+ cache_id_funcname="_cache_id",
+ exactly_one_funcname=None,
+ )
-def _collect_results(func, item, args, marker='Marker'):
+def _collect_results(func, item, args, marker="Marker"):
ret = []
- Marker = args[marker] if marker in args else ''
+ Marker = args[marker] if marker in args else ""
while Marker is not None:
r = func(**args)
ret += r.get(item)
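
The tail of _collect_results falls outside this hunk, but the shape of the pattern is the
usual AWS Marker pagination: call the describe function, accumulate the named result list,
and keep feeding the returned continuation token back in until the service stops sending
one. A self-contained sketch of that loop (details of the real tail may differ):

.. code-block:: python

    def collect_paginated(func, item, args, marker="Marker"):
        results = []
        while True:
            page = func(**args)
            results += page.get(item, [])
            token = page.get(marker)
            if not token:  # no continuation token: last page reached
                return results
            args[marker] = token  # request the next page
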
@@ -99,18 +104,30 @@ def _collect_results(func, item, args, marker='Marker'):
return ret
-def _describe_resource(name=None, name_param=None, res_type=None, info_node=None, conn=None,
- region=None, key=None, keyid=None, profile=None, **args):
+def _describe_resource(
+ name=None,
+ name_param=None,
+ res_type=None,
+ info_node=None,
+ conn=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
if conn is None:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- func = 'describe_'+res_type+'s'
+ func = "describe_" + res_type + "s"
f = getattr(conn, func)
except (AttributeError, KeyError) as e:
- raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message))
+ raise SaltInvocationError(
+ "No function '{0}()' found: {1}".format(func, e.message)
+ )
# Undocumented, but you can't pass 'Marker' if searching for a specific resource...
- args.update({name_param: name} if name else {'Marker': ''})
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args.update({name_param: name} if name else {"Marker": ""})
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
return _collect_results(f, info_node, args)
except botocore.exceptions.ClientError as e:
@@ -118,170 +135,233 @@ def _describe_resource(name=None, name_param=None, res_type=None, info_node=None
return None
-def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None,
- status_gone='deleted', region=None, key=None, keyid=None, profile=None,
- **args):
- '''
+def _delete_resource(
+ name,
+ name_param,
+ desc,
+ res_type,
+ wait=0,
+ status_param=None,
+ status_gone="deleted",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
+ """
Delete a generic Elasticache resource.
- '''
+ """
try:
wait = int(wait)
except Exception: # pylint: disable=broad-except
- raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an "
- "int or boolean.".format(wait))
+ raise SaltInvocationError(
+ "Bad value ('{0}') passed for 'wait' param - must be an "
+ "int or boolean.".format(wait)
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
- name, name_param, args[name_param]
+ name,
+ name_param,
+ args[name_param],
)
name = args[name_param]
else:
args[name_param] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
- func = 'delete_'+res_type
+ func = "delete_" + res_type
f = getattr(conn, func)
if wait:
- func = 'describe_'+res_type+'s'
+ func = "describe_" + res_type + "s"
s = globals()[func]
except (AttributeError, KeyError) as e:
- raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message))
+ raise SaltInvocationError(
+ "No function '{0}()' found: {1}".format(func, e.message)
+ )
try:
f(**args)
if not wait:
- log.info('%s %s deletion requested.', desc.title(), name)
+ log.info("%s %s deletion requested.", desc.title(), name)
return True
- log.info('Waiting up to %s seconds for %s %s to be deleted.', wait, desc, name)
+ log.info("Waiting up to %s seconds for %s %s to be deleted.", wait, desc, name)
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if not r or (r and r[0].get(status_param) == status_gone):
- log.info('%s %s deleted.', desc.title(), name)
+ log.info("%s %s deleted.", desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
- log.info('Sleeping %s seconds for %s %s to be deleted.',
- sleep, desc, name)
+ log.info("Sleeping %s seconds for %s %s to be deleted.", sleep, desc, name)
time.sleep(sleep)
wait -= sleep
- log.error('%s %s not deleted after %s seconds!', desc.title(), name, orig_wait)
+ log.error("%s %s not deleted after %s seconds!", desc.title(), name, orig_wait)
return False
except botocore.exceptions.ClientError as e:
- log.error('Failed to delete %s %s: %s', desc, name, e)
+ log.error("Failed to delete %s %s: %s", desc, name, e)
return False
-def _create_resource(name, name_param=None, desc=None, res_type=None, wait=0, status_param=None,
- status_good='available', region=None, key=None, keyid=None, profile=None,
- **args):
+def _create_resource(
+ name,
+ name_param=None,
+ desc=None,
+ res_type=None,
+ wait=0,
+ status_param=None,
+ status_good="available",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
try:
wait = int(wait)
except Exception: # pylint: disable=broad-except
- raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an "
- "int or boolean.".format(wait))
+ raise SaltInvocationError(
+ "Bad value ('{0}') passed for 'wait' param - must be an "
+ "int or boolean.".format(wait)
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
- name, name_param, args[name_param]
+ name,
+ name_param,
+ args[name_param],
)
name = args[name_param]
else:
args[name_param] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
- func = 'create_'+res_type
+ func = "create_" + res_type
f = getattr(conn, func)
if wait:
- func = 'describe_'+res_type+'s'
+ func = "describe_" + res_type + "s"
s = globals()[func]
except (AttributeError, KeyError) as e:
- raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message))
+ raise SaltInvocationError(
+ "No function '{0}()' found: {1}".format(func, e.message)
+ )
try:
f(**args)
if not wait:
- log.info('%s %s created.', desc.title(), name)
+ log.info("%s %s created.", desc.title(), name)
return True
- log.info('Waiting up to %s seconds for %s %s to be become available.',
- wait, desc, name)
+ log.info(
+ "Waiting up to %s seconds for %s %s to be become available.",
+ wait,
+ desc,
+ name,
+ )
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if r and r[0].get(status_param) == status_good:
- log.info('%s %s created and available.', desc.title(), name)
+ log.info("%s %s created and available.", desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
- log.info('Sleeping %s seconds for %s %s to become available.',
- sleep, desc, name)
+ log.info(
+ "Sleeping %s seconds for %s %s to become available.", sleep, desc, name
+ )
time.sleep(sleep)
wait -= sleep
- log.error('%s %s not available after %s seconds!',
- desc.title(), name, orig_wait)
+ log.error(
+ "%s %s not available after %s seconds!", desc.title(), name, orig_wait
+ )
return False
except botocore.exceptions.ClientError as e:
- msg = 'Failed to create {0} {1}: {2}'.format(desc, name, e)
+ msg = "Failed to create {0} {1}: {2}".format(desc, name, e)
log.error(msg)
return False
-def _modify_resource(name, name_param=None, desc=None, res_type=None, wait=0, status_param=None,
- status_good='available', region=None, key=None, keyid=None, profile=None,
- **args):
+def _modify_resource(
+ name,
+ name_param=None,
+ desc=None,
+ res_type=None,
+ wait=0,
+ status_param=None,
+ status_good="available",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
try:
wait = int(wait)
except Exception: # pylint: disable=broad-except
- raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an "
- "int or boolean.".format(wait))
+ raise SaltInvocationError(
+ "Bad value ('{0}') passed for 'wait' param - must be an "
+ "int or boolean.".format(wait)
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
- name, name_param, args[name_param]
+ name,
+ name_param,
+ args[name_param],
)
name = args[name_param]
else:
args[name_param] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
- func = 'modify_'+res_type
+ func = "modify_" + res_type
f = getattr(conn, func)
if wait:
- func = 'describe_'+res_type+'s'
+ func = "describe_" + res_type + "s"
s = globals()[func]
except (AttributeError, KeyError) as e:
- raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message))
+ raise SaltInvocationError(
+ "No function '{0}()' found: {1}".format(func, e.message)
+ )
try:
f(**args)
if not wait:
- log.info('%s %s modification requested.', desc.title(), name)
+ log.info("%s %s modification requested.", desc.title(), name)
return True
- log.info('Waiting up to %s seconds for %s %s to be become available.',
- wait, desc, name)
+ log.info(
+ "Waiting up to %s seconds for %s %s to be become available.",
+ wait,
+ desc,
+ name,
+ )
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if r and r[0].get(status_param) == status_good:
- log.info('%s %s modified and available.', desc.title(), name)
+ log.info("%s %s modified and available.", desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
- log.info('Sleeping %s seconds for %s %s to become available.',
- sleep, desc, name)
+ log.info(
+ "Sleeping %s seconds for %s %s to become available.", sleep, desc, name
+ )
time.sleep(sleep)
wait -= sleep
- log.error('%s %s not available after %s seconds!',
- desc.title(), name, orig_wait)
+ log.error(
+ "%s %s not available after %s seconds!", desc.title(), name, orig_wait
+ )
return False
except botocore.exceptions.ClientError as e:
- msg = 'Failed to modify {0} {1}: {2}'.format(desc, name, e)
+ msg = "Failed to modify {0} {1}: {2}".format(desc, name, e)
log.error(msg)
return False
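
_create_resource, _modify_resource and _delete_resource above all share the same polling
scheme: re-describe the resource, and between polls sleep in 60-second slices (or the
remainder, once less than a minute of budget is left) so the total wait never overshoots
the caller's limit. The scheme in isolation, with a hypothetical cluster_status() standing
in for the describe call:

.. code-block:: python

    import time

    def wait_for(predicate, wait):
        while wait > 0:
            if predicate():
                return True
            sleep = wait if wait % 60 == wait else 60  # wait % 60 == wait iff wait < 60
            time.sleep(sleep)
            wait -= sleep
        return False

    # wait_for(lambda: cluster_status() == "available", wait=600)
    # polls roughly once a minute for up to ten minutes.
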
-def describe_cache_clusters(name=None, conn=None, region=None, key=None,
- keyid=None, profile=None, **args):
- '''
+def describe_cache_clusters(
+ name=None, conn=None, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Return details about all (or just one) Elasticache cache clusters.
Example:
@@ -290,14 +370,25 @@ def describe_cache_clusters(name=None, conn=None, region=None, key=None,
salt myminion boto3_elasticache.describe_cache_clusters
salt myminion boto3_elasticache.describe_cache_clusters myelasticache
- '''
- return _describe_resource(name=name, name_param='CacheClusterId', res_type='cache_cluster',
- info_node='CacheClusters', conn=conn, region=region, key=key,
- keyid=keyid, profile=profile, **args)
+ """
+ return _describe_resource(
+ name=name,
+ name_param="CacheClusterId",
+ res_type="cache_cluster",
+ info_node="CacheClusters",
+ conn=conn,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def cache_cluster_exists(name, conn=None, region=None, key=None, keyid=None, profile=None):
- '''
+def cache_cluster_exists(
+ name, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if a cache cluster exists.
Example:
@@ -305,13 +396,25 @@ def cache_cluster_exists(name, conn=None, region=None, key=None, keyid=None, pro
.. code-block:: bash
salt myminion boto3_elasticache.cache_cluster_exists myelasticache
- '''
- return bool(describe_cache_clusters(name=name, conn=conn, region=region, key=key, keyid=keyid, profile=profile))
+ """
+ return bool(
+ describe_cache_clusters(
+ name=name, conn=conn, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
-def create_cache_cluster(name, wait=600, security_groups=None,
- region=None, key=None, keyid=None, profile=None, **args):
- '''
+def create_cache_cluster(
+ name,
+ wait=600,
+ security_groups=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
+ """
Create a cache cluster.
Example:
@@ -324,24 +427,43 @@ def create_cache_cluster(name, wait=600, security_groups=None,
NumCacheNodes=1 \
SecurityGroupIds='[sg-11223344]' \
CacheSubnetGroupName=myCacheSubnetGroup
- '''
+ """
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
- sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
- key=key, keyid=keyid, profile=profile)
- if 'SecurityGroupIds' not in args:
- args['SecurityGroupIds'] = []
- args['SecurityGroupIds'] += sgs
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _create_resource(name, name_param='CacheClusterId', desc='cache cluster',
- res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ sgs = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups=security_groups, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "SecurityGroupIds" not in args:
+ args["SecurityGroupIds"] = []
+ args["SecurityGroupIds"] += sgs
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _create_resource(
+ name,
+ name_param="CacheClusterId",
+ desc="cache cluster",
+ res_type="cache_cluster",
+ wait=wait,
+ status_param="CacheClusterStatus",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def modify_cache_cluster(name, wait=600, security_groups=None, region=None,
- key=None, keyid=None, profile=None, **args):
- '''
+def modify_cache_cluster(
+ name,
+ wait=600,
+ security_groups=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
+ """
Update a cache cluster in place.
Notes: {ApplyImmediately: False} is pretty danged silly in the context of salt.
@@ -359,23 +481,36 @@ def modify_cache_cluster(name, wait=600, security_groups=None, region=None,
salt myminion boto3_elasticache.modify_cache_cluster name=myCacheCluster \
NotificationTopicStatus=inactive
- '''
+ """
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
- sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
- key=key, keyid=keyid, profile=profile)
- if 'SecurityGroupIds' not in args:
- args['SecurityGroupIds'] = []
- args['SecurityGroupIds'] += sgs
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _modify_resource(name, name_param='CacheClusterId', desc='cache cluster',
- res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ sgs = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups=security_groups, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "SecurityGroupIds" not in args:
+ args["SecurityGroupIds"] = []
+ args["SecurityGroupIds"] += sgs
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _modify_resource(
+ name,
+ name_param="CacheClusterId",
+ desc="cache cluster",
+ res_type="cache_cluster",
+ wait=wait,
+ status_param="CacheClusterStatus",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def delete_cache_cluster(name, wait=600, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def delete_cache_cluster(
+ name, wait=600, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Delete a cache cluster.
Example:
@@ -383,15 +518,26 @@ def delete_cache_cluster(name, wait=600, region=None, key=None, keyid=None, prof
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_cluster myelasticache
- '''
- return _delete_resource(name, name_param='CacheClusterId', desc='cache cluster',
- res_type='cache_cluster', wait=wait,
- status_param='CacheClusterStatus',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _delete_resource(
+ name,
+ name_param="CacheClusterId",
+ desc="cache cluster",
+ res_type="cache_cluster",
+ wait=wait,
+ status_param="CacheClusterStatus",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def describe_replication_groups(name=None, conn=None, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_replication_groups(
+ name=None, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return details about all (or just one) Elasticache replication groups.
Example:
@@ -400,14 +546,22 @@ def describe_replication_groups(name=None, conn=None, region=None, key=None, key
salt myminion boto3_elasticache.describe_replication_groups
salt myminion boto3_elasticache.describe_replication_groups myelasticache
- '''
- return _describe_resource(name=name, name_param='ReplicationGroupId',
- res_type='replication_group', info_node='ReplicationGroups',
- conn=conn, region=region, key=key, keyid=keyid, profile=profile)
+ """
+ return _describe_resource(
+ name=name,
+ name_param="ReplicationGroupId",
+ res_type="replication_group",
+ info_node="ReplicationGroups",
+ conn=conn,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
def replication_group_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a replication group exists.
Example:
@@ -415,14 +569,25 @@ def replication_group_exists(name, region=None, key=None, keyid=None, profile=No
.. code-block:: bash
salt myminion boto3_elasticache.replication_group_exists myelasticache
- '''
- return bool(describe_replication_groups(name=name, region=region, key=key, keyid=keyid,
- profile=profile))
+ """
+ return bool(
+ describe_replication_groups(
+ name=name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
-def create_replication_group(name, wait=600, security_groups=None, region=None, key=None, keyid=None,
- profile=None, **args):
- '''
+def create_replication_group(
+ name,
+ wait=600,
+ security_groups=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
+ """
Create a replication group.
Params are extensive and variable - see
http://boto3.readthedocs.io/en/latest/reference/services/elasticache.html?#ElastiCache.Client.create_replication_group
@@ -435,24 +600,43 @@ def create_replication_group(name, wait=600, security_groups=None, region=None,
salt myminion boto3_elasticache.create_replication_group \
name=myelasticache \
ReplicationGroupDescription=description
- '''
+ """
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
- sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
- key=key, keyid=keyid, profile=profile)
- if 'SecurityGroupIds' not in args:
- args['SecurityGroupIds'] = []
- args['SecurityGroupIds'] += sgs
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _create_resource(name, name_param='ReplicationGroupId', desc='replication group',
- res_type='replication_group', wait=wait, status_param='Status',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ sgs = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups=security_groups, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "SecurityGroupIds" not in args:
+ args["SecurityGroupIds"] = []
+ args["SecurityGroupIds"] += sgs
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _create_resource(
+ name,
+ name_param="ReplicationGroupId",
+ desc="replication group",
+ res_type="replication_group",
+ wait=wait,
+ status_param="Status",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def modify_replication_group(name, wait=600, security_groups=None, region=None, key=None, keyid=None,
- profile=None, **args):
- '''
+def modify_replication_group(
+ name,
+ wait=600,
+ security_groups=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **args
+):
+ """
Modify a replication group.
Example:
@@ -462,23 +646,36 @@ def modify_replication_group(name, wait=600, security_groups=None, region=None,
salt myminion boto3_elasticache.modify_replication_group \
name=myelasticache \
ReplicationGroupDescription=newDescription
- '''
+ """
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
- sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
- key=key, keyid=keyid, profile=profile)
- if 'SecurityGroupIds' not in args:
- args['SecurityGroupIds'] = []
- args['SecurityGroupIds'] += sgs
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _modify_resource(name, name_param='ReplicationGroupId', desc='replication group',
- res_type='replication_group', wait=wait, status_param='Status',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ sgs = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups=security_groups, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "SecurityGroupIds" not in args:
+ args["SecurityGroupIds"] = []
+ args["SecurityGroupIds"] += sgs
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _modify_resource(
+ name,
+ name_param="ReplicationGroupId",
+ desc="replication group",
+ res_type="replication_group",
+ wait=wait,
+ status_param="Status",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def delete_replication_group(name, wait=600, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def delete_replication_group(
+ name, wait=600, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Delete an ElastiCache replication group, optionally taking a snapshot first.
Example:
@@ -486,14 +683,26 @@ def delete_replication_group(name, wait=600, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto3_elasticache.delete_replication_group my-replication-group
- '''
- return _delete_resource(name, name_param='ReplicationGroupId', desc='replication group',
- res_type='replication_group', wait=wait, status_param='Status',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _delete_resource(
+ name,
+ name_param="ReplicationGroupId",
+ desc="replication group",
+ res_type="replication_group",
+ wait=wait,
+ status_param="Status",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def describe_cache_subnet_groups(name=None, conn=None, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_cache_subnet_groups(
+ name=None, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return details about all (or just one) Elasticache cache subnet groups.
Example:
@@ -501,14 +710,22 @@ def describe_cache_subnet_groups(name=None, conn=None, region=None, key=None, ke
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_subnet_groups region=us-east-1
- '''
- return _describe_resource(name=name, name_param='CacheSubnetGroupName',
- res_type='cache_subnet_group', info_node='CacheSubnetGroups',
- conn=conn, region=region, key=key, keyid=keyid, profile=profile)
+ """
+ return _describe_resource(
+ name=name,
+ name_param="CacheSubnetGroupName",
+ res_type="cache_subnet_group",
+ info_node="CacheSubnetGroups",
+ conn=conn,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
def cache_subnet_group_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an ElastiCache subnet group exists.
Example:
@@ -516,12 +733,16 @@ def cache_subnet_group_exists(name, region=None, key=None, keyid=None, profile=N
.. code-block:: bash
salt myminion boto3_elasticache.cache_subnet_group_exists my-subnet-group
- '''
- return bool(describe_cache_subnet_groups(name=name, region=region, key=key, keyid=keyid, profile=profile))
+ """
+ return bool(
+ describe_cache_subnet_groups(
+ name=name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
def list_cache_subnet_groups(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return a list of all cache subnet group names
Example:
@@ -529,13 +750,17 @@ def list_cache_subnet_groups(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto3_elasticache.list_cache_subnet_groups region=us-east-1
- '''
- return [g['CacheSubnetGroupName'] for g in
- describe_cache_subnet_groups(None, region, key, keyid, profile)]
+ """
+ return [
+ g["CacheSubnetGroupName"]
+ for g in describe_cache_subnet_groups(None, region, key, keyid, profile)
+ ]
-def create_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def create_cache_subnet_group(
+ name, subnets=None, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Create an ElastiCache subnet group
Example:
@@ -545,35 +770,52 @@ def create_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=N
salt myminion boto3_elasticache.create_cache_subnet_group name=my-subnet-group \
CacheSubnetGroupDescription="description" \
subnets='[myVPCSubnet1,myVPCSubnet2]'
- '''
+ """
if subnets:
- if 'SubnetIds' not in args:
- args['SubnetIds'] = []
+ if "SubnetIds" not in args:
+ args["SubnetIds"] = []
if not isinstance(subnets, list):
subnets = [subnets]
for subnet in subnets:
- if subnet.startswith('subnet-'):
+ if subnet.startswith("subnet-"):
# Moderately safe assumption... :) Will be caught further down if incorrect.
- args['SubnetIds'] += [subnet]
+ args["SubnetIds"] += [subnet]
continue
- sn = __salt__['boto_vpc.describe_subnets'](subnet_names=subnet, region=region, key=key,
- keyid=keyid, profile=profile).get('subnets')
+ sn = __salt__["boto_vpc.describe_subnets"](
+ subnet_names=subnet,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("subnets")
if not sn:
raise SaltInvocationError(
- 'Could not resolve Subnet Name {0} to an ID.'.format(subnet))
+ "Could not resolve Subnet Name {0} to an ID.".format(subnet)
+ )
if len(sn) == 1:
- args['SubnetIds'] += [sn[0]['id']]
+ args["SubnetIds"] += [sn[0]["id"]]
elif len(sn) > 1:
raise CommandExecutionError(
- 'Subnet Name {0} returned more than one ID.'.format(subnet))
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _create_resource(name, name_param='CacheSubnetGroupName', desc='cache subnet group',
- res_type='cache_subnet_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ "Subnet Name {0} returned more than one ID.".format(subnet)
+ )
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _create_resource(
+ name,
+ name_param="CacheSubnetGroupName",
+ desc="cache subnet group",
+ res_type="cache_subnet_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def modify_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def modify_cache_subnet_group(
+ name, subnets=None, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Modify an ElastiCache subnet group
Example:
@@ -583,35 +825,51 @@ def modify_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=N
salt myminion boto3_elasticache.modify_cache_subnet_group \
name=my-subnet-group \
subnets='[myVPCSubnet3]'
- '''
+ """
if subnets:
- if 'SubnetIds' not in args:
- args['SubnetIds'] = []
+ if "SubnetIds" not in args:
+ args["SubnetIds"] = []
if not isinstance(subnets, list):
subnets = [subnets]
for subnet in subnets:
- sn = __salt__['boto_vpc.describe_subnets'](subnet_names=subnet,
- region=region, key=key, keyid=keyid,
- profile=profile).get('subnets')
+ sn = __salt__["boto_vpc.describe_subnets"](
+ subnet_names=subnet,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("subnets")
if len(sn) == 1:
- args['SubnetIds'] += [sn[0]['id']]
+ args["SubnetIds"] += [sn[0]["id"]]
elif len(sn) > 1:
raise CommandExecutionError(
- 'Subnet Name {0} returned more than one ID.'.format(subnet))
- elif subnet.startswith('subnet-'):
+ "Subnet Name {0} returned more than one ID.".format(subnet)
+ )
+ elif subnet.startswith("subnet-"):
# Moderately safe assumption... :) Will be caught later if incorrect.
- args['SubnetIds'] += [subnet]
+ args["SubnetIds"] += [subnet]
else:
raise SaltInvocationError(
- 'Could not resolve Subnet Name {0} to an ID.'.format(subnet))
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
- return _modify_resource(name, name_param='CacheSubnetGroupName', desc='cache subnet group',
- res_type='cache_subnet_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ "Could not resolve Subnet Name {0} to an ID.".format(subnet)
+ )
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
+ return _modify_resource(
+ name,
+ name_param="CacheSubnetGroupName",
+ desc="cache subnet group",
+ res_type="cache_subnet_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def delete_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def delete_cache_subnet_group(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Delete an ElastiCache subnet group.
Example:
@@ -619,14 +877,24 @@ def delete_cache_subnet_group(name, region=None, key=None, keyid=None, profile=N
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_subnet_group my-subnet-group region=us-east-1
- '''
- return _delete_resource(name, name_param='CacheSubnetGroupName',
- desc='cache subnet group', res_type='cache_subnet_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _delete_resource(
+ name,
+ name_param="CacheSubnetGroupName",
+ desc="cache subnet group",
+ res_type="cache_subnet_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def describe_cache_security_groups(name=None, conn=None, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_cache_security_groups(
+ name=None, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return details about all (or just one) Elasticache cache security groups.
Example:
@@ -635,15 +903,22 @@ def describe_cache_security_groups(name=None, conn=None, region=None, key=None,
salt myminion boto3_elasticache.describe_cache_security_groups
salt myminion boto3_elasticache.describe_cache_security_groups mycachesecgrp
- '''
- return _describe_resource(name=name, name_param='CacheSecurityGroupName',
- res_type='cache_security_group',
- info_node='CacheSecurityGroups', conn=conn, region=region, key=key,
- keyid=keyid, profile=profile)
+ """
+ return _describe_resource(
+ name=name,
+ name_param="CacheSecurityGroupName",
+ res_type="cache_security_group",
+ info_node="CacheSecurityGroups",
+ conn=conn,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
def cache_security_group_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an ElastiCache security group exists.
Example:
@@ -651,12 +926,18 @@ def cache_security_group_exists(name, region=None, key=None, keyid=None, profile
.. code-block:: bash
salt myminion boto3_elasticache.cache_security_group_exists mysecuritygroup
- '''
- return bool(describe_cache_security_groups(name=name, region=region, key=key, keyid=keyid, profile=profile))
+ """
+ return bool(
+ describe_cache_security_groups(
+ name=name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
-def create_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def create_cache_security_group(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Create a cache security group.
Example:
@@ -664,14 +945,24 @@ def create_cache_security_group(name, region=None, key=None, keyid=None, profile
.. code-block:: bash
salt myminion boto3_elasticache.create_cache_security_group mycachesecgrp Description='My Cache Security Group'
- '''
- return _create_resource(name, name_param='CacheSecurityGroupName', desc='cache security group',
- res_type='cache_security_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _create_resource(
+ name,
+ name_param="CacheSecurityGroupName",
+ desc="cache security group",
+ res_type="cache_security_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def delete_cache_security_group(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Delete a cache security group.
Example:
@@ -679,14 +970,24 @@ def delete_cache_security_group(name, region=None, key=None, keyid=None, profile
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
- '''
- return _delete_resource(name, name_param='CacheSecurityGroupName',
- desc='cache security group', res_type='cache_security_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _delete_resource(
+ name,
+ name_param="CacheSecurityGroupName",
+ desc="cache security group",
+ res_type="cache_security_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def authorize_cache_security_group_ingress(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def authorize_cache_security_group_ingress(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Authorize network ingress from an ec2 security group to a cache security group.
Example:
@@ -697,30 +998,36 @@ def authorize_cache_security_group_ingress(name, region=None, key=None, keyid=No
mycachesecgrp \
EC2SecurityGroupName=someEC2sg \
EC2SecurityGroupOwnerId=SOMEOWNERID
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'CacheSecurityGroupName' in args:
+ if "CacheSecurityGroupName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
"'CacheSecurityGroupName: %s'",
- name, args['CacheSecurityGroupName']
+ name,
+ args["CacheSecurityGroupName"],
)
- name = args['CacheSecurityGroupName']
+ name = args["CacheSecurityGroupName"]
else:
- args['CacheSubnetGroupName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["CacheSubnetGroupName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
conn.authorize_cache_security_group_ingress(**args)
- log.info('Authorized %s to cache security group %s.',
- args['EC2SecurityGroupName'], name)
+ log.info(
+ "Authorized %s to cache security group %s.",
+ args["EC2SecurityGroupName"],
+ name,
+ )
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to update security group %s: %s', name, e)
+ log.error("Failed to update security group %s: %s", name, e)
return False
-def revoke_cache_security_group_ingress(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def revoke_cache_security_group_ingress(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Revoke network ingress from an ec2 security group to a cache security
group.
@@ -732,30 +1039,36 @@ def revoke_cache_security_group_ingress(name, region=None, key=None, keyid=None,
mycachesecgrp \
EC2SecurityGroupName=someEC2sg \
EC2SecurityGroupOwnerId=SOMEOWNERID
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'CacheSecurityGroupName' in args:
+ if "CacheSecurityGroupName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
"'CacheSecurityGroupName: %s'",
- name, args['CacheSecurityGroupName']
+ name,
+ args["CacheSecurityGroupName"],
)
- name = args['CacheSecurityGroupName']
+ name = args["CacheSecurityGroupName"]
else:
- args['CacheSubnetGroupName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["CacheSubnetGroupName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
conn.revoke_cache_security_group_ingress(**args)
- log.info('Revoked %s from cache security group %s.',
- args['EC2SecurityGroupName'], name)
+ log.info(
+ "Revoked %s from cache security group %s.",
+ args["EC2SecurityGroupName"],
+ name,
+ )
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to update security group %s: %s', name, e)
+ log.error("Failed to update security group %s: %s", name, e)
return False
-def list_tags_for_resource(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def list_tags_for_resource(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
List tags on an Elasticache resource.
Note that this function is essentially useless as it requires a full AWS ARN for the
@@ -771,29 +1084,31 @@ def list_tags_for_resource(name, region=None, key=None, keyid=None, profile=None
salt myminion boto3_elasticache.list_tags_for_resource \
name='arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'ResourceName' in args:
+ if "ResourceName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
- "'ResourceName: %s'", name, args['ResourceName']
+ "'ResourceName: %s'",
+ name,
+ args["ResourceName"],
)
- name = args['ResourceName']
+ name = args["ResourceName"]
else:
- args['ResourceName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["ResourceName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
r = conn.list_tags_for_resource(**args)
- if r and 'Taglist' in r:
- return r['TagList']
+ if r and "Taglist" in r:
+ return r["TagList"]
return []
except botocore.exceptions.ClientError as e:
- log.error('Failed to list tags for resource %s: %s', name, e)
+ log.error("Failed to list tags for resource %s: %s", name, e)
return []
def add_tags_to_resource(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+ """
Add tags to an Elasticache resource.
Note that this function is essentially useless as it requires a full AWS ARN for the
@@ -810,28 +1125,32 @@ def add_tags_to_resource(name, region=None, key=None, keyid=None, profile=None,
salt myminion boto3_elasticache.add_tags_to_resource \
name='arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot' \
Tags="[{'Key': 'TeamOwner', 'Value': 'infrastructure'}]"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'ResourceName' in args:
+ if "ResourceName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
- "'ResourceName: %s'", name, args['ResourceName']
+ "'ResourceName: %s'",
+ name,
+ args["ResourceName"],
)
- name = args['ResourceName']
+ name = args["ResourceName"]
else:
- args['ResourceName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["ResourceName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
conn.add_tags_to_resource(**args)
- log.info('Added tags %s to %s.', args['Tags'], name)
+ log.info("Added tags %s to %s.", args["Tags"], name)
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to add tags to %s: %s', name, e)
+ log.error("Failed to add tags to %s: %s", name, e)
return False
-def remove_tags_from_resource(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def remove_tags_from_resource(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Remove tags from an Elasticache resource.
Note that this function is essentially useless as it requires a full AWS ARN for the
@@ -848,28 +1167,30 @@ def remove_tags_from_resource(name, region=None, key=None, keyid=None, profile=N
salt myminion boto3_elasticache.remove_tags_from_resource \
name='arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot' \
TagKeys="['TeamOwner']"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'ResourceName' in args:
+ if "ResourceName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
- "'ResourceName: %s'", name, args['ResourceName']
+ "'ResourceName: %s'",
+ name,
+ args["ResourceName"],
)
- name = args['ResourceName']
+ name = args["ResourceName"]
else:
- args['ResourceName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["ResourceName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
conn.remove_tags_from_resource(**args)
- log.info('Added tags %s to %s.', args['Tags'], name)
+ log.info("Added tags %s to %s.", args["Tags"], name)
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to add tags to %s: %s', name, e)
+ log.error("Failed to add tags to %s: %s", name, e)
return False
def copy_snapshot(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+ """
Make a copy of an existing snapshot.
Example:
@@ -878,29 +1199,32 @@ def copy_snapshot(name, region=None, key=None, keyid=None, profile=None, **args)
salt myminion boto3_elasticache.copy_snapshot name=mySnapshot \
TargetSnapshotName=copyOfMySnapshot
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if 'SourceSnapshotName' in args:
+ if "SourceSnapshotName" in args:
log.info(
"'name: %s' param being overridden by explicitly provided "
- "'SourceSnapshotName: %s'", name, args['SourceSnapshotName']
+ "'SourceSnapshotName: %s'",
+ name,
+ args["SourceSnapshotName"],
)
- name = args['SourceSnapshotName']
+ name = args["SourceSnapshotName"]
else:
- args['SourceSnapshotName'] = name
- args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
+ args["SourceSnapshotName"] = name
+ args = dict([(k, v) for k, v in args.items() if not k.startswith("_")])
try:
conn.copy_snapshot(**args)
- log.info('Snapshot %s copied to %s.', name, args['TargetSnapshotName'])
+ log.info("Snapshot %s copied to %s.", name, args["TargetSnapshotName"])
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to copy snapshot %s: %s', name, e)
+ log.error("Failed to copy snapshot %s: %s", name, e)
return False
-def describe_cache_parameter_groups(name=None, conn=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def describe_cache_parameter_groups(
+ name=None, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return details about all (or just one) Elasticache cache parameter groups.
Example:
@@ -909,14 +1233,24 @@ def describe_cache_parameter_groups(name=None, conn=None, region=None, key=None,
salt myminion boto3_elasticache.describe_cache_parameter_groups
salt myminion boto3_elasticache.describe_cache_parameter_groups myParameterGroup
- '''
- return _describe_resource(name=name, name_param='CacheParameterGroupName',
- res_type='cache_parameter_group', info_node='CacheParameterGroups',
- conn=conn, region=region, key=key, keyid=keyid, profile=profile)
+ """
+ return _describe_resource(
+ name=name,
+ name_param="CacheParameterGroupName",
+ res_type="cache_parameter_group",
+ info_node="CacheParameterGroups",
+ conn=conn,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def create_cache_parameter_group(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def create_cache_parameter_group(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Create a cache parameter group.
Example:
@@ -927,14 +1261,24 @@ def create_cache_parameter_group(name, region=None, key=None, keyid=None, profil
name=myParamGroup \
CacheParameterGroupFamily=redis2.8 \
Description="My Parameter Group"
- '''
- return _create_resource(name, name_param='CacheParameterGroupName',
- desc='cache parameter group', res_type='cache_parameter_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _create_resource(
+ name,
+ name_param="CacheParameterGroupName",
+ desc="cache parameter group",
+ res_type="cache_parameter_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
-def delete_cache_parameter_group(name, region=None, key=None, keyid=None, profile=None, **args):
- '''
+def delete_cache_parameter_group(
+ name, region=None, key=None, keyid=None, profile=None, **args
+):
+ """
Delete a cache parameter group.
Example:
@@ -942,7 +1286,15 @@ def delete_cache_parameter_group(name, region=None, key=None, keyid=None, profil
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_parameter_group myParamGroup
- '''
- return _delete_resource(name, name_param='CacheParameterGroupName',
- desc='cache parameter group', res_type='cache_parameter_group',
- region=region, key=key, keyid=keyid, profile=profile, **args)
+ """
+ return _delete_resource(
+ name,
+ name_param="CacheParameterGroupName",
+ desc="cache parameter group",
+ res_type="cache_parameter_group",
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ **args
+ )
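
Every public function in this module repeats args = dict([(k, v) for k, v in args.items()
if not k.startswith("_")]) before forwarding **args to boto3. The reason: when a function
is invoked from the salt CLI, the loader injects bookkeeping kwargs such as __pub_fun and
__pub_jid, and boto3 rejects any parameter it does not recognize. Stripping
underscore-prefixed keys in one pass keeps only the CamelCase API parameters (the example
values below are illustrative):

.. code-block:: python

    raw_kwargs = {
        "CacheClusterId": "myelasticache",
        "NumCacheNodes": 1,
        "__pub_fun": "boto3_elasticache.create_cache_cluster",  # injected by Salt
        "__pub_jid": "20190101000000000000",                    # injected by Salt
    }
    boto_kwargs = {k: v for k, v in raw_kwargs.items() if not k.startswith("_")}
    # boto_kwargs -> {'CacheClusterId': 'myelasticache', 'NumCacheNodes': 1}
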
diff --git a/salt/modules/boto3_elasticsearch.py b/salt/modules/boto3_elasticsearch.py
index b50c128e834..7644afefdb7 100644
--- a/salt/modules/boto3_elasticsearch.py
+++ b/salt/modules/boto3_elasticsearch.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Elasticsearch Service
.. versionadded:: Natrium
@@ -46,20 +46,22 @@ Connection module for Amazon Elasticsearch Service
:codeauthor: Herbert Buurman
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-# Import Salt libs
-from salt.ext import six
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
from salt.exceptions import SaltInvocationError
+
+# Import Salt libs
+from salt.ext import six
from salt.utils.decorators import depends
# Import third party libs
@@ -69,9 +71,11 @@ try:
# pylint: disable=unused-import
import boto3
import botocore
+
# pylint: enable=unused-import
from botocore.exceptions import ClientError, ParamValidationError, WaiterError
- logging.getLogger('boto3').setLevel(logging.INFO)
+
+ logging.getLogger("boto3").setLevel(logging.INFO)
except ImportError:
pass
@@ -79,25 +83,29 @@ log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
- return salt.utils.versions.check_boto_reqs(boto3_ver='1.2.7')
+ """
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.2.7")
def __init__(opts):
_ = opts
salt.utils.compat.pack_dunder(__name__)
- __utils__['boto3.assign_funcs'](__name__, 'es')
+ __utils__["boto3.assign_funcs"](__name__, "es")
def add_tags(
- domain_name=None,
- arn=None,
- tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+ domain_name=None,
+ arn=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key-value pairs.
An Elasticsearch domain may have up to 10 tags.
@@ -118,39 +126,53 @@ def add_tags(
.. code-block:: bash
salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}'
- '''
+ """
if not any((arn, domain_name)):
- raise SaltInvocationError('At least one of domain_name or arn must be specified.')
- ret = {'result': False}
+ raise SaltInvocationError(
+ "At least one of domain_name or arn must be specified."
+ )
+ ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
- region=region, key=key, keyid=keyid, profile=profile)
- if 'error' in res:
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if "error" in res:
ret.update(res)
- elif not res['result']:
- ret.update({'error': 'The domain with name "{}" does not exist.'.format(domain_name)})
+ elif not res["result"]:
+ ret.update(
+ {
+ "error": 'The domain with name "{}" does not exist.'.format(
+ domain_name
+ )
+ }
+ )
else:
- arn = res['response'].get('ARN')
+ arn = res["response"].get("ARN")
if arn:
boto_params = {
- 'ARN': arn,
- 'TagList': [{'Key': k, 'Value': value} for k, value in six.iteritems(tags or {})]
+ "ARN": arn,
+ "TagList": [
+ {"Key": k, "Value": value} for k, value in six.iteritems(tags or {})
+ ],
}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.add_tags(**boto_params)
- ret['result'] = True
+ ret["result"] = True
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
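Every public function in this module returns the same dict shape, so callers can branch on it uniformly. A hedged usage sketch (names and values illustrative, assuming the module is imported directly):

    res = add_tags(domain_name="mydomain", tags={"env": "prod"})
    if res["result"]:
        print("tags attached")
    else:
        print("failed: {0}".format(res.get("error")))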
-@depends('botocore', version='1.12.21')
+@depends("botocore", version="1.12.21")
def cancel_elasticsearch_service_software_update(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name, region=None, keyid=None, key=None, profile=None
+):
+ """
Cancels a scheduled service software update for an Amazon ES domain. You can
only perform this operation before the AutomatedUpdateDate and when the UpdateStatus
is in the PENDING_UPDATE state.
@@ -165,34 +187,38 @@ def cancel_elasticsearch_service_software_update(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.cancel_elasticsearch_service_software_update(DomainName=domain_name)
- ret['result'] = True
- res['response'] = res['ServiceSoftwareOptions']
+ ret["result"] = True
+ res["response"] = res["ServiceSoftwareOptions"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def create_elasticsearch_domain(
- domain_name,
- elasticsearch_version=None,
- elasticsearch_cluster_config=None,
- ebs_options=None,
- access_policies=None,
- snapshot_options=None,
- vpc_options=None,
- cognito_options=None,
- encryption_at_rest_options=None,
- node_to_node_encryption_options=None,
- advanced_options=None,
- log_publishing_options=None,
- blocking=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+ domain_name,
+ elasticsearch_version=None,
+ elasticsearch_cluster_config=None,
+ ebs_options=None,
+ access_policies=None,
+ snapshot_options=None,
+ vpc_options=None,
+ cognito_options=None,
+ encryption_at_rest_options=None,
+ node_to_node_encryption_options=None,
+ advanced_options=None,
+ log_publishing_options=None,
+ blocking=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create a domain.
:param str domain_name: The name of the Elasticsearch domain that you are creating.
@@ -319,43 +345,48 @@ def create_elasticsearch_domain(
"Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]}' \\
snapshot_options='{"AutomatedSnapshotStartHour": 0}' \\
advanced_options='{"rest.action.multi.allow_explicit_index": "true"}'
- '''
- boto_kwargs = salt.utils.data.filter_falsey({
- 'DomainName': domain_name,
- 'ElasticsearchVersion': six.text_type(elasticsearch_version or ''),
- 'ElasticsearchClusterConfig': elasticsearch_cluster_config,
- 'EBSOptions': ebs_options,
- 'AccessPolicies': (salt.utils.json.dumps(access_policies)
- if isinstance(access_policies, dict)
- else access_policies),
- 'SnapshotOptions': snapshot_options,
- 'VPCOptions': vpc_options,
- 'CognitoOptions': cognito_options,
- 'EncryptionAtRestOptions': encryption_at_rest_options,
- 'NodeToNodeEncryptionOptions': node_to_node_encryption_options,
- 'AdvancedOptions': advanced_options,
- 'LogPublishingOptions': log_publishing_options,
- })
- ret = {'result': False}
+ """
+ boto_kwargs = salt.utils.data.filter_falsey(
+ {
+ "DomainName": domain_name,
+ "ElasticsearchVersion": six.text_type(elasticsearch_version or ""),
+ "ElasticsearchClusterConfig": elasticsearch_cluster_config,
+ "EBSOptions": ebs_options,
+ "AccessPolicies": (
+ salt.utils.json.dumps(access_policies)
+ if isinstance(access_policies, dict)
+ else access_policies
+ ),
+ "SnapshotOptions": snapshot_options,
+ "VPCOptions": vpc_options,
+ "CognitoOptions": cognito_options,
+ "EncryptionAtRestOptions": encryption_at_rest_options,
+ "NodeToNodeEncryptionOptions": node_to_node_encryption_options,
+ "AdvancedOptions": advanced_options,
+ "LogPublishingOptions": log_publishing_options,
+ }
+ )
+ ret = {"result": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.create_elasticsearch_domain(**boto_kwargs)
- if res and 'DomainStatus' in res:
- ret['result'] = True
- ret['response'] = res['DomainStatus']
+ if res and "DomainStatus" in res:
+ ret["result"] = True
+ ret["response"] = res["DomainStatus"]
if blocking:
- waiter = __utils__['boto3_elasticsearch.get_waiter'](conn, waiter='ESDomainAvailable')
+ waiter = __utils__["boto3_elasticsearch.get_waiter"](
+ conn, waiter="ESDomainAvailable"
+ )
waiter.wait(DomainName=domain_name)
except (ParamValidationError, ClientError, WaiterError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
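The boto_kwargs construction above relies on salt.utils.data.filter_falsey to drop unset parameters before the API call, so boto3 never receives None values it would reject. A rough stand-in for the top-level behaviour (the real helper can also recurse into nested structures):

    def filter_falsey_sketch(mapping):
        # keep only entries whose values are truthy
        return {k: v for k, v in mapping.items() if v}

    assert filter_falsey_sketch({"DomainName": "d", "EBSOptions": None}) == {
        "DomainName": "d"
    }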
def delete_elasticsearch_domain(
- domain_name,
- blocking=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+ domain_name, blocking=False, region=None, key=None, keyid=None, profile=None
+):
+ """
Permanently deletes the specified Elasticsearch domain and all of its data.
Once a domain is deleted, it cannot be recovered.
@@ -369,24 +400,25 @@ def delete_elasticsearch_domain(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_elasticsearch_domain(DomainName=domain_name)
- ret['result'] = True
+ ret["result"] = True
if blocking:
- waiter = __utils__['boto3_elasticsearch.get_waiter'](conn, waiter='ESDomainDeleted')
+ waiter = __utils__["boto3_elasticsearch.get_waiter"](
+ conn, waiter="ESDomainDeleted"
+ )
waiter.wait(DomainName=domain_name)
except (ParamValidationError, ClientError, WaiterError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.7.30')
-def delete_elasticsearch_service_role(
- region=None, keyid=None, key=None, profile=None):
- '''
+@depends("botocore", version="1.7.30")
+def delete_elasticsearch_service_role(region=None, keyid=None, key=None, profile=None):
+ """
Deletes the service-linked role that Elasticsearch Service uses to manage and
maintain VPC domains. Role deletion will fail if any existing VPC domains use
the role. You must delete any such Elasticsearch domains before deleting the role.
@@ -397,21 +429,21 @@ def delete_elasticsearch_service_role(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
conn.delete_elasticsearch_service_role()
- ret['result'] = True
+ ret["result"] = True
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def describe_elasticsearch_domain(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name, region=None, keyid=None, key=None, profile=None
+):
+ """
Given a domain name gets its status description.
:param str domain_name: The name of the domain to get the status of.
@@ -423,23 +455,23 @@ def describe_elasticsearch_domain(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.describe_elasticsearch_domain(DomainName=domain_name)
- if res and 'DomainStatus' in res:
- ret['result'] = True
- ret['response'] = res['DomainStatus']
+ if res and "DomainStatus" in res:
+ ret["result"] = True
+ ret["response"] = res["DomainStatus"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def describe_elasticsearch_domain_config(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name, region=None, keyid=None, key=None, profile=None
+):
+ """
Provides cluster configuration information about the specified Elasticsearch domain,
such as the state, creation date, update version, and update date for cluster options.
@@ -452,23 +484,23 @@ def describe_elasticsearch_domain_config(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.describe_elasticsearch_domain_config(DomainName=domain_name)
- if res and 'DomainConfig' in res:
- ret['result'] = True
- ret['response'] = res['DomainConfig']
+ if res and "DomainConfig" in res:
+ ret["result"] = True
+ ret["response"] = res["DomainConfig"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def describe_elasticsearch_domains(
- domain_names,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_names, region=None, keyid=None, key=None, profile=None
+):
+ """
Returns domain configuration information about the specified Elasticsearch
domains, including the domain ID, domain endpoint, and domain ARN.
@@ -486,26 +518,30 @@ def describe_elasticsearch_domains(
.. code-block:: bash
salt myminion boto3_elasticsearch.describe_elasticsearch_domains '["domain_a", "domain_b"]'
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.describe_elasticsearch_domains(DomainNames=domain_names)
- if res and 'DomainStatusList' in res:
- ret['result'] = True
- ret['response'] = res['DomainStatusList']
+ if res and "DomainStatusList" in res:
+ ret["result"] = True
+ ret["response"] = res["DomainStatusList"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.5.18')
+@depends("botocore", version="1.5.18")
def describe_elasticsearch_instance_type_limits(
- instance_type,
- elasticsearch_version,
- domain_name=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ instance_type,
+ elasticsearch_version,
+ domain_name=None,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion.
When modifying an existing domain, specify its ``DomainName`` to learn which
limits apply to that modification.
@@ -532,29 +568,35 @@ def describe_elasticsearch_instance_type_limits(
salt myminion boto3_elasticsearch.describe_elasticsearch_instance_type_limits \\
instance_type=r3.8xlarge.elasticsearch \\
elasticsearch_version='6.2'
- '''
- ret = {'result': False}
- boto_params = salt.utils.data.filter_falsey({
- 'DomainName': domain_name,
- 'InstanceType': instance_type,
- 'ElasticsearchVersion': six.text_type(elasticsearch_version),
- })
+ """
+ ret = {"result": False}
+ boto_params = salt.utils.data.filter_falsey(
+ {
+ "DomainName": domain_name,
+ "InstanceType": instance_type,
+ "ElasticsearchVersion": six.text_type(elasticsearch_version),
+ }
+ )
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.describe_elasticsearch_instance_type_limits(**boto_params)
- if res and 'LimitsByRole' in res:
- ret['result'] = True
- ret['response'] = res['LimitsByRole']
+ if res and "LimitsByRole" in res:
+ ret["result"] = True
+ ret["response"] = res["LimitsByRole"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.15')
+@depends("botocore", version="1.10.15")
def describe_reserved_elasticsearch_instance_offerings(
- reserved_elasticsearch_instance_offering_id=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ reserved_elasticsearch_instance_offering_id=None,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
Lists available reserved Elasticsearch instance offerings.
:param str reserved_elasticsearch_instance_offering_id: The offering identifier
@@ -568,31 +610,35 @@ def describe_reserved_elasticsearch_instance_offerings(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
boto_params = {
- 'ReservedElasticsearchInstanceOfferingId': reserved_elasticsearch_instance_offering_id
+ "ReservedElasticsearchInstanceOfferingId": reserved_elasticsearch_instance_offering_id
}
res = []
for page in conn.get_paginator(
- 'describe_reserved_elasticsearch_instance_offerings'
- ).paginate(**boto_params):
- res.extend(page['ReservedElasticsearchInstanceOfferings'])
+ "describe_reserved_elasticsearch_instance_offerings"
+ ).paginate(**boto_params):
+ res.extend(page["ReservedElasticsearchInstanceOfferings"])
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
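This function and describe_reserved_elasticsearch_instances below both use the standard boto3 paginator idiom: fetch every page and accumulate one result key. The idiom in isolation (a generic sketch, not module code):

    def collect_pages(client, op_name, result_key, **params):
        # Pull every page from a boto3 paginator and flatten one key.
        items = []
        for page in client.get_paginator(op_name).paginate(**params):
            items.extend(page.get(result_key, []))
        return items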
-@depends('botocore', version='1.10.15')
+@depends("botocore", version="1.10.15")
def describe_reserved_elasticsearch_instances(
- reserved_elasticsearch_instance_id=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ reserved_elasticsearch_instance_id=None,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
Returns information about reserved Elasticsearch instances for this account.
:param str reserved_elasticsearch_instance_id: The reserved instance identifier
@@ -610,31 +656,31 @@ def describe_reserved_elasticsearch_instances(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
boto_params = {
- 'ReservedElasticsearchInstanceId': reserved_elasticsearch_instance_id,
+ "ReservedElasticsearchInstanceId": reserved_elasticsearch_instance_id,
}
res = []
for page in conn.get_paginator(
- 'describe_reserved_elasticsearch_instances'
- ).paginate(**boto_params):
- res.extend(page['ReservedElasticsearchInstances'])
+ "describe_reserved_elasticsearch_instances"
+ ).paginate(**boto_params):
+ res.extend(page["ReservedElasticsearchInstances"])
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.77')
+@depends("botocore", version="1.10.77")
def get_compatible_elasticsearch_versions(
- domain_name=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name=None, region=None, keyid=None, key=None, profile=None
+):
+ """
Returns a list of upgrade-compatible Elasticsearch versions. You can optionally
pass a ``domain_name`` to get all upgrade-compatible Elasticsearch versions
for that specific domain.
@@ -648,27 +694,23 @@ def get_compatible_elasticsearch_versions(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
- boto_params = salt.utils.data.filter_falsey({
- 'DomainName': domain_name,
- })
+ """
+ ret = {"result": False}
+ boto_params = salt.utils.data.filter_falsey({"DomainName": domain_name})
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.get_compatible_elasticsearch_versions(**boto_params)
- if res and 'CompatibleElasticsearchVersions' in res:
- ret['result'] = True
- ret['response'] = res['CompatibleElasticsearchVersions']
+ if res and "CompatibleElasticsearchVersions" in res:
+ ret["result"] = True
+ ret["response"] = res["CompatibleElasticsearchVersions"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.77')
-def get_upgrade_history(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+@depends("botocore", version="1.10.77")
+def get_upgrade_history(domain_name, region=None, keyid=None, key=None, profile=None):
+ """
Retrieves the complete history of the last 10 upgrades that were performed on the domain.
:param str domain_name: The name of an Elasticsearch domain. Domain names are
@@ -683,27 +725,25 @@ def get_upgrade_history(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
- boto_params = {'DomainName': domain_name}
+ boto_params = {"DomainName": domain_name}
res = []
- for page in conn.get_paginator('get_upgrade_history').paginate(**boto_params):
- res.extend(page['UpgradeHistories'])
+ for page in conn.get_paginator("get_upgrade_history").paginate(**boto_params):
+ res.extend(page["UpgradeHistories"])
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.77')
-def get_upgrade_status(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+@depends("botocore", version="1.10.77")
+def get_upgrade_status(domain_name, region=None, keyid=None, key=None, profile=None):
+ """
Retrieves the latest status of the last upgrade or upgrade eligibility check
that was performed on the domain.
@@ -719,23 +759,22 @@ def get_upgrade_status(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
- boto_params = {'DomainName': domain_name}
+ """
+ ret = {"result": False}
+ boto_params = {"DomainName": domain_name}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.get_upgrade_status(**boto_params)
- ret['result'] = True
- ret['response'] = res
- del res['ResponseMetadata']
+ ret["result"] = True
+ ret["response"] = res
+ del res["ResponseMetadata"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
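In get_upgrade_status above, ResponseMetadata is deleted from ``res`` after ``ret["response"]`` has already been assigned; this still scrubs the returned payload because both names refer to the same dict object. A minimal demonstration with illustrative data:

    res = {"UpgradeStep": "PRE_UPGRADE_CHECK", "ResponseMetadata": {"HTTPStatusCode": 200}}
    ret = {"response": res}
    del res["ResponseMetadata"]
    assert "ResponseMetadata" not in ret["response"]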
-def list_domain_names(
- region=None, keyid=None, key=None, profile=None):
- '''
+def list_domain_names(region=None, keyid=None, key=None, profile=None):
+ """
Returns the names of all Elasticsearch domains owned by the current user's account.
:rtype: dict
@@ -745,25 +784,29 @@ def list_domain_names(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.list_domain_names()
- if res and 'DomainNames' in res:
- ret['result'] = True
- ret['response'] = [item['DomainName'] for item in res['DomainNames']]
+ if res and "DomainNames" in res:
+ ret["result"] = True
+ ret["response"] = [item["DomainName"] for item in res["DomainNames"]]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.5.18')
+@depends("botocore", version="1.5.18")
def list_elasticsearch_instance_types(
- elasticsearch_version,
- domain_name=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ elasticsearch_version,
+ domain_name=None,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
List all Elasticsearch instance types that are supported for the given ElasticsearchVersion.
:param str elasticsearch_version: Version of Elasticsearch for which list of
@@ -779,29 +822,32 @@ def list_elasticsearch_instance_types(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
- boto_params = salt.utils.data.filter_falsey({
- 'ElasticsearchVersion': six.text_type(elasticsearch_version),
- 'DomainName': domain_name,
- })
+ boto_params = salt.utils.data.filter_falsey(
+ {
+ "ElasticsearchVersion": six.text_type(elasticsearch_version),
+ "DomainName": domain_name,
+ }
+ )
res = []
- for page in conn.get_paginator('list_elasticsearch_instance_types').paginate(**boto_params):
- res.extend(page['ElasticsearchInstanceTypes'])
+ for page in conn.get_paginator("list_elasticsearch_instance_types").paginate(
+ **boto_params
+ ):
+ res.extend(page["ElasticsearchInstanceTypes"])
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.5.18')
-def list_elasticsearch_versions(
- region=None, keyid=None, key=None, profile=None):
- '''
+@depends("botocore", version="1.5.18")
+def list_elasticsearch_versions(region=None, keyid=None, key=None, profile=None):
+ """
List all supported Elasticsearch versions.
:rtype: dict
@@ -811,26 +857,25 @@ def list_elasticsearch_versions(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = []
- for page in conn.get_paginator('list_elasticsearch_versions').paginate():
- res.extend(page['ElasticsearchVersions'])
+ for page in conn.get_paginator("list_elasticsearch_versions").paginate():
+ res.extend(page["ElasticsearchVersions"])
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def list_tags(
- domain_name=None,
- arn=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+ domain_name=None, arn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns all tags for the given Elasticsearch domain.
:rtype: dict
@@ -840,38 +885,56 @@ def list_tags(
.. versionadded:: Natrium
- '''
+ """
if not any((arn, domain_name)):
- raise SaltInvocationError('At least one of domain_name or arn must be specified.')
- ret = {'result': False}
+ raise SaltInvocationError(
+ "At least one of domain_name or arn must be specified."
+ )
+ ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
- region=region, key=key, keyid=keyid, profile=profile)
- if 'error' in res:
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if "error" in res:
ret.update(res)
- elif not res['result']:
- ret.update({'error': 'The domain with name "{}" does not exist.'.format(domain_name)})
+ elif not res["result"]:
+ ret.update(
+ {
+ "error": 'The domain with name "{}" does not exist.'.format(
+ domain_name
+ )
+ }
+ )
else:
- arn = res['response'].get('ARN')
+ arn = res["response"].get("ARN")
if arn:
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.list_tags(ARN=arn)
- ret['result'] = True
- ret['response'] = {item['Key']: item['Value'] for item in res.get('TagList', [])}
+ ret["result"] = True
+ ret["response"] = {
+ item["Key"]: item["Value"] for item in res.get("TagList", [])
+ }
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
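list_tags converts the AWS TagList shape into a plain mapping on the way out. The conversion in isolation (sample data illustrative):

    tag_list = [{"Key": "env", "Value": "prod"}, {"Key": "team", "Value": "ops"}]
    tags = {item["Key"]: item["Value"] for item in tag_list}
    assert tags == {"env": "prod", "team": "ops"}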
-@depends('botocore', version='1.10.15')
+@depends("botocore", version="1.10.15")
def purchase_reserved_elasticsearch_instance_offering(
- reserved_elasticsearch_instance_offering_id,
- reservation_name,
- instance_count=None,
- region=None, keyid=None, key=None, profile=None):
- '''
+ reserved_elasticsearch_instance_offering_id,
+ reservation_name,
+ instance_count=None,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
Allows you to purchase reserved Elasticsearch instances.
:param str reserved_elasticsearch_instance_offering_id: The ID of the reserved
@@ -886,30 +949,36 @@ def purchase_reserved_elasticsearch_instance_offering(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
- boto_params = salt.utils.data.filter_falsey({
- 'ReservedElasticsearchInstanceOfferingId': reserved_elasticsearch_instance_offering_id,
- 'ReservationName': reservation_name,
- 'InstanceCount': instance_count,
- })
+ """
+ ret = {"result": False}
+ boto_params = salt.utils.data.filter_falsey(
+ {
+ "ReservedElasticsearchInstanceOfferingId": reserved_elasticsearch_instance_offering_id,
+ "ReservationName": reservation_name,
+ "InstanceCount": instance_count,
+ }
+ )
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.purchase_reserved_elasticsearch_instance_offering(**boto_params)
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def remove_tags(
- tag_keys,
- domain_name=None,
- arn=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+ tag_keys,
+ domain_name=None,
+ arn=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Removes the specified set of tags from the specified Elasticsearch domain.
:param list tag_keys: List with tag keys you want to remove from the Elasticsearch domain.
@@ -928,36 +997,47 @@ def remove_tags(
.. code-block:: bash
salt myminion boto3_elasticsearch.remove_tags '["foo", "bar"]' domain_name=my_domain
- '''
+ """
if not any((arn, domain_name)):
- raise SaltInvocationError('At least one of domain_name or arn must be specified.')
- ret = {'result': False}
+ raise SaltInvocationError(
+ "At least one of domain_name or arn must be specified."
+ )
+ ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
- region=region, key=key, keyid=keyid, profile=profile)
- if 'error' in res:
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if "error" in res:
ret.update(res)
- elif not res['result']:
- ret.update({'error': 'The domain with name "{}" does not exist.'.format(domain_name)})
+ elif not res["result"]:
+ ret.update(
+ {
+ "error": 'The domain with name "{}" does not exist.'.format(
+ domain_name
+ )
+ }
+ )
else:
- arn = res['response'].get('ARN')
+ arn = res["response"].get("ARN")
if arn:
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.remove_tags(ARN=arn,
- TagKeys=tag_keys)
- ret['result'] = True
+ conn.remove_tags(ARN=arn, TagKeys=tag_keys)
+ ret["result"] = True
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.12.21')
+@depends("botocore", version="1.12.21")
def start_elasticsearch_service_software_update(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name, region=None, keyid=None, key=None, profile=None
+):
+ """
Schedules a service software update for an Amazon ES domain.
:param str domain_name: The name of the domain that you want to update to the
@@ -970,33 +1050,37 @@ def start_elasticsearch_service_software_update(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
- boto_params = {'DomainName': domain_name}
+ """
+ ret = {"result": False}
+ boto_params = {"DomainName": domain_name}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.start_elasticsearch_service_software_update(**boto_params)
- if res and 'ServiceSoftwareOptions' in res:
- ret['result'] = True
- ret['response'] = res['ServiceSoftwareOptions']
+ if res and "ServiceSoftwareOptions" in res:
+ ret["result"] = True
+ ret["response"] = res["ServiceSoftwareOptions"]
except (ParamValidationError, ClientError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
def update_elasticsearch_domain_config(
- domain_name,
- elasticsearch_cluster_config=None,
- ebs_options=None,
- vpc_options=None,
- access_policies=None,
- snapshot_options=None,
- cognito_options=None,
- advanced_options=None,
- log_publishing_options=None,
- blocking=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+ domain_name,
+ elasticsearch_cluster_config=None,
+ ebs_options=None,
+ vpc_options=None,
+ access_policies=None,
+ snapshot_options=None,
+ cognito_options=None,
+ advanced_options=None,
+ log_publishing_options=None,
+ blocking=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Modifies the cluster configuration of the specified Elasticsearch domain,
for example setting the instance type and the number of instances.
@@ -1102,45 +1186,55 @@ def update_elasticsearch_domain_config(
"Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]}' \\
snapshot_options='{"AutomatedSnapshotStartHour": 0}' \\
advanced_options='{"rest.action.multi.allow_explicit_index": "true"}'
- '''
- ret = {'result': False}
- boto_kwargs = salt.utils.data.filter_falsey({
- 'DomainName': domain_name,
- 'ElasticsearchClusterConfig': elasticsearch_cluster_config,
- 'EBSOptions': ebs_options,
- 'SnapshotOptions': snapshot_options,
- 'VPCOptions': vpc_options,
- 'CognitoOptions': cognito_options,
- 'AdvancedOptions': advanced_options,
- 'AccessPolicies': (salt.utils.json.dumps(access_policies)
- if isinstance(access_policies, dict)
- else access_policies),
- 'LogPublishingOptions': log_publishing_options,
- })
+ """
+ ret = {"result": False}
+ boto_kwargs = salt.utils.data.filter_falsey(
+ {
+ "DomainName": domain_name,
+ "ElasticsearchClusterConfig": elasticsearch_cluster_config,
+ "EBSOptions": ebs_options,
+ "SnapshotOptions": snapshot_options,
+ "VPCOptions": vpc_options,
+ "CognitoOptions": cognito_options,
+ "AdvancedOptions": advanced_options,
+ "AccessPolicies": (
+ salt.utils.json.dumps(access_policies)
+ if isinstance(access_policies, dict)
+ else access_policies
+ ),
+ "LogPublishingOptions": log_publishing_options,
+ }
+ )
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.update_elasticsearch_domain_config(**boto_kwargs)
- if not res or 'DomainConfig' not in res:
- log.warning('Domain was not updated')
+ if not res or "DomainConfig" not in res:
+ log.warning("Domain was not updated")
else:
- ret['result'] = True
- ret['response'] = res['DomainConfig']
+ ret["result"] = True
+ ret["response"] = res["DomainConfig"]
if blocking:
- waiter = __utils__['boto3_elasticsearch.get_waiter'](conn, waiter='ESDomainAvailable')
+ waiter = __utils__["boto3_elasticsearch.get_waiter"](
+ conn, waiter="ESDomainAvailable"
+ )
waiter.wait(DomainName=domain_name)
except (ParamValidationError, ClientError, WaiterError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.77')
+@depends("botocore", version="1.10.77")
def upgrade_elasticsearch_domain(
- domain_name,
- target_version,
- perform_check_only=None,
- blocking=False,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name,
+ target_version,
+ perform_check_only=None,
+ blocking=False,
+ region=None,
+ keyid=None,
+ key=None,
+ profile=None,
+):
+ """
Allows you to either upgrade your domain or perform an upgrade eligibility
check against a compatible Elasticsearch version.
@@ -1170,31 +1264,33 @@ def upgrade_elasticsearch_domain(
salt myminion boto3_elasticsearch.upgrade_elasticsearch_domain mydomain \\
target_version='6.7' \\
perform_check_only=True
- '''
- ret = {'result': False}
- boto_params = salt.utils.data.filter_falsey({
- 'DomainName': domain_name,
- 'TargetVersion': six.text_type(target_version),
- 'PerformCheckOnly': perform_check_only,
- })
+ """
+ ret = {"result": False}
+ boto_params = salt.utils.data.filter_falsey(
+ {
+ "DomainName": domain_name,
+ "TargetVersion": six.text_type(target_version),
+ "PerformCheckOnly": perform_check_only,
+ }
+ )
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
res = conn.upgrade_elasticsearch_domain(**boto_params)
if res:
- ret['result'] = True
- ret['response'] = res
+ ret["result"] = True
+ ret["response"] = res
if blocking:
- waiter = __utils__['boto3_elasticsearch.get_waiter'](conn, waiter='ESUpgradeFinished')
+ waiter = __utils__["boto3_elasticsearch.get_waiter"](
+ conn, waiter="ESUpgradeFinished"
+ )
waiter.wait(DomainName=domain_name)
except (ParamValidationError, ClientError, WaiterError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-def exists(
- domain_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def exists(domain_name, region=None, key=None, keyid=None, profile=None):
+ """
Given a domain name, check whether that domain exists.
:param str domain_name: The name of the domain to check.
@@ -1205,22 +1301,20 @@ def exists(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.describe_elasticsearch_domain(DomainName=domain_name)
- ret['result'] = True
+ ret["result"] = True
except (ParamValidationError, ClientError) as exp:
- if exp.response.get('Error', {}).get('Code') != 'ResourceNotFoundException':
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ if exp.response.get("Error", {}).get("Code") != "ResourceNotFoundException":
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-def wait_for_upgrade(
- domain_name,
- region=None, keyid=None, key=None, profile=None):
- '''
+def wait_for_upgrade(domain_name, region=None, keyid=None, key=None, profile=None):
+ """
Block until an upgrade-in-progress for domain ``domain_name`` is finished.
:param str domain_name: The name of the domain to wait for.
@@ -1231,24 +1325,25 @@ def wait_for_upgrade(
.. versionadded:: Natrium
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
try:
conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
- waiter = __utils__['boto3_elasticsearch.get_waiter'](conn, waiter='ESUpgradeFinished')
+ waiter = __utils__["boto3_elasticsearch.get_waiter"](
+ conn, waiter="ESUpgradeFinished"
+ )
waiter.wait(DomainName=domain_name)
- ret['result'] = True
+ ret["result"] = True
except (ParamValidationError, ClientError, WaiterError) as exp:
- ret.update({'error': __utils__['boto3.get_error'](exp)['message']})
+ ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
-@depends('botocore', version='1.10.77')
+@depends("botocore", version="1.10.77")
def check_upgrade_eligibility(
- domain_name,
- elasticsearch_version,
- region=None, keyid=None, key=None, profile=None):
- '''
+ domain_name, elasticsearch_version, region=None, keyid=None, key=None, profile=None
+):
+ """
Helper function to determine in one call if an Elasticsearch domain can be
upgraded to the specified Elasticsearch version.
@@ -1281,20 +1376,21 @@ def check_upgrade_eligibility(
.. code-block:: bash
salt myminion boto3_elasticsearch.check_upgrade_eligibility mydomain '6.7'
- '''
- ret = {'result': False}
+ """
+ ret = {"result": False}
# Check if the desired version is in the list of compatible versions
res = get_compatible_elasticsearch_versions(
- domain_name,
- region=region, keyid=keyid, key=key, profile=profile)
- if 'error' in res:
+ domain_name, region=region, keyid=keyid, key=key, profile=profile
+ )
+ if "error" in res:
return res
- compatible_versions = res['response'][0]['TargetVersions']
+ compatible_versions = res["response"][0]["TargetVersions"]
if six.text_type(elasticsearch_version) not in compatible_versions:
- ret['result'] = True
- ret['response'] = False
- ret['error'] = ('Desired version "{}" not in compatible versions: {}.'
- ''.format(elasticsearch_version, compatible_versions))
+ ret["result"] = True
+ ret["response"] = False
+ ret["error"] = 'Desired version "{}" not in compatible versions: {}.' "".format(
+ elasticsearch_version, compatible_versions
+ )
return ret
# Check if the domain is eligible to upgrade to the desired version
res = upgrade_elasticsearch_domain(
@@ -1302,14 +1398,24 @@ def check_upgrade_eligibility(
elasticsearch_version,
perform_check_only=True,
blocking=True,
- region=region, keyid=keyid, key=key, profile=profile)
- if 'error' in res:
+ region=region,
+ keyid=keyid,
+ key=key,
+ profile=profile,
+ )
+ if "error" in res:
return res
- res = wait_for_upgrade(domain_name, region=region, keyid=keyid, key=key, profile=profile)
- if 'error' in res:
+ res = wait_for_upgrade(
+ domain_name, region=region, keyid=keyid, key=key, profile=profile
+ )
+ if "error" in res:
return res
- res = get_upgrade_status(domain_name, region=region, keyid=keyid, key=key, profile=profile)
- ret['result'] = True
- ret['response'] = (res['response']['UpgradeStep'] == 'PRE_UPGRADE_CHECK' and
- res['response']['StepStatus'] == 'SUCCEEDED')
+ res = get_upgrade_status(
+ domain_name, region=region, keyid=keyid, key=key, profile=profile
+ )
+ ret["result"] = True
+ ret["response"] = (
+ res["response"]["UpgradeStep"] == "PRE_UPGRADE_CHECK"
+ and res["response"]["StepStatus"] == "SUCCEEDED"
+ )
return ret
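check_upgrade_eligibility chains four of the calls above: get_compatible_elasticsearch_versions, then upgrade_elasticsearch_domain with perform_check_only=True, then wait_for_upgrade, then get_upgrade_status. The final verdict reduces to the expression below, shown standalone with illustrative data:

    status = {"response": {"UpgradeStep": "PRE_UPGRADE_CHECK", "StepStatus": "SUCCEEDED"}}
    eligible = (
        status["response"]["UpgradeStep"] == "PRE_UPGRADE_CHECK"
        and status["response"]["StepStatus"] == "SUCCEEDED"
    )
    assert eligible is True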
diff --git a/salt/modules/boto3_route53.py b/salt/modules/boto3_route53.py
index f0a12667853..1efbe507727 100644
--- a/salt/modules/boto3_route53.py
+++ b/salt/modules/boto3_route53.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Execution module for Amazon Route53 written against Boto 3
.. versionadded:: 2017.7.0
@@ -43,68 +43,73 @@ Execution module for Amazon Route53 written against Boto 3
None as well.
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602,W0106
+# pylint: disable=E0602,W0106
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-import time
import re
+import sys
+import time
# Import Salt libs
import salt.utils.compat
import salt.utils.versions
-from salt.exceptions import SaltInvocationError, CommandExecutionError
+from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
+from salt.ext.six.moves import range
+
log = logging.getLogger(__name__) # pylint: disable=W1699
# Import third party libs
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
return salt.utils.versions.check_boto_reqs()
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO3:
- __utils__['boto3.assign_funcs'](__name__, 'route53')
+ __utils__["boto3.assign_funcs"](__name__, "route53")
-def _collect_results(func, item, args, marker='Marker', nextmarker='NextMarker'):
+def _collect_results(func, item, args, marker="Marker", nextmarker="NextMarker"):
ret = []
- Marker = args.get(marker, '')
+ Marker = args.get(marker, "")
tries = 10
while Marker is not None:
try:
r = func(**args)
except ClientError as e:
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
# Rate limited - retry
- log.debug('Throttled by AWS API.')
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Could not collect results from %s(): %s', func, e)
+ log.error("Could not collect results from %s(): %s", func, e)
return []
i = r.get(item, []) if item else r
- i.pop('ResponseMetadata', None) if isinstance(i, dict) else None
+ i.pop("ResponseMetadata", None) if isinstance(i, dict) else None
ret += i if isinstance(i, list) else [i]
Marker = r.get(nextmarker)
args.update({marker: Marker})
@@ -112,26 +117,33 @@ def _collect_results(func, item, args, marker='Marker', nextmarker='NextMarker')
def _wait_for_sync(change, conn, tries=10, sleep=20):
- for retry in range(1, tries+1):
- log.info('Getting route53 status (attempt %s)', retry)
- status = 'wait'
+ for retry in range(1, tries + 1):
+ log.info("Getting route53 status (attempt %s)", retry)
+ status = "wait"
try:
- status = conn.get_change(Id=change)['ChangeInfo']['Status']
+ status = conn.get_change(Id=change)["ChangeInfo"]["Status"]
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
else:
six.reraise(*sys.exc_info())
- if status == 'INSYNC':
+ if status == "INSYNC":
return True
time.sleep(sleep)
- log.error('Timed out waiting for Route53 INSYNC status.')
+ log.error("Timed out waiting for Route53 INSYNC status.")
return False
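_wait_for_sync above polls GetChange until Route53 reports INSYNC, tolerating throttling along the way (hence the sys import added above, which six.reraise needs). The same retry-until-true shape as a generic, self-contained sketch (names assumed):

    import time

    def poll_until(check, tries=10, sleep=20):
        # Call check() up to `tries` times, pausing `sleep` seconds between calls.
        for _ in range(tries):
            if check():
                return True
            time.sleep(sleep)
        return False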
-def find_hosted_zone(Id=None, Name=None, PrivateZone=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def find_hosted_zone(
+ Id=None,
+ Name=None,
+ PrivateZone=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Find a hosted zone with the given characteristics.
Id
@@ -164,28 +176,35 @@ def find_hosted_zone(Id=None, Name=None, PrivateZone=None,
salt myminion boto3_route53.find_hosted_zone Name=salt.org. \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
- '''
+ """
if not _exactly_one((Id, Name)):
- raise SaltInvocationError('Exactly one of either Id or Name is required.')
+ raise SaltInvocationError("Exactly one of either Id or Name is required.")
if PrivateZone is not None and not isinstance(PrivateZone, bool):
- raise SaltInvocationError('If set, PrivateZone must be a bool (e.g. True / False).')
+ raise SaltInvocationError(
+ "If set, PrivateZone must be a bool (e.g. True / False)."
+ )
if Id:
ret = get_hosted_zone(Id, region=region, key=key, keyid=keyid, profile=profile)
else:
- ret = get_hosted_zones_by_domain(Name, region=region, key=key, keyid=keyid, profile=profile)
+ ret = get_hosted_zones_by_domain(
+ Name, region=region, key=key, keyid=keyid, profile=profile
+ )
if PrivateZone is not None:
- ret = [m for m in ret if m['HostedZone']['Config']['PrivateZone'] is PrivateZone]
+ ret = [
+ m for m in ret if m["HostedZone"]["Config"]["PrivateZone"] is PrivateZone
+ ]
if len(ret) > 1:
log.error(
- 'Request matched more than one Hosted Zone (%s). Refine your '
- 'criteria and try again.', [z['HostedZone']['Id'] for z in ret]
+ "Request matched more than one Hosted Zone (%s). Refine your "
+ "criteria and try again.",
+ [z["HostedZone"]["Id"] for z in ret],
)
ret = []
return ret
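A hedged Python-side usage of find_hosted_zone, mirroring the docstring's CLI example (profile contents illustrative):

    zones = find_hosted_zone(
        Name="salt.org.",
        PrivateZone=False,
        profile={"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"},
    )
    # [] on no match or an ambiguous match; otherwise a one-element list of zone data.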
def get_hosted_zone(Id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return detailed info about the given zone.
Id
@@ -209,14 +228,14 @@ def get_hosted_zone(Id, region=None, key=None, keyid=None, profile=None):
salt myminion boto3_route53.get_hosted_zone Z1234567690 \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- args = {'Id': Id}
+ args = {"Id": Id}
return _collect_results(conn.get_hosted_zone, None, args)
def get_hosted_zones_by_domain(Name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Find any zones with the given domain name and return detailed info about them.
Note that this can return multiple Route53 zones, since a domain name can be used in
both public and private zones.
@@ -242,18 +261,25 @@ def get_hosted_zones_by_domain(Name, region=None, key=None, keyid=None, profile=
salt myminion boto3_route53.get_hosted_zones_by_domain salt.org. \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- zones = [z for z in _collect_results(conn.list_hosted_zones, 'HostedZones', {})
- if z['Name'] == aws_encode(Name)]
+ zones = [
+ z
+ for z in _collect_results(conn.list_hosted_zones, "HostedZones", {})
+ if z["Name"] == aws_encode(Name)
+ ]
ret = []
for z in zones:
- ret += get_hosted_zone(Id=z['Id'], region=region, key=key, keyid=keyid, profile=profile)
+ ret += get_hosted_zone(
+ Id=z["Id"], region=region, key=key, keyid=keyid, profile=profile
+ )
return ret
-def list_hosted_zones(DelegationSetId=None, region=None, key=None, keyid=None, profile=None):
- '''
+def list_hosted_zones(
+ DelegationSetId=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return detailed info about all zones in the bound account.
DelegationSetId
@@ -278,16 +304,27 @@ def list_hosted_zones(DelegationSetId=None, region=None, key=None, keyid=None, p
salt myminion boto3_route53.list_hosted_zones \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- args = {'DelegationSetId': DelegationSetId} if DelegationSetId else {}
- return _collect_results(conn.list_hosted_zones, 'HostedZones', args)
+ args = {"DelegationSetId": DelegationSetId} if DelegationSetId else {}
+ return _collect_results(conn.list_hosted_zones, "HostedZones", args)
-def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
- Comment='', PrivateZone=False, DelegationSetId=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_hosted_zone(
+ Name,
+ VPCId=None,
+ VPCName=None,
+ VPCRegion=None,
+ CallerReference=None,
+ Comment="",
+ PrivateZone=False,
+ DelegationSetId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
@@ -343,78 +380,103 @@ def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerRef
CLI Example::
salt myminion boto3_route53.create_hosted_zone example.org.
- '''
- if not Name.endswith('.'):
- raise SaltInvocationError('Domain must be fully-qualified, complete with trailing period.')
+ """
+ if not Name.endswith("."):
+ raise SaltInvocationError(
+ "Domain must be fully-qualified, complete with trailing period."
+ )
Name = aws_encode(Name)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
- region=region, key=key, keyid=keyid, profile=profile)
+ deets = find_hosted_zone(
+ Name=Name,
+ PrivateZone=PrivateZone,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if deets:
log.info(
- 'Route 53 hosted zone %s already exists. You may want to pass '
- 'e.g. \'PrivateZone=True\' or similar...', Name
+ "Route 53 hosted zone %s already exists. You may want to pass "
+ "e.g. 'PrivateZone=True' or similar...",
+ Name,
)
return None
args = {
- 'Name': Name,
- 'CallerReference': CallerReference,
- 'HostedZoneConfig': {
- 'Comment': Comment,
- 'PrivateZone': PrivateZone
- }
- }
- args.update({'DelegationSetId': DelegationSetId}) if DelegationSetId else None
+ "Name": Name,
+ "CallerReference": CallerReference,
+ "HostedZoneConfig": {"Comment": Comment, "PrivateZone": PrivateZone},
+ }
+ args.update({"DelegationSetId": DelegationSetId}) if DelegationSetId else None
if PrivateZone:
if not _exactly_one((VPCName, VPCId)):
- raise SaltInvocationError('Either VPCName or VPCId is required when creating a '
- 'private zone.')
- vpcs = __salt__['boto_vpc.describe_vpcs'](
- vpc_id=VPCId, name=VPCName, region=region, key=key,
- keyid=keyid, profile=profile).get('vpcs', [])
+ raise SaltInvocationError(
+ "Either VPCName or VPCId is required when creating a " "private zone."
+ )
+ vpcs = __salt__["boto_vpc.describe_vpcs"](
+ vpc_id=VPCId,
+ name=VPCName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("vpcs", [])
if VPCRegion and vpcs:
- vpcs = [v for v in vpcs if v['region'] == VPCRegion]
+ vpcs = [v for v in vpcs if v["region"] == VPCRegion]
if not vpcs:
- log.error('Private zone requested but no VPC matching given criteria found.')
+ log.error(
+ "Private zone requested but no VPC matching given criteria found."
+ )
return None
if len(vpcs) > 1:
log.error(
- 'Private zone requested but multiple VPCs matching given '
- 'criteria found: %s.', [v['id'] for v in vpcs]
+ "Private zone requested but multiple VPCs matching given "
+ "criteria found: %s.",
+ [v["id"] for v in vpcs],
)
return None
vpc = vpcs[0]
if VPCName:
- VPCId = vpc['id']
+ VPCId = vpc["id"]
if not VPCRegion:
- VPCRegion = vpc['region']
- args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
+ VPCRegion = vpc["region"]
+ args.update({"VPC": {"VPCId": VPCId, "VPCRegion": VPCRegion}})
else:
if any((VPCId, VPCName, VPCRegion)):
- log.info('Options VPCId, VPCName, and VPCRegion are ignored when creating '
- 'non-private zones.')
+ log.info(
+ "Options VPCId, VPCName, and VPCRegion are ignored when creating "
+ "non-private zones."
+ )
tries = 10
while tries:
try:
r = conn.create_hosted_zone(**args)
- r.pop('ResponseMetadata', None)
- if _wait_for_sync(r['ChangeInfo']['Id'], conn):
+ r.pop("ResponseMetadata", None)
+ if _wait_for_sync(r["ChangeInfo"]["Id"], conn):
return [r]
return []
except ClientError as e:
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Failed to create hosted zone %s: %s', Name, e)
+ log.error("Failed to create hosted zone %s: %s", Name, e)
return []
return []
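create_hosted_zone, update_hosted_zone_comment, and associate_vpc_with_hosted_zone all share the same throttle-and-retry shape around their API call. A self-contained sketch of that shape (error plumbing simplified; not the module's actual helper):

    import time

    def call_with_retry(func, is_throttled, tries=10, backoff=3, **kwargs):
        # Retry only on throttling; re-raise anything else immediately.
        while tries:
            try:
                return func(**kwargs)
            except Exception as exc:
                if is_throttled(exc):
                    time.sleep(backoff)
                    tries -= 1
                    continue
                raise
        return None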
-def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update_hosted_zone_comment(
+ Id=None,
+ Name=None,
+ Comment=None,
+ PrivateZone=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update the comment on an existing Route 53 hosted zone.
Id
@@ -433,39 +495,53 @@ def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=Non
salt myminion boto3_route53.update_hosted_zone_comment Name=example.org. \
Comment="This is an example comment for an example zone"
- '''
+ """
if not _exactly_one((Id, Name)):
- raise SaltInvocationError('Exactly one of either Id or Name is required.')
+ raise SaltInvocationError("Exactly one of either Id or Name is required.")
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Name:
- args = {'Name': Name, 'PrivateZone': PrivateZone, 'region': region,
- 'key': key, 'keyid': keyid, 'profile': profile}
+ args = {
+ "Name": Name,
+ "PrivateZone": PrivateZone,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
- Id = zone[0]['HostedZone']['Id']
+ Id = zone[0]["HostedZone"]["Id"]
tries = 10
while tries:
try:
r = conn.update_hosted_zone_comment(Id=Id, Comment=Comment)
- r.pop('ResponseMetadata', None)
+ r.pop("ResponseMetadata", None)
return [r]
except ClientError as e:
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Failed to update comment on hosted zone %s: %s',
- Name or Id, e)
+ log.error("Failed to update comment on hosted zone %s: %s", Name or Id, e)
return []
-def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
- VPCName=None, VPCRegion=None, Comment=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def associate_vpc_with_hosted_zone(
+ HostedZoneId=None,
+ Name=None,
+ VPCId=None,
+ VPCName=None,
+ VPCRegion=None,
+ Comment=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't
@@ -510,67 +586,93 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoo-hoo! I added another VPC."
- '''
+ """
if not _exactly_one((HostedZoneId, Name)):
- raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
+ raise SaltInvocationError(
+ "Exactly one of either HostedZoneId or Name is required."
+ )
if not _exactly_one((VPCId, VPCName)):
- raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
+ raise SaltInvocationError("Exactly one of either VPCId or VPCName is required.")
if Name:
# {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
- args = {'Name': Name, 'PrivateZone': True, 'region': region,
- 'key': key, 'keyid': keyid, 'profile': profile}
+ args = {
+ "Name": Name,
+ "PrivateZone": True,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
zone = find_hosted_zone(**args)
if not zone:
log.error(
- "Couldn't resolve domain name %s to a private hosted zone ID.",
- Name
+ "Couldn't resolve domain name %s to a private hosted zone ID.", Name
)
return False
- HostedZoneId = zone[0]['HostedZone']['Id']
- vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
- keyid=keyid, profile=profile).get('vpcs', [])
+ HostedZoneId = zone[0]["HostedZone"]["Id"]
+ vpcs = __salt__["boto_vpc.describe_vpcs"](
+ vpc_id=VPCId, name=VPCName, region=region, key=key, keyid=keyid, profile=profile
+ ).get("vpcs", [])
if VPCRegion and vpcs:
- vpcs = [v for v in vpcs if v['region'] == VPCRegion]
+ vpcs = [v for v in vpcs if v["region"] == VPCRegion]
if not vpcs:
- log.error('No VPC matching the given criteria found.')
+ log.error("No VPC matching the given criteria found.")
return False
if len(vpcs) > 1:
- log.error('Multiple VPCs matching the given criteria found: %s.',
- ', '.join([v['id'] for v in vpcs]))
+ log.error(
+ "Multiple VPCs matching the given criteria found: %s.",
+ ", ".join([v["id"] for v in vpcs]),
+ )
return False
vpc = vpcs[0]
if VPCName:
- VPCId = vpc['id']
+ VPCId = vpc["id"]
if not VPCRegion:
- VPCRegion = vpc['region']
- args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}
- args.update({'Comment': Comment}) if Comment is not None else None
+ VPCRegion = vpc["region"]
+ args = {
+ "HostedZoneId": HostedZoneId,
+ "VPC": {"VPCId": VPCId, "VPCRegion": VPCRegion},
+ }
+ args.update({"Comment": Comment}) if Comment is not None else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tries = 10
while tries:
try:
r = conn.associate_vpc_with_hosted_zone(**args)
- return _wait_for_sync(r['ChangeInfo']['Id'], conn)
+ return _wait_for_sync(r["ChangeInfo"]["Id"], conn)
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
- log.debug('VPC Association already exists.')
+ if e.response.get("Error", {}).get("Code") == "ConflictingDomainExists":
+ log.debug("VPC Association already exists.")
# return True since the current state is the desired one
return True
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Failed to associate VPC %s with hosted zone %s: %s',
- VPCName or VPCId, Name or HostedZoneId, e)
+ log.error(
+ "Failed to associate VPC %s with hosted zone %s: %s",
+ VPCName or VPCId,
+ Name or HostedZoneId,
+ e,
+ )
return False
-def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
- VPCName=None, VPCRegion=None, Comment=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def disassociate_vpc_from_hosted_zone(
+ HostedZoneId=None,
+ Name=None,
+ VPCId=None,
+ VPCName=None,
+ VPCRegion=None,
+ Comment=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Disassociates an Amazon VPC from a private hosted zone.
You can't disassociate the last VPC from a private hosted zone. You also can't convert a
@@ -608,75 +710,95 @@ def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoops! Don't wanna talk to this-here zone no more."
- '''
+ """
if not _exactly_one((HostedZoneId, Name)):
- raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
+ raise SaltInvocationError(
+ "Exactly one of either HostedZoneId or Name is required."
+ )
if not _exactly_one((VPCId, VPCName)):
- raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
+ raise SaltInvocationError("Exactly one of either VPCId or VPCName is required.")
if Name:
# {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
- args = {'Name': Name, 'PrivateZone': True, 'region': region,
- 'key': key, 'keyid': keyid, 'profile': profile}
+ args = {
+ "Name": Name,
+ "PrivateZone": True,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
zone = find_hosted_zone(**args)
if not zone:
- log.error("Couldn't resolve domain name %s to a private hosted zone ID.", Name)
+ log.error(
+ "Couldn't resolve domain name %s to a private hosted zone ID.", Name
+ )
return False
- HostedZoneId = zone[0]['HostedZone']['Id']
- vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
- keyid=keyid, profile=profile).get('vpcs', [])
+ HostedZoneId = zone[0]["HostedZone"]["Id"]
+ vpcs = __salt__["boto_vpc.describe_vpcs"](
+ vpc_id=VPCId, name=VPCName, region=region, key=key, keyid=keyid, profile=profile
+ ).get("vpcs", [])
if VPCRegion and vpcs:
- vpcs = [v for v in vpcs if v['region'] == VPCRegion]
+ vpcs = [v for v in vpcs if v["region"] == VPCRegion]
if not vpcs:
- log.error('No VPC matching the given criteria found.')
+ log.error("No VPC matching the given criteria found.")
return False
if len(vpcs) > 1:
- log.error('Multiple VPCs matching the given criteria found: %s.',
- ', '.join([v['id'] for v in vpcs]))
+ log.error(
+ "Multiple VPCs matching the given criteria found: %s.",
+ ", ".join([v["id"] for v in vpcs]),
+ )
return False
vpc = vpcs[0]
if VPCName:
- VPCId = vpc['id']
+ VPCId = vpc["id"]
if not VPCRegion:
- VPCRegion = vpc['region']
- args = ({'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
- args.update({'Comment': Comment}) if Comment is not None else None
+ VPCRegion = vpc["region"]
+ args = {
+ "HostedZoneId": HostedZoneId,
+ "VPC": {"VPCId": VPCId, "VPCRegion": VPCRegion},
+ }
+ args.update({"Comment": Comment}) if Comment is not None else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tries = 10
while tries:
try:
r = conn.disassociate_vpc_from_hosted_zone(**args)
- return _wait_for_sync(r['ChangeInfo']['Id'], conn)
+ return _wait_for_sync(r["ChangeInfo"]["Id"], conn)
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'VPCAssociationNotFound':
- log.debug('No VPC Association exists.')
+ if e.response.get("Error", {}).get("Code") == "VPCAssociationNotFound":
+ log.debug("No VPC Association exists.")
# return True since the current state is the desired one
return True
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Failed to associate VPC %s with hosted zone %s: %s',
- VPCName or VPCId, Name or HostedZoneId, e)
+ log.error(
+ "Failed to associate VPC %s with hosted zone %s: %s",
+ VPCName or VPCId,
+ Name or HostedZoneId,
+ e,
+ )
return False
-#def create_vpc_association_authorization(*args, **kwargs):
+# def create_vpc_association_authorization(*args, **kwargs):
# '''
# unimplemented
# '''
# pass
-#def delete_vpc_association_authorization(*args, **kwargs):
+# def delete_vpc_association_authorization(*args, **kwargs):
# '''
# unimplemented
# '''
# pass
-#def list_vpc_association_authorizations(*args, **kwargs):
+# def list_vpc_association_authorizations(*args, **kwargs):
# '''
# unimplemented
# '''
@@ -684,45 +806,54 @@ def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
def delete_hosted_zone(Id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a Route53 hosted zone.
CLI Example::
salt myminion boto3_route53.delete_hosted_zone Z1234567890
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
r = conn.delete_hosted_zone(Id=Id)
- return _wait_for_sync(r['ChangeInfo']['Id'], conn)
+ return _wait_for_sync(r["ChangeInfo"]["Id"], conn)
except ClientError as e:
- log.error('Failed to delete hosted zone %s: %s', Id, e)
+ log.error("Failed to delete hosted zone %s: %s", Id, e)
return False
-def delete_hosted_zone_by_domain(Name, PrivateZone=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_hosted_zone_by_domain(
+ Name, PrivateZone=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a Route53 hosted zone by domain name, and PrivateZone status if provided.
CLI Example::
salt myminion boto3_route53.delete_hosted_zone_by_domain example.org.
- '''
- args = {'Name': Name, 'PrivateZone': PrivateZone,
- 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
+ """
+ args = {
+ "Name": Name,
+ "PrivateZone": PrivateZone,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
# Be extra pedantic in the service of safety - if public/private is not provided and the domain
# name resolves to both, fail and require them to declare it explicitly.
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return False
- Id = zone[0]['HostedZone']['Id']
- return delete_hosted_zone(Id=Id, region=region, key=key, keyid=keyid, profile=profile)
+ Id = zone[0]["HostedZone"]["Id"]
+ return delete_hosted_zone(
+ Id=Id, region=region, key=key, keyid=keyid, profile=profile
+ )
def aws_encode(x):
- '''
+ """
An implementation of the encoding required to support AWS's domain name
rules defined here__:
@@ -739,41 +870,56 @@ def aws_encode(x):
.. __: https://pypi.org/project/idna
- '''
+ """
ret = None
try:
- x.encode('ascii')
- ret = re.sub(r'\\x([a-f0-8]{2})',
- _hexReplace, x.encode('unicode_escape'))
+ x.encode("ascii")
+ ret = re.sub(r"\\x([a-f0-8]{2})", _hexReplace, x.encode("unicode_escape"))
except UnicodeEncodeError:
- ret = x.encode('idna')
+ ret = x.encode("idna")
except Exception as e: # pylint: disable=broad-except
- log.error("Couldn't encode %s using either 'unicode_escape' or 'idna' codecs", x)
+ log.error(
+ "Couldn't encode %s using either 'unicode_escape' or 'idna' codecs", x
+ )
raise CommandExecutionError(e)
- log.debug('AWS-encoded result for %s: %s', x, ret)
+ log.debug("AWS-encoded result for %s: %s", x, ret)
return ret
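# Two illustrative calls, assuming the control flow above: ASCII names pass
# through unicode_escape (printable characters are left alone, and any \xNN
# hex escape is rewritten to Route53's \NNN octal form by _hexReplace), while
# non-ASCII names fall back to the idna codec:
#
#     aws_encode("example.org.")         # -> "example.org."
#     aws_encode(u"b\xfccher.example.")  # -> "xn--bcher-kva.example."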
def _aws_encode_changebatch(o):
- '''
+ """
helper method to process a change batch & encode the bits which need encoding.
- '''
+ """
change_idx = 0
- while change_idx < len(o['Changes']):
- o['Changes'][change_idx]['ResourceRecordSet']['Name'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['Name'])
- if 'ResourceRecords' in o['Changes'][change_idx]['ResourceRecordSet']:
+ while change_idx < len(o["Changes"]):
+ o["Changes"][change_idx]["ResourceRecordSet"]["Name"] = aws_encode(
+ o["Changes"][change_idx]["ResourceRecordSet"]["Name"]
+ )
+ if "ResourceRecords" in o["Changes"][change_idx]["ResourceRecordSet"]:
rr_idx = 0
- while rr_idx < len(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords']):
- o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value'])
+ while rr_idx < len(
+ o["Changes"][change_idx]["ResourceRecordSet"]["ResourceRecords"]
+ ):
+ o["Changes"][change_idx]["ResourceRecordSet"]["ResourceRecords"][
+ rr_idx
+ ]["Value"] = aws_encode(
+ o["Changes"][change_idx]["ResourceRecordSet"]["ResourceRecords"][
+ rr_idx
+ ]["Value"]
+ )
rr_idx += 1
- if 'AliasTarget' in o['Changes'][change_idx]['ResourceRecordSet']:
- o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName'])
+ if "AliasTarget" in o["Changes"][change_idx]["ResourceRecordSet"]:
+ o["Changes"][change_idx]["ResourceRecordSet"]["AliasTarget"][
+ "DNSName"
+ ] = aws_encode(
+ o["Changes"][change_idx]["ResourceRecordSet"]["AliasTarget"]["DNSName"]
+ )
change_idx += 1
return o
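# For reference, the shape of the object the encoder above walks: a
# hypothetical UPSERT batch with made-up values.
#
#     change_batch = {
#         "Changes": [
#             {
#                 "Action": "UPSERT",
#                 "ResourceRecordSet": {
#                     "Name": "test.example.org.",
#                     "Type": "A",
#                     "TTL": 300,
#                     "ResourceRecords": [{"Value": "10.1.2.3"}],
#                 },
#             }
#         ]
#     }
#
# Every RRSet Name, every ResourceRecord Value, and any AliasTarget DNSName
# is rewritten in place via aws_encode() before the batch is sent to AWS.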
def _aws_decode(x):
- '''
+ """
An implementation of the decoding required to support AWS's domain name
rules defined here__:
@@ -792,29 +938,37 @@ def _aws_decode(x):
We look for the existence of any escape codes, which give us a clue that
we've received an escaped unicode string; otherwise we assume it's idna
encoded and decode as necessary.
- '''
- if '\\' in x:
- return x.decode('unicode_escape')
- return x.decode('idna')
+ """
+ if "\\" in x:
+ return x.decode("unicode_escape")
+ return x.decode("idna")
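# A worked pair of decodes, assuming Route53's habit of returning special
# characters as \NNN octal escapes (values are illustrative):
#
#     _aws_decode("\\052.example.org.")      # -> "*.example.org." (\052 == '*')
#     _aws_decode("xn--bcher-kva.example.")  # -> u"b\xfccher.example."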
def _hexReplace(x):
- '''
+ """
+    Converts a two-digit hex code to a base-16 int, then to its octal form, minus
the leading zero.
This is necessary because x.encode('unicode_escape') automatically produces a
hex escape sequence, which AWS will accept but won't interpret as intended
unless it's an octal escape sequence.
- '''
+ """
c = int(x.group(1), 16)
- return '\\' + str(oct(c))[1:]
+ return "\\" + str(oct(c))[1:]
-def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
- StartRecordType=None, PrivateZone=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_resource_records(
+ HostedZoneId=None,
+ Name=None,
+ StartRecordName=None,
+ StartRecordType=None,
+ PrivateZone=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get all resource records from a given zone matching the provided StartRecordName (if given) or all
records in the zone (if not), optionally filtered by a specific StartRecordType. This will return
any and all RRs matching, regardless of their special AWS flavors (weighted, geolocation, alias,
@@ -832,19 +986,25 @@ def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
CLI example::
salt myminion boto3_route53.get_resource_records test.example.org example.org A
- '''
+ """
if not _exactly_one((HostedZoneId, Name)):
- raise SaltInvocationError('Exactly one of either HostedZoneId or Name must '
- 'be provided.')
+ raise SaltInvocationError(
+ "Exactly one of either HostedZoneId or Name must " "be provided."
+ )
if Name:
- args = {'Name': Name, 'region': region, 'key': key, 'keyid': keyid,
- 'profile': profile}
- args.update({'PrivateZone': PrivateZone}) if PrivateZone is not None else None
+ args = {
+ "Name": Name,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
+ args.update({"PrivateZone": PrivateZone}) if PrivateZone is not None else None
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
- HostedZoneId = zone[0]['HostedZone']['Id']
+ HostedZoneId = zone[0]["HostedZone"]["Id"]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = []
@@ -855,34 +1015,42 @@ def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
while True:
if done:
return ret
- args = {'HostedZoneId': HostedZoneId}
- args.update({'StartRecordName': aws_encode(next_rr_name)}) if next_rr_name else None
+ args = {"HostedZoneId": HostedZoneId}
+        if next_rr_name:
+            args.update({"StartRecordName": aws_encode(next_rr_name)})
# Grrr, can't specify type unless name is set... We'll do this via filtering later instead
- args.update({'StartRecordType': next_rr_type}) if next_rr_name and next_rr_type else None
- args.update({'StartRecordIdentifier': next_rr_id}) if next_rr_id else None
+        if next_rr_name and next_rr_type:
+            args.update({"StartRecordType": next_rr_type})
+        if next_rr_id:
+            args.update({"StartRecordIdentifier": next_rr_id})
try:
r = conn.list_resource_record_sets(**args)
- rrs = r['ResourceRecordSets']
- next_rr_name = r.get('NextRecordName')
- next_rr_type = r.get('NextRecordType')
- next_rr_id = r.get('NextRecordIdentifier')
+ rrs = r["ResourceRecordSets"]
+ next_rr_name = r.get("NextRecordName")
+ next_rr_type = r.get("NextRecordType")
+ next_rr_id = r.get("NextRecordIdentifier")
for rr in rrs:
- rr['Name'] = _aws_decode(rr['Name'])
+ rr["Name"] = _aws_decode(rr["Name"])
# now iterate over the ResourceRecords and replace any encoded
# value strings with the decoded versions
- if 'ResourceRecords' in rr:
+ if "ResourceRecords" in rr:
x = 0
- while x < len(rr['ResourceRecords']):
- if 'Value' in rr['ResourceRecords'][x]:
- rr['ResourceRecords'][x]['Value'] = _aws_decode(rr['ResourceRecords'][x]['Value'])
+ while x < len(rr["ResourceRecords"]):
+ if "Value" in rr["ResourceRecords"][x]:
+ rr["ResourceRecords"][x]["Value"] = _aws_decode(
+ rr["ResourceRecords"][x]["Value"]
+ )
x += 1
# or if we are an AliasTarget then decode the DNSName
- if 'AliasTarget' in rr:
- rr['AliasTarget']['DNSName'] = _aws_decode(rr['AliasTarget']['DNSName'])
- if StartRecordName and rr['Name'] != StartRecordName:
+ if "AliasTarget" in rr:
+ rr["AliasTarget"]["DNSName"] = _aws_decode(
+ rr["AliasTarget"]["DNSName"]
+ )
+ if StartRecordName and rr["Name"] != StartRecordName:
done = True
break
- if StartRecordType and rr['Type'] != StartRecordType:
+ if StartRecordType and rr["Type"] != StartRecordType:
if StartRecordName:
done = True
break
@@ -894,17 +1062,24 @@ def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
done = True
except ClientError as e:
# Try forever on a simple thing like this...
- if e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
continue
six.reraise(*sys.exc_info())
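# A minimal sketch of the pagination contract driving the loop above, assuming
# a plain boto3 Route53 client; `client` and `iter_record_sets` are
# illustrative, not part of this module:
def iter_record_sets(client, zone_id):
    kwargs = {"HostedZoneId": zone_id}
    while True:
        page = client.list_resource_record_sets(**kwargs)
        for rrset in page["ResourceRecordSets"]:
            yield rrset
        if not page.get("IsTruncated"):
            return
        # The Next* markers seed the StartRecord* arguments of the next call.
        kwargs["StartRecordName"] = page["NextRecordName"]
        kwargs["StartRecordType"] = page["NextRecordType"]
        if "NextRecordIdentifier" in page:
            kwargs["StartRecordIdentifier"] = page["NextRecordIdentifier"]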
-def change_resource_record_sets(HostedZoneId=None, Name=None,
- PrivateZone=None, ChangeBatch=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def change_resource_record_sets(
+ HostedZoneId=None,
+ Name=None,
+ PrivateZone=None,
+ ChangeBatch=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
See the `AWS Route53 API docs`__ as well as the `Boto3 documentation`__ for all the details...
.. __: https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html
@@ -969,34 +1144,49 @@ def change_resource_record_sets(HostedZoneId=None, Name=None,
salt myminion boto3_route53.change_resource_record_sets DomainName=example.org. \
keyid=A1234567890ABCDEF123 key=xblahblahblah \
ChangeBatch="{'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': $foo}]}"
- '''
+ """
if not _exactly_one((HostedZoneId, Name)):
- raise SaltInvocationError('Exactly one of either HostZoneId or Name must be provided.')
+ raise SaltInvocationError(
+ "Exactly one of either HostZoneId or Name must be provided."
+ )
if Name:
- args = {'Name': Name, 'region': region, 'key': key, 'keyid': keyid,
- 'profile': profile}
- args.update({'PrivateZone': PrivateZone}) if PrivateZone is not None else None
+ args = {
+ "Name": Name,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
+ args.update({"PrivateZone": PrivateZone}) if PrivateZone is not None else None
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
- HostedZoneId = zone[0]['HostedZone']['Id']
+ HostedZoneId = zone[0]["HostedZone"]["Id"]
- args = {'HostedZoneId': HostedZoneId, 'ChangeBatch': _aws_encode_changebatch(ChangeBatch)}
+ args = {
+ "HostedZoneId": HostedZoneId,
+ "ChangeBatch": _aws_encode_changebatch(ChangeBatch),
+ }
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tries = 20 # A bit more headroom
while tries:
try:
r = conn.change_resource_record_sets(**args)
- return _wait_for_sync(r['ChangeInfo']['Id'], conn, 30) # And a little extra time here
+ return _wait_for_sync(
+ r["ChangeInfo"]["Id"], conn, 30
+ ) # And a little extra time here
except ClientError as e:
- if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
- log.debug('Throttled by AWS API.')
+ if tries and e.response.get("Error", {}).get("Code") == "Throttling":
+ log.debug("Throttled by AWS API.")
time.sleep(3)
tries -= 1
continue
- log.error('Failed to apply requested changes to the hosted zone %s: %s',
- (Name or HostedZoneId), six.text_type(e))
+ log.error(
+ "Failed to apply requested changes to the hosted zone %s: %s",
+ (Name or HostedZoneId),
+ six.text_type(e),
+ )
raise e
return False
diff --git a/salt/modules/boto3_sns.py b/salt/modules/boto3_sns.py
index 14f9fdd3670..6866219807b 100644
--- a/salt/modules/boto3_sns.py
+++ b/salt/modules/boto3_sns.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon SNS
:configuration: This module accepts explicit sns credentials but can also
@@ -38,12 +38,13 @@ Connection module for Amazon SNS
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import Salt libs
@@ -52,273 +53,327 @@ import salt.utils.versions
log = logging.getLogger(__name__)
# Import third party libs
-#pylint: disable=unused-import
+# pylint: disable=unused-import
try:
import botocore
import boto3
import jmespath
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
-#pylint: enable=unused-import
+# pylint: enable=unused-import
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'sns')
+ __utils__["boto3.assign_funcs"](__name__, "sns")
return has_boto_reqs
def list_topics(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns a list of the requester's topics
CLI example::
salt myminion boto3_sns.list_topics
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = {}
- NextToken = ''
+ NextToken = ""
while NextToken is not None:
ret = conn.list_topics(NextToken=NextToken)
- NextToken = ret.get('NextToken', None)
- arns = jmespath.search('Topics[*].TopicArn', ret)
+ NextToken = ret.get("NextToken", None)
+ arns = jmespath.search("Topics[*].TopicArn", ret)
for t in arns:
- short_name = t.split(':')[-1]
+ short_name = t.split(":")[-1]
res[short_name] = t
return res
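# The NextToken idiom above as a stand-alone sketch, assuming a boto3 SNS
# client: the loop is seeded with an empty token and stops once the service
# omits NextToken from a response.
def iter_topic_arns(client):
    token = ""
    while token is not None:
        page = client.list_topics(NextToken=token)
        for topic in page.get("Topics", []):
            yield topic["TopicArn"]
        token = page.get("NextToken")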
def describe_topic(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns details about a specific SNS topic, specified by name or ARN.
CLI example::
salt my_favorite_client boto3_sns.describe_topic a_sns_topic_of_my_choice
- '''
+ """
topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)
ret = {}
for topic, arn in topics.items():
if name in (topic, arn):
- ret = {'TopicArn': arn}
- ret['Subscriptions'] = list_subscriptions_by_topic(arn, region=region, key=key,
- keyid=keyid, profile=profile)
- ret['Attributes'] = get_topic_attributes(arn, region=region, key=key, keyid=keyid,
- profile=profile)
+ ret = {"TopicArn": arn}
+ ret["Subscriptions"] = list_subscriptions_by_topic(
+ arn, region=region, key=key, keyid=keyid, profile=profile
+ )
+ ret["Attributes"] = get_topic_attributes(
+ arn, region=region, key=key, keyid=keyid, profile=profile
+ )
return ret
def topic_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an SNS topic exists.
CLI example::
salt myminion boto3_sns.topic_exists mytopic region=us-east-1
- '''
+ """
topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)
return name in list(topics.values()) + list(topics.keys())
def create_topic(Name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Create an SNS topic.
CLI example::
salt myminion boto3_sns.create_topic mytopic region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.create_topic(Name=Name)
- log.info('SNS topic %s created with ARN %s', Name, ret['TopicArn'])
- return ret['TopicArn']
+ log.info("SNS topic %s created with ARN %s", Name, ret["TopicArn"])
+ return ret["TopicArn"]
except botocore.exceptions.ClientError as e:
- log.error('Failed to create SNS topic %s: %s', Name, e)
+ log.error("Failed to create SNS topic %s: %s", Name, e)
return None
except KeyError:
- log.error('Failed to create SNS topic %s', Name)
+ log.error("Failed to create SNS topic %s", Name)
return None
def delete_topic(TopicArn, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an SNS topic.
CLI example::
salt myminion boto3_sns.delete_topic mytopic region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_topic(TopicArn=TopicArn)
- log.info('SNS topic %s deleted', TopicArn)
+ log.info("SNS topic %s deleted", TopicArn)
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to delete SNS topic %s: %s', name, e)
+ log.error("Failed to delete SNS topic %s: %s", name, e)
return False
def get_topic_attributes(TopicArn, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns all of the properties of a topic. Topic properties returned might differ based on the
authorization of the user.
CLI example::
salt myminion boto3_sns.get_topic_attributes someTopic region=us-west-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- return conn.get_topic_attributes(TopicArn=TopicArn).get('Attributes')
+ return conn.get_topic_attributes(TopicArn=TopicArn).get("Attributes")
except botocore.exceptions.ClientError as e:
- log.error('Failed to garner attributes for SNS topic %s: %s', TopicArn, e)
+ log.error("Failed to garner attributes for SNS topic %s: %s", TopicArn, e)
return None
-def set_topic_attributes(TopicArn, AttributeName, AttributeValue, region=None, key=None, keyid=None,
- profile=None):
- '''
+def set_topic_attributes(
+ TopicArn,
+ AttributeName,
+ AttributeValue,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Set an attribute of a topic to a new value.
CLI example::
salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- conn.set_topic_attributes(TopicArn=TopicArn, AttributeName=AttributeName,
- AttributeValue=AttributeValue)
- log.debug('Set attribute %s=%s on SNS topic %s',
- AttributeName, AttributeValue, TopicArn)
+ conn.set_topic_attributes(
+ TopicArn=TopicArn,
+ AttributeName=AttributeName,
+ AttributeValue=AttributeValue,
+ )
+ log.debug(
+ "Set attribute %s=%s on SNS topic %s",
+ AttributeName,
+ AttributeValue,
+ TopicArn,
+ )
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to set attribute %s=%s for SNS topic %s: %s',
- AttributeName, AttributeValue, TopicArn, e)
+ log.error(
+ "Failed to set attribute %s=%s for SNS topic %s: %s",
+ AttributeName,
+ AttributeValue,
+ TopicArn,
+ e,
+ )
return False
-def list_subscriptions_by_topic(TopicArn, region=None, key=None, keyid=None, profile=None):
- '''
+def list_subscriptions_by_topic(
+ TopicArn, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns a list of the subscriptions to a specific topic
CLI example::
salt myminion boto3_sns.list_subscriptions_by_topic mytopic region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- NextToken = ''
+ NextToken = ""
res = []
try:
while NextToken is not None:
- ret = conn.list_subscriptions_by_topic(TopicArn=TopicArn, NextToken=NextToken)
- NextToken = ret.get('NextToken', None)
- subs = ret.get('Subscriptions', [])
+ ret = conn.list_subscriptions_by_topic(
+ TopicArn=TopicArn, NextToken=NextToken
+ )
+ NextToken = ret.get("NextToken", None)
+ subs = ret.get("Subscriptions", [])
res += subs
except botocore.exceptions.ClientError as e:
- log.error('Failed to list subscriptions for SNS topic %s: %s', TopicArn, e)
+ log.error("Failed to list subscriptions for SNS topic %s: %s", TopicArn, e)
return None
return res
def list_subscriptions(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns a list of the requester's subscriptions
CLI example::
salt myminion boto3_sns.list_subscriptions region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- NextToken = ''
+ NextToken = ""
res = []
try:
while NextToken is not None:
ret = conn.list_subscriptions(NextToken=NextToken)
- NextToken = ret.get('NextToken', None)
- subs = ret.get('Subscriptions', [])
+ NextToken = ret.get("NextToken", None)
+ subs = ret.get("Subscriptions", [])
res += subs
except botocore.exceptions.ClientError as e:
- log.error('Failed to list SNS subscriptions: %s', e)
+ log.error("Failed to list SNS subscriptions: %s", e)
return None
return res
-def get_subscription_attributes(SubscriptionArn, region=None, key=None, keyid=None, profile=None):
- '''
+def get_subscription_attributes(
+ SubscriptionArn, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns all of the properties of a subscription.
CLI example::
salt myminion boto3_sns.get_subscription_attributes somesubscription region=us-west-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_subscription_attributes(SubscriptionArn=SubscriptionArn)
- return ret['Attributes']
+ return ret["Attributes"]
except botocore.exceptions.ClientError as e:
- log.error('Failed to list attributes for SNS subscription %s: %s',
- SubscriptionArn, e)
+ log.error(
+ "Failed to list attributes for SNS subscription %s: %s", SubscriptionArn, e
+ )
return None
except KeyError:
- log.error('Failed to list attributes for SNS subscription %s',
- SubscriptionArn)
+ log.error("Failed to list attributes for SNS subscription %s", SubscriptionArn)
return None
-def set_subscription_attributes(SubscriptionArn, AttributeName, AttributeValue, region=None,
- key=None, keyid=None, profile=None):
- '''
+def set_subscription_attributes(
+ SubscriptionArn,
+ AttributeName,
+ AttributeValue,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Set an attribute of a subscription to a new value.
CLI example::
salt myminion boto3_sns.set_subscription_attributes someSubscription RawMessageDelivery jsonStringValue
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- conn.set_subscription_attributes(SubscriptionArn=SubscriptionArn,
- AttributeName=AttributeName, AttributeValue=AttributeValue)
- log.debug('Set attribute %s=%s on SNS subscription %s',
- AttributeName, AttributeValue, SubscriptionArn)
+ conn.set_subscription_attributes(
+ SubscriptionArn=SubscriptionArn,
+ AttributeName=AttributeName,
+ AttributeValue=AttributeValue,
+ )
+ log.debug(
+ "Set attribute %s=%s on SNS subscription %s",
+ AttributeName,
+ AttributeValue,
+ SubscriptionArn,
+ )
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to set attribute %s=%s for SNS subscription %s: %s',
- AttributeName, AttributeValue, SubscriptionArn, e)
+ log.error(
+ "Failed to set attribute %s=%s for SNS subscription %s: %s",
+ AttributeName,
+ AttributeValue,
+ SubscriptionArn,
+ e,
+ )
return False
-def subscribe(TopicArn, Protocol, Endpoint, region=None, key=None, keyid=None, profile=None):
- '''
+def subscribe(
+ TopicArn, Protocol, Endpoint, region=None, key=None, keyid=None, profile=None
+):
+ """
Subscribe to a Topic.
CLI example::
salt myminion boto3_sns.subscribe mytopic https https://www.example.com/sns-endpoint
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.subscribe(TopicArn=TopicArn, Protocol=Protocol, Endpoint=Endpoint)
- log.info('Subscribed %s %s to topic %s with SubscriptionArn %s',
- Protocol, Endpoint, TopicArn, ret['SubscriptionArn'])
- return ret['SubscriptionArn']
+ log.info(
+ "Subscribed %s %s to topic %s with SubscriptionArn %s",
+ Protocol,
+ Endpoint,
+ TopicArn,
+ ret["SubscriptionArn"],
+ )
+ return ret["SubscriptionArn"]
except botocore.exceptions.ClientError as e:
- log.error('Failed to create subscription to SNS topic %s: %s', TopicArn, e)
+ log.error("Failed to create subscription to SNS topic %s: %s", TopicArn, e)
return None
except KeyError:
- log.error('Failed to create subscription to SNS topic %s', TopicArn)
+ log.error("Failed to create subscription to SNS topic %s", TopicArn)
return None
def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Unsubscribe a specific SubscriptionArn of a topic.
CLI Example:
@@ -326,19 +381,18 @@ def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None
.. code-block:: bash
salt myminion boto3_sns.unsubscribe my_subscription_arn region=us-east-1
- '''
+ """
subs = list_subscriptions(region=region, key=key, keyid=keyid, profile=profile)
- sub = [s for s in subs if s.get('SubscriptionArn') == SubscriptionArn]
+ sub = [s for s in subs if s.get("SubscriptionArn") == SubscriptionArn]
if not sub:
- log.error('Subscription ARN %s not found', SubscriptionArn)
+ log.error("Subscription ARN %s not found", SubscriptionArn)
return False
- TopicArn = sub[0]['TopicArn']
+ TopicArn = sub[0]["TopicArn"]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.unsubscribe(SubscriptionArn=SubscriptionArn)
- log.info('Deleted subscription %s from SNS topic %s',
- SubscriptionArn, TopicArn)
+ log.info("Deleted subscription %s from SNS topic %s", SubscriptionArn, TopicArn)
return True
except botocore.exceptions.ClientError as e:
- log.error('Failed to delete subscription %s: %s', SubscriptionArn, e)
+ log.error("Failed to delete subscription %s: %s", SubscriptionArn, e)
return False
diff --git a/salt/modules/boto_apigateway.py b/salt/modules/boto_apigateway.py
index eb855418e0e..661f09a8b38 100644
--- a/salt/modules/boto_apigateway.py
+++ b/salt/modules/boto_apigateway.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon APIGateway
.. versionadded:: 2016.11.0
@@ -74,21 +74,23 @@ Connection module for Amazon APIGateway
error:
message: error message
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-import logging
-import datetime
-# Import Salt libs
-from salt.ext import six
+import datetime
+import logging
+
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
+# Import Salt libs
+from salt.ext import six
+
log = logging.getLogger(__name__)
# Import third party libs
@@ -98,11 +100,13 @@ try:
# pylint: disable=unused-import
import boto
import boto3
+
# pylint: enable=unused-import
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -110,84 +114,90 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_apigateway execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
return salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- boto3_ver='1.2.1',
- botocore_ver='1.4.49'
+ boto_ver="2.8.0", boto3_ver="1.2.1", botocore_ver="1.4.49"
)
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'apigateway')
+ __utils__["boto3.assign_funcs"](__name__, "apigateway")
def _convert_datetime_str(response):
- '''
+ """
modify any key-value pair where value is a datetime object to a string.
- '''
+ """
if response:
- return dict([(k, '{0}'.format(v)) if isinstance(v, datetime.date) else (k, v) for k, v in six.iteritems(response)])
+ return dict(
+ [
+ (k, "{0}".format(v)) if isinstance(v, datetime.date) else (k, v)
+ for k, v in six.iteritems(response)
+ ]
+ )
return None
def _filter_apis(name, apis):
- '''
+ """
Return list of api items matching the given name.
- '''
- return [api for api in apis if api['name'] == name]
+ """
+ return [api for api in apis if api["name"] == name]
def _filter_apis_desc(desc, apis):
- '''
+ """
Return list of api items matching the given description.
- '''
- return [api for api in apis if api['description'] == desc]
+ """
+ return [api for api in apis if api["description"] == desc]
def _multi_call(function, contentkey, *args, **kwargs):
- '''
+ """
Retrieve full list of values for the contentkey from a boto3 ApiGateway
client function that may be paged via 'position'
- '''
+ """
ret = function(*args, **kwargs)
- position = ret.get('position')
+ position = ret.get("position")
while position:
more = function(*args, position=position, **kwargs)
ret[contentkey].extend(more[contentkey])
- position = more.get('position')
+ position = more.get("position")
return ret.get(contentkey)
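# Usage sketch: API Gateway list calls page via a "position" marker, so
# fetching every REST API or every API key reduces to a single helper call:
#
#     apis = _multi_call(conn.get_rest_apis, "items")
#     keys = _multi_call(conn.get_api_keys, "items")
#
# The helper keeps extending ret["items"] until a response arrives without a
# "position" key.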
-def _find_apis_by_name(name, description=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_apis_by_name(
+ name, description=None, region=None, key=None, keyid=None, profile=None
+):
+ """
get and return list of matching rest api information by the given name and desc.
If rest api name evaluates to False, return all apis w/o filtering the name.
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- apis = _multi_call(conn.get_rest_apis, 'items')
+ apis = _multi_call(conn.get_rest_apis, "items")
if name:
apis = _filter_apis(name, apis)
if description is not None:
apis = _filter_apis_desc(description, apis)
- return {'restapi': [_convert_datetime_str(api) for api in apis]}
+ return {"restapi": [_convert_datetime_str(api) for api in apis]}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_apis(name=None, description=None, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_apis(
+ name=None, description=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns all rest apis in the defined region. If optional parameter name is included,
returns all rest apis matching the name in the defined region.
@@ -201,18 +211,30 @@ def describe_apis(name=None, description=None, region=None, key=None, keyid=None
salt myminion boto_apigateway.describe_apis name='api name' description='desc str'
- '''
+ """
if name:
- return _find_apis_by_name(name, description=description,
- region=region, key=key, keyid=keyid, profile=profile)
+ return _find_apis_by_name(
+ name,
+ description=description,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
else:
- return _find_apis_by_name('', description=description,
- region=region, key=key, keyid=keyid, profile=profile)
+ return _find_apis_by_name(
+ "",
+ description=description,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
def api_exists(name, description=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if the given Rest API Name and optionally description exists.
CLI Example:
@@ -221,15 +243,22 @@ def api_exists(name, description=None, region=None, key=None, keyid=None, profil
salt myminion boto_apigateway.api_exists myapi_name
- '''
- apis = _find_apis_by_name(name, description=description,
- region=region, key=key, keyid=keyid, profile=profile)
- return {'exists': bool(apis.get('restapi'))}
+ """
+ apis = _find_apis_by_name(
+ name,
+ description=description,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return {"exists": bool(apis.get("restapi"))}
-def create_api(name, description, cloneFrom=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api(
+ name, description, cloneFrom=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Create a new REST API Service with the given name
Returns {created: True} if the rest api was created and returns
@@ -241,21 +270,23 @@ def create_api(name, description, cloneFrom=None,
salt myminion boto_apigateway.create_api myapi_name api_description
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if cloneFrom:
- api = conn.create_rest_api(name=name, description=description, cloneFrom=cloneFrom)
+ api = conn.create_rest_api(
+ name=name, description=description, cloneFrom=cloneFrom
+ )
else:
api = conn.create_rest_api(name=name, description=description)
api = _convert_datetime_str(api)
- return {'created': True, 'restapi': api} if api else {'created': False}
+ return {"created": True, "restapi": api} if api else {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
def delete_api(name, description=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete all REST API Service with the given name and an optional API description
Returns {deleted: True, count: deleted_count} if apis were deleted, and
@@ -269,24 +300,24 @@ def delete_api(name, description=None, region=None, key=None, keyid=None, profil
salt myminion boto_apigateway.delete_api myapi_name description='api description'
- '''
+ """
try:
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
r = _find_apis_by_name(name, description=description, **conn_params)
- apis = r.get('restapi')
+ apis = r.get("restapi")
if apis:
conn = _get_conn(**conn_params)
for api in apis:
- conn.delete_rest_api(restApiId=api['id'])
- return {'deleted': True, 'count': len(apis)}
+ conn.delete_rest_api(restApiId=api["id"])
+ return {"deleted": True, "count": len(apis)}
else:
- return {'deleted': False}
+ return {"deleted": False}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
def describe_api_resources(restApiId, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Given rest api id, return all resources for this api.
CLI Example:
@@ -295,20 +326,23 @@ def describe_api_resources(restApiId, region=None, key=None, keyid=None, profile
salt myminion boto_apigateway.describe_api_resources myapi_id
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- resources = sorted(_multi_call(conn.get_resources, 'items', restApiId=restApiId),
- key=lambda k: k['path'])
+ resources = sorted(
+ _multi_call(conn.get_resources, "items", restApiId=restApiId),
+ key=lambda k: k["path"],
+ )
- return {'resources': resources}
+ return {"resources": resources}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_api_resource(restApiId, path,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_resource(
+ restApiId, path, region=None, key=None, keyid=None, profile=None
+):
+ """
Given rest api id, and an absolute resource path, returns the resource id for
the given path.
@@ -318,20 +352,23 @@ def describe_api_resource(restApiId, path,
salt myminion boto_apigateway.describe_api_resource myapi_id resource_path
- '''
- r = describe_api_resources(restApiId, region=region, key=key, keyid=keyid, profile=profile)
- resources = r.get('resources')
+ """
+ r = describe_api_resources(
+ restApiId, region=region, key=key, keyid=keyid, profile=profile
+ )
+ resources = r.get("resources")
if resources is None:
return r
for resource in resources:
- if resource['path'] == path:
- return {'resource': resource}
- return {'resource': None}
+ if resource["path"] == path:
+ return {"resource": resource}
+ return {"resource": None}
-def create_api_resources(restApiId, path,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_resources(
+ restApiId, path, region=None, key=None, keyid=None, profile=None
+):
+ """
Given rest api id, and an absolute resource path, create all the resources and
return all resources in the resourcepath, returns False on failure.
@@ -341,35 +378,44 @@ def create_api_resources(restApiId, path,
salt myminion boto_apigateway.create_api_resources myapi_id resource_path
- '''
- path_parts = path.split('/')
+ """
+ path_parts = path.split("/")
created = []
- current_path = ''
+ current_path = ""
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for path_part in path_parts:
- if current_path == '/':
- current_path = '{0}{1}'.format(current_path, path_part)
+ if current_path == "/":
+ current_path = "{0}{1}".format(current_path, path_part)
else:
- current_path = '{0}/{1}'.format(current_path, path_part)
- r = describe_api_resource(restApiId, current_path,
- region=region, key=key, keyid=keyid, profile=profile)
- resource = r.get('resource')
+ current_path = "{0}/{1}".format(current_path, path_part)
+ r = describe_api_resource(
+ restApiId,
+ current_path,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ resource = r.get("resource")
if not resource:
- resource = conn.create_resource(restApiId=restApiId, parentId=created[-1]['id'], pathPart=path_part)
+ resource = conn.create_resource(
+ restApiId=restApiId, parentId=created[-1]["id"], pathPart=path_part
+ )
created.append(resource)
if created:
- return {'created': True, 'restApiId': restApiId, 'resources': created}
+ return {"created": True, "restApiId": restApiId, "resources": created}
else:
- return {'created': False, 'error': 'unexpected error.'}
+ return {"created": False, "error": "unexpected error."}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_resources(restApiId, path,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_resources(
+ restApiId, path, region=None, key=None, keyid=None, profile=None
+):
+ """
Given restApiId and an absolute resource path, delete the resources starting
from the absolute resource path. If resourcepath is the root resource '/',
the function will return False. Returns False on failure.
@@ -380,25 +426,28 @@ def delete_api_resources(restApiId, path,
salt myminion boto_apigateway.delete_api_resources myapi_id, resource_path
- '''
- if path == '/':
- return {'deleted': False, 'error': 'use delete_api to remove the root resource'}
+ """
+ if path == "/":
+ return {"deleted": False, "error": "use delete_api to remove the root resource"}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = describe_api_resource(restApiId, path, region=region, key=key, keyid=keyid, profile=profile)
- resource = r.get('resource')
+ r = describe_api_resource(
+ restApiId, path, region=region, key=key, keyid=keyid, profile=profile
+ )
+ resource = r.get("resource")
if resource:
- conn.delete_resource(restApiId=restApiId, resourceId=resource['id'])
- return {'deleted': True}
+ conn.delete_resource(restApiId=restApiId, resourceId=resource["id"])
+ return {"deleted": True}
else:
- return {'deleted': False, 'error': 'no resource found by {0}'.format(path)}
+ return {"deleted": False, "error": "no resource found by {0}".format(path)}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_resource_method(restApiId, resourcePath, httpMethod,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_resource_method(
+ restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None
+):
+ """
Given rest api id, resource path, and http method (must be one of DELETE,
GET, HEAD, OPTIONS, PATCH, POST, PUT), return the method for the
api/resource path if defined. Return False if method is not defined.
@@ -409,23 +458,26 @@ def describe_api_resource_method(restApiId, resourcePath, httpMethod,
salt myminion boto_apigateway.describe_api_resource_method myapi_id resource_path httpmethod
- '''
- r = describe_api_resource(restApiId, resourcePath,
- region=region, key=key, keyid=keyid, profile=profile)
- resource = r.get('resource')
+ """
+ r = describe_api_resource(
+ restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile
+ )
+ resource = r.get("resource")
if not resource:
- return {'error': 'no such resource'}
+ return {"error": "no such resource"}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- method = conn.get_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
- return {'method': method}
+ method = conn.get_method(
+ restApiId=restApiId, resourceId=resource["id"], httpMethod=httpMethod
+ )
+ return {"method": method}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Gets info about the given api key
CLI Example:
@@ -434,17 +486,17 @@ def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.describe_api_key apigw_api_key
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = conn.get_api_key(apiKey=apiKey)
- return {'apiKey': _convert_datetime_str(response)}
+ return {"apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def describe_api_keys(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Gets information about the defined API Keys. Return list of apiKeys.
CLI Example:
@@ -453,19 +505,27 @@ def describe_api_keys(region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.describe_api_keys
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- apikeys = _multi_call(conn.get_api_keys, 'items')
+ apikeys = _multi_call(conn.get_api_keys, "items")
- return {'apiKeys': [_convert_datetime_str(apikey) for apikey in apikeys]}
+ return {"apiKeys": [_convert_datetime_str(apikey) for apikey in apikeys]}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_api_key(name, description, enabled=True, stageKeys=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_key(
+ name,
+ description,
+ enabled=True,
+ stageKeys=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an API key given name and description.
An optional enabled argument can be provided. If provided, the
@@ -485,24 +545,25 @@ def create_api_key(name, description, enabled=True, stageKeys=None,
salt myminion boto_apigateway.create_api_key name description \\
stageKeys='[{"restApiId": "id", "stageName": "stagename"}]'
- '''
+ """
try:
stageKeys = list() if stageKeys is None else stageKeys
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.create_api_key(name=name, description=description,
- enabled=enabled, stageKeys=stageKeys)
+ response = conn.create_api_key(
+ name=name, description=description, enabled=enabled, stageKeys=stageKeys
+ )
if not response:
- return {'created': False}
+ return {"created": False}
- return {'created': True, 'apiKey': _convert_datetime_str(response)}
+ return {"created": True, "apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Deletes a given apiKey
CLI Example:
@@ -511,51 +572,56 @@ def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.delete_api_key apikeystring
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_api_key(apiKey=apiKey)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
def _api_key_patch_replace(conn, apiKey, path, value):
- '''
+ """
the replace patch operation on an ApiKey resource
- '''
- response = conn.update_api_key(apiKey=apiKey,
- patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
+ """
+ response = conn.update_api_key(
+ apiKey=apiKey, patchOperations=[{"op": "replace", "path": path, "value": value}]
+ )
return response
def _api_key_patchops(op, pvlist):
- '''
+ """
helper function to return patchOperations object
- '''
- return [{'op': op, 'path': p, 'value': v} for (p, v) in pvlist]
+ """
+ return [{"op": op, "path": p, "value": v} for (p, v) in pvlist]
def _api_key_patch_add(conn, apiKey, pvlist):
- '''
+ """
the add patch operation for a list of (path, value) tuples on an ApiKey resource list path
- '''
- response = conn.update_api_key(apiKey=apiKey,
- patchOperations=_api_key_patchops('add', pvlist))
+ """
+ response = conn.update_api_key(
+ apiKey=apiKey, patchOperations=_api_key_patchops("add", pvlist)
+ )
return response
def _api_key_patch_remove(conn, apiKey, pvlist):
- '''
+ """
the remove patch operation for a list of (path, value) tuples on an ApiKey resource list path
- '''
- response = conn.update_api_key(apiKey=apiKey,
- patchOperations=_api_key_patchops('remove', pvlist))
+ """
+ response = conn.update_api_key(
+ apiKey=apiKey, patchOperations=_api_key_patchops("remove", pvlist)
+ )
return response
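# The helpers above emit API Gateway's JSON-Patch-style patchOperations. For
# example, attaching a key to two stages (ids are made up; values take the
# "restApiId/stageName" form):
#
#     _api_key_patchops("add", [("/stages", "abc123/dev"), ("/stages", "abc123/prod")])
#     # -> [{"op": "add", "path": "/stages", "value": "abc123/dev"},
#     #     {"op": "add", "path": "/stages", "value": "abc123/prod"}]
#
# update_api_key() then applies the whole list to the key in one request.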
-def update_api_key_description(apiKey, description, region=None, key=None, keyid=None, profile=None):
- '''
+def update_api_key_description(
+ apiKey, description, region=None, key=None, keyid=None, profile=None
+):
+ """
update the given apiKey with the given description.
CLI Example:
@@ -564,17 +630,17 @@ def update_api_key_description(apiKey, description, region=None, key=None, keyid
salt myminion boto_apigateway.update_api_key_description api_key description
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = _api_key_patch_replace(conn, apiKey, '/description', description)
- return {'updated': True, 'apiKey': _convert_datetime_str(response)}
+ response = _api_key_patch_replace(conn, apiKey, "/description", description)
+ return {"updated": True, "apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
- '''
+ """
enable the given apiKey.
CLI Example:
@@ -583,17 +649,17 @@ def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.enable_api_key api_key
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = _api_key_patch_replace(conn, apiKey, '/enabled', 'True')
- return {'apiKey': _convert_datetime_str(response)}
+ response = _api_key_patch_replace(conn, apiKey, "/enabled", "True")
+ return {"apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def disable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
- '''
+ """
disable the given apiKey.
CLI Example:
@@ -602,17 +668,19 @@ def disable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.disable_api_key api_key
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = _api_key_patch_replace(conn, apiKey, '/enabled', 'False')
- return {'apiKey': _convert_datetime_str(response)}
+ response = _api_key_patch_replace(conn, apiKey, "/enabled", "False")
+ return {"apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def associate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None):
- '''
+def associate_api_key_stagekeys(
+ apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None
+):
+ """
associate the given stagekeyslist to the given apiKey.
CLI Example:
@@ -622,18 +690,20 @@ def associate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, ke
salt myminion boto_apigateway.associate_stagekeys_api_key \\
api_key '["restapi id/stage name", ...]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- pvlist = [('/stages', stagekey) for stagekey in stagekeyslist]
+ pvlist = [("/stages", stagekey) for stagekey in stagekeyslist]
response = _api_key_patch_add(conn, apiKey, pvlist)
- return {'associated': True, 'apiKey': _convert_datetime_str(response)}
+ return {"associated": True, "apiKey": _convert_datetime_str(response)}
except ClientError as e:
- return {'associated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"associated": False, "error": __utils__["boto3.get_error"](e)}
-def disassociate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None):
- '''
+def disassociate_api_key_stagekeys(
+ apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None
+):
+ """
disassociate the given stagekeyslist to the given apiKey.
CLI Example:
@@ -643,18 +713,20 @@ def disassociate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None,
salt myminion boto_apigateway.disassociate_api_key_stagekeys \\
api_key '["restapi id/stage name", ...]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- pvlist = [('/stages', stagekey) for stagekey in stagekeyslist]
+ pvlist = [("/stages", stagekey) for stagekey in stagekeyslist]
response = _api_key_patch_remove(conn, apiKey, pvlist)
- return {'disassociated': True}
+ return {"disassociated": True}
except ClientError as e:
- return {'disassociated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"disassociated": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_deployments(
+ restApiId, region=None, key=None, keyid=None, profile=None
+):
+ """
Gets information about the defined API deployments. Returns a list of API deployments.
CLI Example:
@@ -663,7 +735,7 @@ def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profi
salt myminion boto_apigateway.describe_api_deployments restApiId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployments = []
@@ -671,18 +743,26 @@ def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profi
while True:
if _deployments:
- deployments = deployments + _deployments['items']
- if 'position' not in _deployments:
+ deployments = deployments + _deployments["items"]
+ if "position" not in _deployments:
break
- _deployments = conn.get_deployments(restApiId=restApiId, position=_deployments['position'])
+ _deployments = conn.get_deployments(
+ restApiId=restApiId, position=_deployments["position"]
+ )
- return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
+ return {
+ "deployments": [
+ _convert_datetime_str(deployment) for deployment in deployments
+ ]
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_deployment(
+ restApiId, deploymentId, region=None, key=None, keyid=None, profile=None
+):
+ """
Get API deployment for a given restApiId and deploymentId.
CLI Example:
@@ -691,18 +771,19 @@ def describe_api_deployment(restApiId, deploymentId, region=None, key=None, keyi
salt myminion boto_apigateway.describe_api_deployment restApiId deploymentId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployment = conn.get_deployment(restApiId=restApiId, deploymentId=deploymentId)
- return {'deployment': _convert_datetime_str(deployment)}
+ return {"deployment": _convert_datetime_str(deployment)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def activate_api_deployment(restApiId, stageName, deploymentId,
- region=None, key=None, keyid=None, profile=None):
- '''
+def activate_api_deployment(
+ restApiId, stageName, deploymentId, region=None, key=None, keyid=None, profile=None
+):
+ """
Activates previously deployed deployment for a given stage
CLI Example:
@@ -711,22 +792,35 @@ def activate_api_deployment(restApiId, stageName, deploymentId,
salt myminion boto_apigateway.activate_api_deployment restApiId stageName deploymentId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.update_stage(restApiId=restApiId, stageName=stageName,
- patchOperations=[{'op': 'replace',
- 'path': '/deploymentId',
- 'value': deploymentId}])
- return {'set': True, 'response': _convert_datetime_str(response)}
+ response = conn.update_stage(
+ restApiId=restApiId,
+ stageName=stageName,
+ patchOperations=[
+ {"op": "replace", "path": "/deploymentId", "value": deploymentId}
+ ],
+ )
+ return {"set": True, "response": _convert_datetime_str(response)}
except ClientError as e:
- return {'set': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"set": False, "error": __utils__["boto3.get_error"](e)}
-def create_api_deployment(restApiId, stageName, stageDescription='', description='', cacheClusterEnabled=False,
- cacheClusterSize='0.5', variables=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_deployment(
+ restApiId,
+ stageName,
+ stageDescription="",
+ description="",
+ cacheClusterEnabled=False,
+ cacheClusterSize="0.5",
+ variables=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a new API deployment.
CLI Example:
@@ -736,22 +830,29 @@ def create_api_deployment(restApiId, stageName, stageDescription='', description
salt myminion boto_apigateway.create_api_deployment restApiId stageName stageDescription='' \\
description='' cacheClusterEnabled=True|False cacheClusterSize=0.5 variables='{"name": "value"}'
- '''
+ """
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- deployment = conn.create_deployment(restApiId=restApiId, stageName=stageName,
- stageDescription=stageDescription, description=description,
- cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize,
- variables=variables)
- return {'created': True, 'deployment': _convert_datetime_str(deployment)}
+ deployment = conn.create_deployment(
+ restApiId=restApiId,
+ stageName=stageName,
+ stageDescription=stageDescription,
+ description=description,
+ cacheClusterEnabled=cacheClusterEnabled,
+ cacheClusterSize=cacheClusterSize,
+ variables=variables,
+ )
+ return {"created": True, "deployment": _convert_datetime_str(deployment)}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_deployment(
+ restApiId, deploymentId, region=None, key=None, keyid=None, profile=None
+):
+ """
Deletes API deployment for a given restApiId and deploymentId
CLI Example:
@@ -760,17 +861,19 @@ def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=
salt myminion boto_apigateway.delete_api_deployment restApiId deploymentId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_deployment(restApiId=restApiId, deploymentId=deploymentId)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def overwrite_api_stage_variables(restApiId, stageName, variables, region=None, key=None, keyid=None, profile=None):
- '''
+def overwrite_api_stage_variables(
+ restApiId, stageName, variables, region=None, key=None, keyid=None, profile=None
+):
+ """
Overwrite the stage variables for the given restApiId and stage name with the given variables,
which must be provided as a dictionary. Overwrite always removes all of the existing
stage variables associated with the given restApiId and stage name, followed by the addition of all the
@@ -782,41 +885,46 @@ def overwrite_api_stage_variables(restApiId, stageName, variables, region=None,
salt myminion boto_apigateway.overwrite_api_stage_variables restApiId stageName variables='{"name": "value"}'
- '''
+ """
try:
- res = describe_api_stage(restApiId, stageName, region=region, key=key, keyid=keyid, profile=profile)
- if res.get('error'):
- return {'overwrite': False, 'error': res.get('error')}
+ res = describe_api_stage(
+ restApiId, stageName, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if res.get("error"):
+ return {"overwrite": False, "error": res.get("error")}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
# remove all existing variables that are not in the given variables,
# followed by adding of the variables
- stage = res.get('stage')
- old_vars = stage.get('variables', {})
+ stage = res.get("stage")
+ old_vars = stage.get("variables", {})
patch_ops = []
for old_var in old_vars:
if old_var not in variables:
- patch_ops.append(dict(op='remove',
- path='/variables/{0}'.format(old_var),
- value=''))
+ patch_ops.append(
+ dict(op="remove", path="/variables/{0}".format(old_var), value="")
+ )
for var, val in six.iteritems(variables):
if var not in old_vars or old_vars[var] != val:
- patch_ops.append(dict(op='replace',
- path='/variables/{0}'.format(var),
- value=val))
+ patch_ops.append(
+ dict(op="replace", path="/variables/{0}".format(var), value=val)
+ )
if patch_ops:
- stage = conn.update_stage(restApiId=restApiId, stageName=stageName,
- patchOperations=patch_ops)
+ stage = conn.update_stage(
+ restApiId=restApiId, stageName=stageName, patchOperations=patch_ops
+ )
- return {'overwrite': True, 'stage': _convert_datetime_str(stage)}
+ return {"overwrite": True, "stage": _convert_datetime_str(stage)}
except ClientError as e:
- return {'overwrite': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"overwrite": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_stage(
+ restApiId, stageName, region=None, key=None, keyid=None, profile=None
+):
+ """
Get API stage for a given restApiId and stage name
CLI Example:
@@ -825,17 +933,19 @@ def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None,
salt myminion boto_apigateway.describe_api_stage restApiId stageName
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stage = conn.get_stage(restApiId=restApiId, stageName=stageName)
- return {'stage': _convert_datetime_str(stage)}
+ return {"stage": _convert_datetime_str(stage)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_api_stages(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_stages(
+ restApiId, deploymentId, region=None, key=None, keyid=None, profile=None
+):
+ """
Get all API stages for a given restApiId and deploymentId
CLI Example:
@@ -844,19 +954,29 @@ def describe_api_stages(restApiId, deploymentId, region=None, key=None, keyid=No
salt myminion boto_apigateway.describe_api_stages restApiId deploymentId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stages = conn.get_stages(restApiId=restApiId, deploymentId=deploymentId)
- return {'stages': [_convert_datetime_str(stage) for stage in stages['item']]}
+ return {"stages": [_convert_datetime_str(stage) for stage in stages["item"]]}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_api_stage(restApiId, stageName, deploymentId, description='',
- cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_stage(
+ restApiId,
+ stageName,
+ deploymentId,
+ description="",
+ cacheClusterEnabled=False,
+ cacheClusterSize="0.5",
+ variables=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a new API stage for a given restApiId and deploymentId.
CLI Example:
@@ -866,21 +986,29 @@ def create_api_stage(restApiId, stageName, deploymentId, description='',
salt myminion boto_apigateway.create_api_stage restApiId stageName deploymentId \\
description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
- '''
+ """
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- stage = conn.create_stage(restApiId=restApiId, stageName=stageName, deploymentId=deploymentId,
- description=description, cacheClusterEnabled=cacheClusterEnabled,
- cacheClusterSize=cacheClusterSize, variables=variables)
- return {'created': True, 'stage': _convert_datetime_str(stage)}
+ stage = conn.create_stage(
+ restApiId=restApiId,
+ stageName=stageName,
+ deploymentId=deploymentId,
+ description=description,
+ cacheClusterEnabled=cacheClusterEnabled,
+ cacheClusterSize=cacheClusterSize,
+ variables=variables,
+ )
+ return {"created": True, "stage": _convert_datetime_str(stage)}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_stage(
+ restApiId, stageName, region=None, key=None, keyid=None, profile=None
+):
+ """
Deletes stage identified by stageName from API identified by restApiId
CLI Example:
@@ -889,17 +1017,19 @@ def delete_api_stage(restApiId, stageName, region=None, key=None, keyid=None, pr
salt myminion boto_apigateway.delete_api_stage restApiId stageName
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_stage(restApiId=restApiId, stageName=stageName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def flush_api_stage_cache(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
- '''
+def flush_api_stage_cache(
+ restApiId, stageName, region=None, key=None, keyid=None, profile=None
+):
+ """
Flushes cache for the stage identified by stageName from API identified by restApiId
CLI Example:
@@ -908,19 +1038,29 @@ def flush_api_stage_cache(restApiId, stageName, region=None, key=None, keyid=Non
salt myminion boto_apigateway.flush_api_stage_cache restApiId stageName
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.flush_stage_cache(restApiId=restApiId, stageName=stageName)
- return {'flushed': True}
+ return {"flushed": True}
except ClientError as e:
- return {'flushed': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"flushed": False, "error": __utils__["boto3.get_error"](e)}
-def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
- apiKeyRequired=False, requestParameters=None, requestModels=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_method(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ authorizationType,
+ apiKeyRequired=False,
+ requestParameters=None,
+ requestModels=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates API method for a resource in the given API
CLI Example:
@@ -930,27 +1070,43 @@ def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
salt myminion boto_apigateway.create_api_method restApiId resourcePath httpMethod authorizationType \\
apiKeyRequired=False requestParameters='{"name": "value"}' requestModels='{"content-type": "value"}'
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
- requestParameters = dict() if requestParameters is None else requestParameters
+ requestParameters = (
+ dict() if requestParameters is None else requestParameters
+ )
requestModels = dict() if requestModels is None else requestModels
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- method = conn.put_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
- authorizationType=str(authorizationType), apiKeyRequired=apiKeyRequired, # future lint: disable=blacklisted-function
- requestParameters=requestParameters, requestModels=requestModels)
- return {'created': True, 'method': method}
- return {'created': False, 'error': 'Failed to create method'}
+ method = conn.put_method(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ authorizationType=str(authorizationType),
+ apiKeyRequired=apiKeyRequired, # future lint: disable=blacklisted-function
+ requestParameters=requestParameters,
+ requestModels=requestModels,
+ )
+ return {"created": True, "method": method}
+ return {"created": False, "error": "Failed to create method"}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_method(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_method(
+ restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None
+):
+ """
Get API method for a resource in the given API
CLI Example:
@@ -959,21 +1115,31 @@ def describe_api_method(restApiId, resourcePath, httpMethod, region=None, key=No
salt myminion boto_apigateway.describe_api_method restApiId resourcePath httpMethod
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- method = conn.get_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
- return {'method': _convert_datetime_str(method)}
- return {'error': 'get API method failed: no such resource'}
+ method = conn.get_method(
+ restApiId=restApiId, resourceId=resource["id"], httpMethod=httpMethod
+ )
+ return {"method": _convert_datetime_str(method)}
+ return {"error": "get API method failed: no such resource"}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def delete_api_method(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_method(
+ restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete API method for a resource in the given API
CLI Example:
@@ -982,22 +1148,40 @@ def delete_api_method(restApiId, resourcePath, httpMethod, region=None, key=None
salt myminion boto_apigateway.delete_api_method restApiId resourcePath httpMethod
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.delete_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
- return {'deleted': True}
- return {'deleted': False, 'error': 'get API method failed: no such resource'}
+ conn.delete_method(
+ restApiId=restApiId, resourceId=resource["id"], httpMethod=httpMethod
+ )
+ return {"deleted": True}
+ return {"deleted": False, "error": "get API method failed: no such resource"}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode, responseParameters=None,
- responseModels=None, region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_method_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ responseParameters=None,
+ responseModels=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create API method response for a method on a given resource in the given API
CLI Example:
@@ -1007,27 +1191,48 @@ def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
salt myminion boto_apigateway.create_api_method_response restApiId resourcePath httpMethod \\
statusCode responseParameters='{"name": "True|False"}' responseModels='{"content-type": "model"}'
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
- responseParameters = dict() if responseParameters is None else responseParameters
+ responseParameters = (
+ dict() if responseParameters is None else responseParameters
+ )
responseModels = dict() if responseModels is None else responseModels
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.put_method_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=str(statusCode), # future lint: disable=blacklisted-function
- responseParameters=responseParameters, responseModels=responseModels)
- return {'created': True, 'response': response}
- return {'created': False, 'error': 'no such resource'}
+ response = conn.put_method_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=str(statusCode), # future lint: disable=blacklisted-function
+ responseParameters=responseParameters,
+ responseModels=responseModels,
+ )
+ return {"created": True, "response": response}
+ return {"created": False, "error": "no such resource"}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_method_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete API method response for a resource in the given API
CLI Example:
@@ -1036,23 +1241,41 @@ def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
salt myminion boto_apigateway.delete_api_method_response restApiId resourcePath httpMethod statusCode
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.delete_method_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=str(statusCode)) # future lint: disable=blacklisted-function
- return {'deleted': True}
- return {'deleted': False, 'error': 'no such resource'}
+ conn.delete_method_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=str(statusCode),
+ ) # future lint: disable=blacklisted-function
+ return {"deleted": True}
+ return {"deleted": False, "error": "no such resource"}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_method_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get API method response for a resource in the given API
CLI Example:
@@ -1061,22 +1284,32 @@ def describe_api_method_response(restApiId, resourcePath, httpMethod, statusCode
salt myminion boto_apigateway.describe_api_method_response restApiId resourcePath httpMethod statusCode
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.get_method_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=str(statusCode)) # future lint: disable=blacklisted-function
- return {'response': _convert_datetime_str(response)}
- return {'error': 'no such resource'}
+ response = conn.get_method_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=str(statusCode),
+ ) # future lint: disable=blacklisted-function
+ return {"response": _convert_datetime_str(response)}
+ return {"error": "no such resource"}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def describe_api_models(restApiId, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get all models for a given API
CLI Example:
@@ -1085,17 +1318,19 @@ def describe_api_models(restApiId, region=None, key=None, keyid=None, profile=No
salt myminion boto_apigateway.describe_api_models restApiId
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- models = _multi_call(conn.get_models, 'items', restApiId=restApiId)
- return {'models': [_convert_datetime_str(model) for model in models]}
+ models = _multi_call(conn.get_models, "items", restApiId=restApiId)
+ return {"models": [_convert_datetime_str(model) for model in models]}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_api_model(restApiId, modelName, flatten=True, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_model(
+ restApiId, modelName, flatten=True, region=None, key=None, keyid=None, profile=None
+):
+ """
Get a model by name for a given API
CLI Example:
@@ -1104,17 +1339,21 @@ def describe_api_model(restApiId, modelName, flatten=True, region=None, key=None
salt myminion boto_apigateway.describe_api_model restApiId modelName [True]
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- model = conn.get_model(restApiId=restApiId, modelName=modelName, flatten=flatten)
- return {'model': _convert_datetime_str(model)}
+ model = conn.get_model(
+ restApiId=restApiId, modelName=modelName, flatten=flatten
+ )
+ return {"model": _convert_datetime_str(model)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def api_model_exists(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
- '''
+def api_model_exists(
+ restApiId, modelName, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if the given modelName exists in the given restApiId
CLI Example:
@@ -1122,23 +1361,30 @@ def api_model_exists(restApiId, modelName, region=None, key=None, keyid=None, pr
.. code-block:: bash
salt myminion boto_apigateway.api_model_exists restApiId modelName
- '''
- r = describe_api_model(restApiId, modelName, region=region, key=key, keyid=keyid, profile=profile)
+ """
+ r = describe_api_model(
+ restApiId, modelName, region=region, key=key, keyid=keyid, profile=profile
+ )
- return {'exists': bool(r.get('model'))}
+ return {"exists": bool(r.get("model"))}
def _api_model_patch_replace(conn, restApiId, modelName, path, value):
- '''
+ """
the replace patch operation on a Model resource
- '''
- response = conn.update_model(restApiId=restApiId, modelName=modelName,
- patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
+ """
+ response = conn.update_model(
+ restApiId=restApiId,
+ modelName=modelName,
+ patchOperations=[{"op": "replace", "path": path, "value": value}],
+ )
return response
-def update_api_model_schema(restApiId, modelName, schema, region=None, key=None, keyid=None, profile=None):
- '''
+def update_api_model_schema(
+ restApiId, modelName, schema, region=None, key=None, keyid=None, profile=None
+):
+ """
update the schema (a Python dictionary or a pre-serialized JSON string) for the given model in the given restApiId
CLI Example:
@@ -1147,18 +1393,24 @@ def update_api_model_schema(restApiId, modelName, schema, region=None, key=None,
salt myminion boto_apigateway.update_api_model_schema restApiId modelName schema
- '''
+ """
try:
- schema_json = salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema
+ schema_json = (
+ salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = _api_model_patch_replace(conn, restApiId, modelName, '/schema', schema_json)
- return {'updated': True, 'model': _convert_datetime_str(response)}
+ response = _api_model_patch_replace(
+ conn, restApiId, modelName, "/schema", schema_json
+ )
+ return {"updated": True, "model": _convert_datetime_str(response)}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_model(
+ restApiId, modelName, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a model identified by name in a given API
CLI Example:
@@ -1167,18 +1419,27 @@ def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, pr
salt myminion boto_apigateway.delete_api_model restApiId modelName
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_model(restApiId=restApiId, modelName=modelName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def create_api_model(restApiId, modelName, modelDescription, schema, contentType='application/json',
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_model(
+ restApiId,
+ modelName,
+ modelDescription,
+ schema,
+ contentType="application/json",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a new model in a given API with a given schema. Currently the only supported contentType is
'application/json'
@@ -1188,19 +1449,28 @@ def create_api_model(restApiId, modelName, modelDescription, schema, contentType
salt myminion boto_apigateway.create_api_model restApiId modelName modelDescription '' 'content-type'
- '''
+ """
try:
- schema_json = salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema
+ schema_json = (
+ salt.utils.json.dumps(schema) if isinstance(schema, dict) else schema
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- model = conn.create_model(restApiId=restApiId, name=modelName, description=modelDescription,
- schema=schema_json, contentType=contentType)
- return {'created': True, 'model': _convert_datetime_str(model)}
+ model = conn.create_model(
+ restApiId=restApiId,
+ name=modelName,
+ description=modelDescription,
+ schema=schema_json,
+ contentType=contentType,
+ )
+ return {"created": True, "model": _convert_datetime_str(model)}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def describe_api_integration(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_integration(
+ restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None
+):
+ """
Get an integration for a given method in a given API
CLI Example:
@@ -1209,22 +1479,38 @@ def describe_api_integration(restApiId, resourcePath, httpMethod, region=None, k
salt myminion boto_apigateway.describe_api_integration restApiId resourcePath httpMethod
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- integration = conn.get_integration(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
- return {'integration': _convert_datetime_str(integration)}
- return {'error': 'no such resource'}
+ integration = conn.get_integration(
+ restApiId=restApiId, resourceId=resource["id"], httpMethod=httpMethod
+ )
+ return {"integration": _convert_datetime_str(integration)}
+ return {"error": "no such resource"}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_api_integration_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get an integration response for a given method in a given API
CLI Example:
@@ -1233,22 +1519,34 @@ def describe_api_integration_response(restApiId, resourcePath, httpMethod, statu
salt myminion boto_apigateway.describe_api_integration_response restApiId resourcePath httpMethod statusCode
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.get_integration_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=statusCode)
- return {'response': _convert_datetime_str(response)}
- return {'error': 'no such resource'}
+ response = conn.get_integration_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=statusCode,
+ )
+ return {"response": _convert_datetime_str(response)}
+ return {"error": "no such resource"}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def delete_api_integration(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_integration(
+ restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None
+):
+ """
Deletes an integration for a given method in a given API
CLI Example:
@@ -1257,22 +1555,38 @@ def delete_api_integration(restApiId, resourcePath, httpMethod, region=None, key
salt myminion boto_apigateway.delete_api_integration restApiId resourcePath httpMethod
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.delete_integration(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
- return {'deleted': True}
- return {'deleted': False, 'error': 'no such resource'}
+ conn.delete_integration(
+ restApiId=restApiId, resourceId=resource["id"], httpMethod=httpMethod
+ )
+ return {"deleted": True}
+ return {"deleted": False, "error": "no such resource"}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_api_integration_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Deletes an integration response for a given method in a given API
CLI Example:
@@ -1281,38 +1595,60 @@ def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusC
salt myminion boto_apigateway.delete_api_integration_response restApiId resourcePath httpMethod statusCode
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.delete_integration_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=statusCode)
- return {'deleted': True}
- return {'deleted': False, 'error': 'no such resource'}
+ conn.delete_integration_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=statusCode,
+ )
+ return {"deleted": True}
+ return {"deleted": False, "error": "no such resource"}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Helper function that returns name as-is if it already looks like an ARN, and otherwise builds a role ARN from it.
- '''
- if name.startswith('arn:aws:iam:'):
+ """
+ if name.startswith("arn:aws:iam:"):
return name
- account_id = __salt__['boto_iam.get_account_id'](
+ account_id = __salt__["boto_iam.get_account_id"](
region=region, key=key, keyid=keyid, profile=profile
)
- return 'arn:aws:iam::{0}:role/{1}'.format(account_id, name)
+ return "arn:aws:iam::{0}:role/{1}".format(account_id, name)
-def create_api_integration(restApiId, resourcePath, httpMethod, integrationType, integrationHttpMethod,
- uri, credentials, requestParameters=None, requestTemplates=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_integration(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ integrationType,
+ integrationHttpMethod,
+ uri,
+ credentials,
+ requestParameters=None,
+ requestTemplates=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates an integration for a given method in a given API.
If integrationType is MOCK, uri and credential parameters will be ignored.
@@ -1328,34 +1664,61 @@ def create_api_integration(restApiId, resourcePath, httpMethod, integrationType,
salt myminion boto_apigateway.create_api_integration restApiId resourcePath httpMethod \\
integrationType integrationHttpMethod uri credentials ['{}' ['{}']]
- '''
+ """
try:
- credentials = _get_role_arn(credentials, region=region, key=key, keyid=keyid, profile=profile)
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ credentials = _get_role_arn(
+ credentials, region=region, key=key, keyid=keyid, profile=profile
+ )
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
- requestParameters = dict() if requestParameters is None else requestParameters
+ requestParameters = (
+ dict() if requestParameters is None else requestParameters
+ )
requestTemplates = dict() if requestTemplates is None else requestTemplates
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if httpMethod.lower() == 'options':
+ if httpMethod.lower() == "options":
uri = ""
credentials = ""
- integration = conn.put_integration(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
- type=integrationType, integrationHttpMethod=integrationHttpMethod,
- uri=uri, credentials=credentials, requestParameters=requestParameters,
- requestTemplates=requestTemplates)
- return {'created': True, 'integration': integration}
- return {'created': False, 'error': 'no such resource'}
+ integration = conn.put_integration(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ type=integrationType,
+ integrationHttpMethod=integrationHttpMethod,
+ uri=uri,
+ credentials=credentials,
+ requestParameters=requestParameters,
+ requestTemplates=requestTemplates,
+ )
+ return {"created": True, "integration": integration}
+ return {"created": False, "error": "no such resource"}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def create_api_integration_response(restApiId, resourcePath, httpMethod, statusCode, selectionPattern,
- responseParameters=None, responseTemplates=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_api_integration_response(
+ restApiId,
+ resourcePath,
+ httpMethod,
+ statusCode,
+ selectionPattern,
+ responseParameters=None,
+ responseTemplates=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates an integration response for a given method in a given API
CLI Example:
@@ -1365,35 +1728,51 @@ def create_api_integration_response(restApiId, resourcePath, httpMethod, statusC
salt myminion boto_apigateway.create_api_integration_response restApiId resourcePath httpMethod \\
statusCode selectionPattern ['{}' ['{}']]
- '''
+ """
try:
- resource = describe_api_resource(restApiId, resourcePath, region=region,
- key=key, keyid=keyid, profile=profile).get('resource')
+ resource = describe_api_resource(
+ restApiId,
+ resourcePath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("resource")
if resource:
- responseParameters = dict() if responseParameters is None else responseParameters
- responseTemplates = dict() if responseTemplates is None else responseTemplates
+ responseParameters = (
+ dict() if responseParameters is None else responseParameters
+ )
+ responseTemplates = (
+ dict() if responseTemplates is None else responseTemplates
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- response = conn.put_integration_response(restApiId=restApiId, resourceId=resource['id'],
- httpMethod=httpMethod, statusCode=statusCode,
- selectionPattern=selectionPattern,
- responseParameters=responseParameters,
- responseTemplates=responseTemplates)
- return {'created': True, 'response': response}
- return {'created': False, 'error': 'no such resource'}
+ response = conn.put_integration_response(
+ restApiId=restApiId,
+ resourceId=resource["id"],
+ httpMethod=httpMethod,
+ statusCode=statusCode,
+ selectionPattern=selectionPattern,
+ responseParameters=responseParameters,
+ responseTemplates=responseTemplates,
+ )
+ return {"created": True, "response": response}
+ return {"created": False, "error": "no such resource"}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
def _filter_plans(attr, name, plans):
- '''
+ """
Helper to return list of usage plan items matching the given attribute value.
- '''
+ """
return [plan for plan in plans if plan[attr] == name]
-def describe_usage_plans(name=None, plan_id=None, region=None, key=None, keyid=None, profile=None):
- '''
+def describe_usage_plans(
+ name=None, plan_id=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns a list of existing usage plans, optionally filtered to match a given plan name
.. versionadded:: 2017.7.0
@@ -1406,46 +1785,63 @@ def describe_usage_plans(name=None, plan_id=None, region=None, key=None, keyid=N
salt myminion boto_apigateway.describe_usage_plans name='usage plan name'
salt myminion boto_apigateway.describe_usage_plans plan_id='usage plan id'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- plans = _multi_call(conn.get_usage_plans, 'items')
+ plans = _multi_call(conn.get_usage_plans, "items")
if name:
- plans = _filter_plans('name', name, plans)
+ plans = _filter_plans("name", name, plans)
if plan_id:
- plans = _filter_plans('id', plan_id, plans)
+ plans = _filter_plans("id", plan_id, plans)
- return {'plans': [_convert_datetime_str(plan) for plan in plans]}
+ return {"plans": [_convert_datetime_str(plan) for plan in plans]}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def _validate_throttle(throttle):
- '''
+ """
Helper to verify that throttling parameters are valid
- '''
+ """
if throttle is not None:
if not isinstance(throttle, dict):
- raise TypeError('throttle must be a dictionary, provided value: {0}'.format(throttle))
+ raise TypeError(
+ "throttle must be a dictionary, provided value: {0}".format(throttle)
+ )
def _validate_quota(quota):
- '''
+ """
Helper to verify that quota parameters are valid
- '''
+ """
if quota is not None:
if not isinstance(quota, dict):
- raise TypeError('quota must be a dictionary, provided value: {0}'.format(quota))
- periods = ['DAY', 'WEEK', 'MONTH']
- if 'period' not in quota or quota['period'] not in periods:
- raise ValueError('quota must have a valid period specified, valid values are {0}'.format(','.join(periods)))
- if 'limit' not in quota:
- raise ValueError('quota limit must have a valid value')
+ raise TypeError(
+ "quota must be a dictionary, provided value: {0}".format(quota)
+ )
+ periods = ["DAY", "WEEK", "MONTH"]
+ if "period" not in quota or quota["period"] not in periods:
+ raise ValueError(
+ "quota must have a valid period specified, valid values are {0}".format(
+ ",".join(periods)
+ )
+ )
+ if "limit" not in quota:
+ raise ValueError("quota limit must have a valid value")
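Illustrative inputs for the two validators above: throttle only has to be a dict, while quota additionally needs a period of DAY, WEEK or MONTH plus a limit (all values here are made up; extra keys such as offset pass through and are handled later):
_validate_throttle({"rateLimit": 10.0, "burstLimit": 10})       # ok
_validate_quota({"period": "DAY", "limit": 1000})               # ok
_validate_quota({"period": "DAY", "limit": 1000, "offset": 2})  # ok
# _validate_quota({"period": "HOUR", "limit": 1000})  -> ValueError (bad period)
# _validate_quota("1000/day")                         -> TypeError (not a dict)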
-def create_usage_plan(name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None):
- '''
+def create_usage_plan(
+ name,
+ description=None,
+ throttle=None,
+ quota=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a new usage plan with throttling and quotas optionally applied
.. versionadded:: 2017.7.0
@@ -1480,30 +1876,32 @@ def create_usage_plan(name, description=None, throttle=None, quota=None, region=
salt myminion boto_apigateway.create_usage_plan name='usage plan name' throttle='{"rateLimit": 10.0, "burstLimit": 10}'
- '''
+ """
try:
_validate_throttle(throttle)
_validate_quota(quota)
values = dict(name=name)
if description:
- values['description'] = description
+ values["description"] = description
if throttle:
- values['throttle'] = throttle
+ values["throttle"] = throttle
if quota:
- values['quota'] = quota
+ values["quota"] = quota
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.create_usage_plan(**values)
- return {'created': True, 'result': res}
+ return {"created": True, "result": res}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
except (TypeError, ValueError) as e:
- return {'error': six.text_type(e)}
+ return {"error": six.text_type(e)}
-def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None):
- '''
+def update_usage_plan(
+ plan_id, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Updates an existing usage plan with throttling and quotas
.. versionadded:: 2017.7.0
@@ -1538,7 +1936,7 @@ def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None,
salt myminion boto_apigateway.update_usage_plan plan_id='usage plan id' throttle='{"rateLimit": 10.0, "burstLimit": 10}'
- '''
+ """
try:
_validate_throttle(throttle)
_validate_quota(quota)
@@ -1548,36 +1946,63 @@ def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None,
patchOperations = []
if throttle is None:
- patchOperations.append({'op': 'remove', 'path': '/throttle'})
+ patchOperations.append({"op": "remove", "path": "/throttle"})
else:
- if 'rateLimit' in throttle:
- patchOperations.append({'op': 'replace', 'path': '/throttle/rateLimit', 'value': str(throttle['rateLimit'])}) # future lint: disable=blacklisted-function
- if 'burstLimit' in throttle:
- patchOperations.append({'op': 'replace', 'path': '/throttle/burstLimit', 'value': str(throttle['burstLimit'])}) # future lint: disable=blacklisted-function
+ if "rateLimit" in throttle:
+ patchOperations.append(
+ {
+ "op": "replace",
+ "path": "/throttle/rateLimit",
+ "value": str(throttle["rateLimit"]),
+ }
+ ) # future lint: disable=blacklisted-function
+ if "burstLimit" in throttle:
+ patchOperations.append(
+ {
+ "op": "replace",
+ "path": "/throttle/burstLimit",
+ "value": str(throttle["burstLimit"]),
+ }
+ ) # future lint: disable=blacklisted-function
if quota is None:
- patchOperations.append({'op': 'remove', 'path': '/quota'})
+ patchOperations.append({"op": "remove", "path": "/quota"})
else:
- patchOperations.append({'op': 'replace', 'path': '/quota/period', 'value': str(quota['period'])}) # future lint: disable=blacklisted-function
- patchOperations.append({'op': 'replace', 'path': '/quota/limit', 'value': str(quota['limit'])}) # future lint: disable=blacklisted-function
- if 'offset' in quota:
- patchOperations.append({'op': 'replace', 'path': '/quota/offset', 'value': str(quota['offset'])}) # future lint: disable=blacklisted-function
+ patchOperations.append(
+ {
+ "op": "replace",
+ "path": "/quota/period",
+ "value": str(quota["period"]),
+ }
+ ) # future lint: disable=blacklisted-function
+ patchOperations.append(
+ {"op": "replace", "path": "/quota/limit", "value": str(quota["limit"])}
+ ) # future lint: disable=blacklisted-function
+ if "offset" in quota:
+ patchOperations.append(
+ {
+ "op": "replace",
+ "path": "/quota/offset",
+ "value": str(quota["offset"]),
+ }
+ ) # future lint: disable=blacklisted-function
if patchOperations:
- res = conn.update_usage_plan(usagePlanId=plan_id,
- patchOperations=patchOperations)
- return {'updated': True, 'result': res}
+ res = conn.update_usage_plan(
+ usagePlanId=plan_id, patchOperations=patchOperations
+ )
+ return {"updated": True, "result": res}
- return {'updated': False}
+ return {"updated": False}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
except (TypeError, ValueError) as e:
- return {'error': six.text_type(e)}
+ return {"error": six.text_type(e)}
def delete_usage_plan(plan_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Deletes usage plan identified by plan_id
.. versionadded:: 2017.7.0
@@ -1588,23 +2013,27 @@ def delete_usage_plan(plan_id, region=None, key=None, keyid=None, profile=None):
salt myminion boto_apigateway.delete_usage_plan plan_id='usage plan id'
- '''
+ """
try:
- existing = describe_usage_plans(plan_id=plan_id, region=region, key=key, keyid=keyid, profile=profile)
+ existing = describe_usage_plans(
+ plan_id=plan_id, region=region, key=key, keyid=keyid, profile=profile
+ )
# don't attempt to delete the usage plan if it does not exist
- if 'error' in existing:
- return {'error': existing['error']}
+ if "error" in existing:
+ return {"error": existing["error"]}
- if 'plans' in existing and existing['plans']:
+ if "plans" in existing and existing["plans"]:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.delete_usage_plan(usagePlanId=plan_id)
- return {'deleted': True, 'usagePlanId': plan_id}
+ return {"deleted": True, "usagePlanId": plan_id}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None, profile=None):
- '''
+def _update_usage_plan_apis(
+ plan_id, apis, op, region=None, key=None, keyid=None, profile=None
+):
+ """
Helper function that updates the usage plan identified by plan_id by attaching it to or detaching it from each of the stages specified by the apis parameter.
apis
@@ -1618,29 +2047,34 @@ def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None
op
'add' or 'remove'
- '''
+ """
try:
patchOperations = []
for api in apis:
- patchOperations.append({
- 'op': op,
- 'path': '/apiStages',
- 'value': '{0}:{1}'.format(api['apiId'], api['stage'])
- })
+ patchOperations.append(
+ {
+ "op": op,
+ "path": "/apiStages",
+ "value": "{0}:{1}".format(api["apiId"], api["stage"]),
+ }
+ )
res = None
if patchOperations:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- res = conn.update_usage_plan(usagePlanId=plan_id,
- patchOperations=patchOperations)
- return {'success': True, 'result': res}
+ res = conn.update_usage_plan(
+ usagePlanId=plan_id, patchOperations=patchOperations
+ )
+ return {"success": True, "result": res}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
except Exception as e: # pylint: disable=broad-except
- return {'error': e}
+ return {"error": e}
-def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
- '''
+def attach_usage_plan_to_apis(
+ plan_id, apis, region=None, key=None, keyid=None, profile=None
+):
+ """
Attaches the given usage plan to each of the APIs provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
@@ -1660,12 +2094,16 @@ def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None,
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
- '''
- return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile)
+ """
+ return _update_usage_plan_apis(
+ plan_id, apis, "add", region=region, key=key, keyid=keyid, profile=profile
+ )
-def detach_usage_plan_from_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
- '''
+def detach_usage_plan_from_apis(
+ plan_id, apis, region=None, key=None, keyid=None, profile=None
+):
+ """
Detaches the given usage plan from each of the APIs provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
@@ -1685,5 +2123,7 @@ def detach_usage_plan_from_apis(plan_id, apis, region=None, key=None, keyid=None
salt myminion boto_apigateway.detach_usage_plan_from_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
- '''
- return _update_usage_plan_apis(plan_id, apis, 'remove', region=region, key=key, keyid=keyid, profile=profile)
+ """
+ return _update_usage_plan_apis(
+ plan_id, apis, "remove", region=region, key=key, keyid=keyid, profile=profile
+ )
diff --git a/salt/modules/boto_asg.py b/salt/modules/boto_asg.py
index 0df3b080666..6baebe2404c 100644
--- a/salt/modules/boto_asg.py
+++ b/salt/modules/boto_asg.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Autoscale Groups
.. versionadded:: 2014.7.0
@@ -41,37 +41,18 @@ Connection module for Amazon Autoscale Groups
:depends: boto
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import datetime
+import email.mime.multipart
import logging
import sys
import time
-import email.mime.multipart
-
-log = logging.getLogger(__name__)
-DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
-
-# Import third party libs
-from salt.ext import six
-try:
- import boto
- import boto.ec2
- import boto.ec2.instance
- import boto.ec2.blockdevicemapping as blockdevicemapping
- import boto.ec2.autoscale as autoscale
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- import boto3 # pylint: disable=unused-import
- from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
# Import Salt libs
import salt.utils.compat
@@ -79,35 +60,63 @@ import salt.utils.json
import salt.utils.odict as odict
import salt.utils.versions
+# Import third party libs
+from salt.ext import six
+
+log = logging.getLogger(__name__)
+DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
+
+try:
+ import boto
+ import boto.ec2
+ import boto.ec2.instance
+ import boto.ec2.blockdevicemapping as blockdevicemapping
+ import boto.ec2.autoscale as autoscale
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ import boto3 # pylint: disable=unused-import
+ from botocore.exceptions import ClientError
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'asg', module='ec2.autoscale', pack=__salt__)
- setattr(sys.modules[__name__], '_get_ec2_conn',
- __utils__['boto.get_connection_func']('ec2'))
+ __utils__["boto.assign_funcs"](
+ __name__, "asg", module="ec2.autoscale", pack=__salt__
+ )
+ setattr(
+ sys.modules[__name__],
+ "_get_ec2_conn",
+ __utils__["boto.get_connection_func"]("ec2"),
+ )
return has_boto_reqs
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](
- __name__, 'autoscaling',
- get_conn_funcname='_get_conn_autoscaling_boto3')
+ __utils__["boto3.assign_funcs"](
+ __name__, "autoscaling", get_conn_funcname="_get_conn_autoscaling_boto3"
+ )
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an autoscale group exists.
CLI example::
salt myminion boto_asg.exists myasg region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
@@ -116,12 +125,12 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
if _conn:
return True
else:
- msg = 'The autoscale group does not exist in region {0}'.format(region)
+ msg = "The autoscale group does not exist in region {0}".format(region)
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
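The retry loop in `exists` above recurs throughout this module. Pulled out into a standalone helper purely for illustration (the patch itself keeps the loops inline), the idiom is:

    import logging
    import time

    import boto.exception

    log = logging.getLogger(__name__)


    def _retry_on_throttling(func, *args, **kwargs):
        # Same shape as the inline loops: up to 30 attempts, sleeping 5
        # seconds whenever AWS answers with a Throttling error code.
        retries = 30
        while True:
            try:
                return func(*args, **kwargs)
            except boto.exception.BotoServerError as e:
                if retries and e.code == "Throttling":
                    log.debug("Throttled by AWS API, retrying in 5 seconds...")
                    time.sleep(5)
                    retries -= 1
                    continue
                raise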
@@ -130,13 +139,13 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
def get_config(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the configuration for an autoscale group.
CLI example::
salt myminion boto_asg.get_config myasg region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
@@ -147,30 +156,41 @@ def get_config(name, region=None, key=None, keyid=None, profile=None):
else:
return {}
ret = odict.OrderedDict()
- attrs = ['name', 'availability_zones', 'default_cooldown',
- 'desired_capacity', 'health_check_period',
- 'health_check_type', 'launch_config_name', 'load_balancers',
- 'max_size', 'min_size', 'placement_group',
- 'vpc_zone_identifier', 'tags', 'termination_policies',
- 'suspended_processes']
+ attrs = [
+ "name",
+ "availability_zones",
+ "default_cooldown",
+ "desired_capacity",
+ "health_check_period",
+ "health_check_type",
+ "launch_config_name",
+ "load_balancers",
+ "max_size",
+ "min_size",
+ "placement_group",
+ "vpc_zone_identifier",
+ "tags",
+ "termination_policies",
+ "suspended_processes",
+ ]
for attr in attrs:
# Tags are objects, so we need to turn them into dicts.
- if attr == 'tags':
+ if attr == "tags":
_tags = []
for tag in asg.tags:
_tag = odict.OrderedDict()
- _tag['key'] = tag.key
- _tag['value'] = tag.value
- _tag['propagate_at_launch'] = tag.propagate_at_launch
+ _tag["key"] = tag.key
+ _tag["value"] = tag.value
+ _tag["propagate_at_launch"] = tag.propagate_at_launch
_tags.append(_tag)
- ret['tags'] = _tags
+ ret["tags"] = _tags
# Boto accepts a string or list as input for vpc_zone_identifier,
# but always returns a comma separated list. We require lists in
# states.
- elif attr == 'vpc_zone_identifier':
- ret[attr] = getattr(asg, attr).split(',')
+ elif attr == "vpc_zone_identifier":
+ ret[attr] = getattr(asg, attr).split(",")
# convert SuspendedProcess objects to names
- elif attr == 'suspended_processes':
+ elif attr == "suspended_processes":
suspended_processes = getattr(asg, attr)
ret[attr] = sorted([x.process_name for x in suspended_processes])
else:
@@ -180,34 +200,38 @@ def get_config(name, region=None, key=None, keyid=None, profile=None):
ret["scaling_policies"] = []
for policy in policies:
ret["scaling_policies"].append(
- dict([
- ("name", policy.name),
- ("adjustment_type", policy.adjustment_type),
- ("scaling_adjustment", policy.scaling_adjustment),
- ("min_adjustment_step", policy.min_adjustment_step),
- ("cooldown", policy.cooldown)
- ])
+ dict(
+ [
+ ("name", policy.name),
+ ("adjustment_type", policy.adjustment_type),
+ ("scaling_adjustment", policy.scaling_adjustment),
+ ("min_adjustment_step", policy.min_adjustment_step),
+ ("cooldown", policy.cooldown),
+ ]
+ )
)
# scheduled actions
actions = conn.get_all_scheduled_actions(as_group=name)
- ret['scheduled_actions'] = {}
+ ret["scheduled_actions"] = {}
for action in actions:
end_time = None
if action.end_time:
end_time = action.end_time.isoformat()
- ret['scheduled_actions'][action.name] = dict([
- ("min_size", action.min_size),
- ("max_size", action.max_size),
- # AWS bug
- ("desired_capacity", int(action.desired_capacity)),
- ("start_time", action.start_time.isoformat()),
- ("end_time", end_time),
- ("recurrence", action.recurrence)
- ])
+ ret["scheduled_actions"][action.name] = dict(
+ [
+ ("min_size", action.min_size),
+ ("max_size", action.max_size),
+ # AWS bug
+ ("desired_capacity", int(action.desired_capacity)),
+ ("start_time", action.start_time.isoformat()),
+ ("end_time", end_time),
+ ("recurrence", action.recurrence),
+ ]
+ )
return ret
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
@@ -215,21 +239,38 @@ def get_config(name, region=None, key=None, keyid=None, profile=None):
return {}
-def create(name, launch_config_name, availability_zones, min_size, max_size,
- desired_capacity=None, load_balancers=None, default_cooldown=None,
- health_check_type=None, health_check_period=None,
- placement_group=None, vpc_zone_identifier=None, tags=None,
- termination_policies=None, suspended_processes=None,
- scaling_policies=None, scheduled_actions=None, region=None,
- notification_arn=None, notification_types=None,
- key=None, keyid=None, profile=None):
- '''
+def create(
+ name,
+ launch_config_name,
+ availability_zones,
+ min_size,
+ max_size,
+ desired_capacity=None,
+ load_balancers=None,
+ default_cooldown=None,
+ health_check_type=None,
+ health_check_period=None,
+ placement_group=None,
+ vpc_zone_identifier=None,
+ tags=None,
+ termination_policies=None,
+ suspended_processes=None,
+ scaling_policies=None,
+ scheduled_actions=None,
+ region=None,
+ notification_arn=None,
+ notification_types=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an autoscale group.
CLI example::
        salt myminion boto_asg.create myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones)
@@ -244,18 +285,22 @@ def create(name, launch_config_name, availability_zones, min_size, max_size,
if tags:
for tag in tags:
try:
- key = tag.get('key')
+ key = tag.get("key")
except KeyError:
- log.error('Tag missing key.')
+ log.error("Tag missing key.")
return False
try:
- value = tag.get('value')
+ value = tag.get("value")
except KeyError:
- log.error('Tag missing value.')
+ log.error("Tag missing value.")
return False
- propagate_at_launch = tag.get('propagate_at_launch', False)
- _tag = autoscale.Tag(key=key, value=value, resource_id=name,
- propagate_at_launch=propagate_at_launch)
+ propagate_at_launch = tag.get("propagate_at_launch", False)
+ _tag = autoscale.Tag(
+ key=key,
+ value=value,
+ resource_id=name,
+ propagate_at_launch=propagate_at_launch,
+ )
_tags.append(_tag)
if isinstance(termination_policies, six.string_types):
termination_policies = salt.utils.json.loads(termination_policies)
@@ -267,17 +312,22 @@ def create(name, launch_config_name, availability_zones, min_size, max_size,
while True:
try:
_asg = autoscale.AutoScalingGroup(
- name=name, launch_config=launch_config_name,
+ name=name,
+ launch_config=launch_config_name,
availability_zones=availability_zones,
- min_size=min_size, max_size=max_size,
- desired_capacity=desired_capacity, load_balancers=load_balancers,
+ min_size=min_size,
+ max_size=max_size,
+ desired_capacity=desired_capacity,
+ load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
- placement_group=placement_group, tags=_tags,
+ placement_group=placement_group,
+ tags=_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies,
- suspended_processes=suspended_processes)
+ suspended_processes=suspended_processes,
+ )
conn.create_auto_scaling_group(_asg)
# create scaling policies
_create_scaling_policies(conn, name, scaling_policies)
@@ -285,40 +335,60 @@ def create(name, launch_config_name, availability_zones, min_size, max_size,
_create_scheduled_actions(conn, name, scheduled_actions)
# create notifications
if notification_arn and notification_types:
- conn.put_notification_configuration(_asg, notification_arn, notification_types)
- log.info('Created ASG %s', name)
+ conn.put_notification_configuration(
+ _asg, notification_arn, notification_types
+ )
+ log.info("Created ASG %s", name)
return True
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
- msg = 'Failed to create ASG %s', name
+ msg = "Failed to create ASG %s", name
log.error(msg)
return False
-def update(name, launch_config_name, availability_zones, min_size, max_size,
- desired_capacity=None, load_balancers=None, default_cooldown=None,
- health_check_type=None, health_check_period=None,
- placement_group=None, vpc_zone_identifier=None, tags=None,
- termination_policies=None, suspended_processes=None,
- scaling_policies=None, scheduled_actions=None,
- notification_arn=None, notification_types=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update(
+ name,
+ launch_config_name,
+ availability_zones,
+ min_size,
+ max_size,
+ desired_capacity=None,
+ load_balancers=None,
+ default_cooldown=None,
+ health_check_type=None,
+ health_check_period=None,
+ placement_group=None,
+ vpc_zone_identifier=None,
+ tags=None,
+ termination_policies=None,
+ suspended_processes=None,
+ scaling_policies=None,
+ scheduled_actions=None,
+ notification_arn=None,
+ notification_types=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update an autoscale group.
CLI example::
        salt myminion boto_asg.update myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn3 = _get_conn_autoscaling_boto3(region=region, key=key, keyid=keyid,
- profile=profile)
+ conn3 = _get_conn_autoscaling_boto3(
+ region=region, key=key, keyid=keyid, profile=profile
+ )
if not conn:
return False, "failed to connect to AWS"
if isinstance(availability_zones, six.string_types):
@@ -338,33 +408,40 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
# Massage our tagset into add / remove lists
    # Use a boto3 call here b/c the boto2 call doesn't implement filters
- current_tags = conn3.describe_tags(Filters=[{'Name': 'auto-scaling-group',
- 'Values': [name]}]).get('Tags', [])
- current_tags = [{'key': t['Key'],
- 'value': t['Value'],
- 'resource_id': t['ResourceId'],
- 'propagate_at_launch': t.get('PropagateAtLaunch', False)}
- for t in current_tags]
+ current_tags = conn3.describe_tags(
+ Filters=[{"Name": "auto-scaling-group", "Values": [name]}]
+ ).get("Tags", [])
+ current_tags = [
+ {
+ "key": t["Key"],
+ "value": t["Value"],
+ "resource_id": t["ResourceId"],
+ "propagate_at_launch": t.get("PropagateAtLaunch", False),
+ }
+ for t in current_tags
+ ]
add_tags = []
desired_tags = []
if tags:
- tags = __utils__['boto3.ordered'](tags)
+ tags = __utils__["boto3.ordered"](tags)
for tag in tags:
try:
- key = tag.get('key')
+ key = tag.get("key")
except KeyError:
- log.error('Tag missing key.')
+ log.error("Tag missing key.")
return False, "Tag {0} missing key".format(tag)
try:
- value = tag.get('value')
+ value = tag.get("value")
except KeyError:
- log.error('Tag missing value.')
+ log.error("Tag missing value.")
return False, "Tag {0} missing value".format(tag)
- propagate_at_launch = tag.get('propagate_at_launch', False)
- _tag = {'key': key,
- 'value': value,
- 'resource_id': name,
- 'propagate_at_launch': propagate_at_launch}
+ propagate_at_launch = tag.get("propagate_at_launch", False)
+ _tag = {
+ "key": key,
+ "value": value,
+ "resource_id": name,
+ "propagate_at_launch": propagate_at_launch,
+ }
if _tag not in current_tags:
add_tags.append(_tag)
desired_tags.append(_tag)
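The removal side of this massaging sits in elided context below; assuming it mirrors the add side, it reduces to:

    # Assumed shape of the elided code: tags currently on the group but
    # absent from the desired set are queued for deletion.
    delete_tags = [t for t in current_tags if t not in desired_tags]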
@@ -375,26 +452,33 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
try:
_asg = autoscale.AutoScalingGroup(
connection=conn,
- name=name, launch_config=launch_config_name,
+ name=name,
+ launch_config=launch_config_name,
availability_zones=availability_zones,
- min_size=min_size, max_size=max_size,
- desired_capacity=desired_capacity, load_balancers=load_balancers,
+ min_size=min_size,
+ max_size=max_size,
+ desired_capacity=desired_capacity,
+ load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
- placement_group=placement_group, tags=add_tags,
+ placement_group=placement_group,
+ tags=add_tags,
vpc_zone_identifier=vpc_zone_identifier,
- termination_policies=termination_policies)
+ termination_policies=termination_policies,
+ )
if notification_arn and notification_types:
- conn.put_notification_configuration(_asg, notification_arn, notification_types)
+ conn.put_notification_configuration(
+ _asg, notification_arn, notification_types
+ )
_asg.update()
# Seems the update call doesn't handle tags, so we'll need to update
# that separately.
if add_tags:
- log.debug('Adding/updating tags from ASG: %s', add_tags)
+ log.debug("Adding/updating tags from ASG: %s", add_tags)
conn.create_or_update_tags([autoscale.Tag(**t) for t in add_tags])
if delete_tags:
- log.debug('Deleting tags from ASG: %s', delete_tags)
+ log.debug("Deleting tags from ASG: %s", delete_tags)
conn.delete_tags([autoscale.Tag(**t) for t in delete_tags])
# update doesn't handle suspended_processes either
# Resume all processes
@@ -403,7 +487,7 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
# list suspends all; don't do that.
if suspended_processes is not None and len(suspended_processes) > 0:
_asg.suspend_processes(suspended_processes)
- log.info('Updated ASG %s', name)
+ log.info("Updated ASG %s", name)
# ### scaling policies
# delete all policies, then recreate them
for policy in conn.get_all_policies(as_group=name):
@@ -416,21 +500,21 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
scheduled_action.name, autoscale_group=name
)
_create_scheduled_actions(conn, name, scheduled_actions)
- return True, ''
+ return True, ""
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
- msg = 'Failed to update ASG {0}'.format(name)
+ msg = "Failed to update ASG {0}".format(name)
log.error(msg)
return False, six.text_type(e)
def _create_scaling_policies(conn, as_name, scaling_policies):
- 'helper function to create scaling policies'
+ "helper function to create scaling policies"
if scaling_policies:
for policy in scaling_policies:
policy = autoscale.policy.ScalingPolicy(
@@ -439,64 +523,71 @@ def _create_scaling_policies(conn, as_name, scaling_policies):
adjustment_type=policy["adjustment_type"],
scaling_adjustment=policy["scaling_adjustment"],
min_adjustment_step=policy.get("min_adjustment_step", None),
- cooldown=policy["cooldown"])
+ cooldown=policy["cooldown"],
+ )
conn.create_scaling_policy(policy)
def _create_scheduled_actions(conn, as_name, scheduled_actions):
- '''
+ """
Helper function to create scheduled actions
- '''
+ """
if scheduled_actions:
for name, action in six.iteritems(scheduled_actions):
- if 'start_time' in action and isinstance(action['start_time'], six.string_types):
- action['start_time'] = datetime.datetime.strptime(
- action['start_time'], DATE_FORMAT
+ if "start_time" in action and isinstance(
+ action["start_time"], six.string_types
+ ):
+ action["start_time"] = datetime.datetime.strptime(
+ action["start_time"], DATE_FORMAT
)
- if 'end_time' in action and isinstance(action['end_time'], six.string_types):
- action['end_time'] = datetime.datetime.strptime(
- action['end_time'], DATE_FORMAT
+ if "end_time" in action and isinstance(
+ action["end_time"], six.string_types
+ ):
+ action["end_time"] = datetime.datetime.strptime(
+ action["end_time"], DATE_FORMAT
)
- conn.create_scheduled_group_action(as_name, name,
- desired_capacity=action.get('desired_capacity'),
- min_size=action.get('min_size'),
- max_size=action.get('max_size'),
- start_time=action.get('start_time'),
- end_time=action.get('end_time'),
- recurrence=action.get('recurrence')
+ conn.create_scheduled_group_action(
+ as_name,
+ name,
+ desired_capacity=action.get("desired_capacity"),
+ min_size=action.get("min_size"),
+ max_size=action.get("max_size"),
+ start_time=action.get("start_time"),
+ end_time=action.get("end_time"),
+ recurrence=action.get("recurrence"),
)
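For reference, a `scheduled_actions` mapping exercising the string-to-datetime conversion above (timestamps must match `DATE_FORMAT`, i.e. `%Y-%m-%dT%H:%M:%SZ`; names and values here are illustrative):

    scheduled_actions = {
        "scale-up-mornings": {
            "min_size": 2,
            "max_size": 10,
            "desired_capacity": 4,
            "start_time": "2017-07-01T06:00:00Z",
            "end_time": "2017-12-31T06:00:00Z",
            "recurrence": "0 6 * * 1-5",  # cron syntax: weekdays at 06:00
        },
    }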
def delete(name, force=False, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an autoscale group.
CLI example::
salt myminion boto_asg.delete myasg region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
conn.delete_auto_scaling_group(name, force)
- msg = 'Deleted autoscale group {0}.'.format(name)
+ msg = "Deleted autoscale group {0}.".format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
- msg = 'Failed to delete autoscale group {0}'.format(name)
+ msg = "Failed to delete autoscale group {0}".format(name)
log.error(msg)
return False
def get_cloud_init_mime(cloud_init):
- '''
+ """
Get a mime multipart encoded string from a cloud-init dict. Currently
supports boothooks, scripts and cloud-config.
@@ -505,36 +596,36 @@ def get_cloud_init_mime(cloud_init):
.. code-block:: bash
salt myminion boto.get_cloud_init_mime
- '''
+ """
if isinstance(cloud_init, six.string_types):
cloud_init = salt.utils.json.loads(cloud_init)
_cloud_init = email.mime.multipart.MIMEMultipart()
- if 'boothooks' in cloud_init:
- for script_name, script in six.iteritems(cloud_init['boothooks']):
- _script = email.mime.text.MIMEText(script, 'cloud-boothook')
+ if "boothooks" in cloud_init:
+ for script_name, script in six.iteritems(cloud_init["boothooks"]):
+ _script = email.mime.text.MIMEText(script, "cloud-boothook")
_cloud_init.attach(_script)
- if 'scripts' in cloud_init:
- for script_name, script in six.iteritems(cloud_init['scripts']):
- _script = email.mime.text.MIMEText(script, 'x-shellscript')
+ if "scripts" in cloud_init:
+ for script_name, script in six.iteritems(cloud_init["scripts"]):
+ _script = email.mime.text.MIMEText(script, "x-shellscript")
_cloud_init.attach(_script)
- if 'cloud-config' in cloud_init:
- cloud_config = cloud_init['cloud-config']
+ if "cloud-config" in cloud_init:
+ cloud_config = cloud_init["cloud-config"]
_cloud_config = email.mime.text.MIMEText(
salt.utils.yaml.safe_dump(cloud_config, default_flow_style=False),
- 'cloud-config')
+ "cloud-config",
+ )
_cloud_init.attach(_cloud_config)
return _cloud_init.as_string()
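A hypothetical `cloud_init` dict covering the three sections handled above (boothooks, scripts, and cloud-config):

    cloud_init = {
        "boothooks": {"mount-data": "#!/bin/sh\nmount -a\n"},
        "scripts": {"bootstrap": "#!/bin/bash\necho bootstrapping\n"},
        "cloud-config": {"packages": ["vim", "git"]},
    }
    # Returns a MIME multipart string suitable for EC2 user_data.
    user_data = get_cloud_init_mime(cloud_init)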
-def launch_configuration_exists(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def launch_configuration_exists(name, region=None, key=None, keyid=None, profile=None):
+ """
Check for a launch configuration's existence.
CLI example::
salt myminion boto_asg.launch_configuration_exists mylc
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
@@ -543,12 +634,14 @@ def launch_configuration_exists(name, region=None, key=None, keyid=None,
if lc:
return True
else:
- msg = 'The launch configuration does not exist in region {0}'.format(region)
+ msg = "The launch configuration does not exist in region {0}".format(
+ region
+ )
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
@@ -556,23 +649,22 @@ def launch_configuration_exists(name, region=None, key=None, keyid=None,
return False
-def get_all_launch_configurations(region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_launch_configurations(region=None, key=None, keyid=None, profile=None):
+ """
    Fetch and return all Launch Configurations with details.
CLI example::
salt myminion boto_asg.get_all_launch_configurations
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
return conn.get_all_launch_configurations()
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
@@ -580,28 +672,28 @@ def get_all_launch_configurations(region=None, key=None, keyid=None,
return []
-def list_launch_configurations(region=None, key=None, keyid=None,
- profile=None):
- '''
+def list_launch_configurations(region=None, key=None, keyid=None, profile=None):
+ """
List all Launch Configurations.
CLI example::
salt myminion boto_asg.list_launch_configurations
- '''
+ """
ret = get_all_launch_configurations(region, key, keyid, profile)
return [r.name for r in ret]
-def describe_launch_configuration(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def describe_launch_configuration(
+ name, region=None, key=None, keyid=None, profile=None
+):
+ """
Dump details of a given launch configuration.
CLI example::
salt myminion boto_asg.describe_launch_configuration mylc
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
@@ -610,12 +702,14 @@ def describe_launch_configuration(name, region=None, key=None, keyid=None,
if lc:
return lc[0]
else:
- msg = 'The launch configuration does not exist in region {0}'.format(region)
+ msg = "The launch configuration does not exist in region {0}".format(
+ region
+ )
log.debug(msg)
return None
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
@@ -623,26 +717,39 @@ def describe_launch_configuration(name, region=None, key=None, keyid=None,
return None
-def create_launch_configuration(name, image_id, key_name=None,
- vpc_id=None, vpc_name=None,
- security_groups=None, user_data=None,
- instance_type='m1.small', kernel_id=None,
- ramdisk_id=None, block_device_mappings=None,
- instance_monitoring=False, spot_price=None,
- instance_profile_name=None,
- ebs_optimized=False,
- associate_public_ip_address=None,
- volume_type=None, delete_on_termination=True,
- iops=None, use_block_device_types=False,
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_launch_configuration(
+ name,
+ image_id,
+ key_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ security_groups=None,
+ user_data=None,
+ instance_type="m1.small",
+ kernel_id=None,
+ ramdisk_id=None,
+ block_device_mappings=None,
+ instance_monitoring=False,
+ spot_price=None,
+ instance_profile_name=None,
+ ebs_optimized=False,
+ associate_public_ip_address=None,
+ volume_type=None,
+ delete_on_termination=True,
+ iops=None,
+ use_block_device_types=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a launch configuration.
CLI example::
salt myminion boto_asg.create_launch_configuration mylc image_id=ami-0b9c9f62 key_name='mykey' security_groups='["mygroup"]' instance_type='c3.2xlarge'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, six.string_types):
security_groups = salt.utils.json.loads(security_groups)
@@ -664,72 +771,84 @@ def create_launch_configuration(name, image_id, key_name=None,
# within the default VPC. If a security group id is already part of the list,
# convert_to_group_ids leaves that entry without attempting a lookup on it.
if security_groups and (vpc_id or vpc_name):
- security_groups = __salt__['boto_secgroup.convert_to_group_ids'](
- security_groups,
- vpc_id=vpc_id, vpc_name=vpc_name,
- region=region, key=key, keyid=keyid,
- profile=profile
- )
+ security_groups = __salt__["boto_secgroup.convert_to_group_ids"](
+ security_groups,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
lc = autoscale.LaunchConfiguration(
- name=name, image_id=image_id, key_name=key_name,
- security_groups=security_groups, user_data=user_data,
- instance_type=instance_type, kernel_id=kernel_id,
- ramdisk_id=ramdisk_id, block_device_mappings=_bdms,
- instance_monitoring=instance_monitoring, spot_price=spot_price,
+ name=name,
+ image_id=image_id,
+ key_name=key_name,
+ security_groups=security_groups,
+ user_data=user_data,
+ instance_type=instance_type,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ block_device_mappings=_bdms,
+ instance_monitoring=instance_monitoring,
+ spot_price=spot_price,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
- volume_type=volume_type, delete_on_termination=delete_on_termination,
- iops=iops, use_block_device_types=use_block_device_types)
+ volume_type=volume_type,
+ delete_on_termination=delete_on_termination,
+ iops=iops,
+ use_block_device_types=use_block_device_types,
+ )
retries = 30
while True:
try:
conn.create_launch_configuration(lc)
- log.info('Created LC %s', name)
+ log.info("Created LC %s", name)
return True
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
- msg = 'Failed to create LC {0}'.format(name)
+ msg = "Failed to create LC {0}".format(name)
log.error(msg)
return False
-def delete_launch_configuration(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_launch_configuration(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete a launch configuration.
CLI example::
salt myminion boto_asg.delete_launch_configuration mylc
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
conn.delete_launch_configuration(name)
- log.info('Deleted LC %s', name)
+ log.info("Deleted LC %s", name)
return True
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
- msg = 'Failed to delete LC {0}'.format(name)
+ msg = "Failed to delete LC {0}".format(name)
log.error(msg)
return False
-def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
- key=None, keyid=None, profile=None):
- '''
+def get_scaling_policy_arn(
+ as_group, scaling_policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Return the arn for a scaling policy in a specific autoscale group or None
if not found. Mainly used as a helper method for boto_cloudwatch_alarm, for
linking alarms to scaling policies.
@@ -737,7 +856,7 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
CLI Example::
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while retries > 0:
@@ -747,20 +866,20 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
- log.error('Could not convert: %s', as_group)
+ log.error("Could not convert: %s", as_group)
return None
except boto.exception.BotoServerError as e:
- if e.error_code != 'Throttling':
+ if e.error_code != "Throttling":
raise
- log.debug('Throttled by API, will retry in 5 seconds')
+ log.debug("Throttled by API, will retry in 5 seconds")
time.sleep(5)
- log.error('Maximum number of retries exceeded')
+ log.error("Maximum number of retries exceeded")
return None
def get_all_groups(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return all AutoScale Groups visible in the account
(as a list of boto.ec2.autoscale.group.AutoScalingGroup).
@@ -772,12 +891,12 @@ def get_all_groups(region=None, key=None, keyid=None, profile=None):
salt-call boto_asg.get_all_groups region=us-east-1 --output yaml
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
while True:
try:
- next_token = ''
+ next_token = ""
asgs = []
while next_token is not None:
ret = conn.get_all_groups(next_token=next_token)
@@ -785,8 +904,8 @@ def get_all_groups(region=None, key=None, keyid=None, profile=None):
next_token = ret.next_token
return asgs
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
@@ -795,7 +914,7 @@ def get_all_groups(region=None, key=None, keyid=None, profile=None):
def list_groups(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return all AutoScale Groups visible in the account
(as a list of names).
@@ -807,21 +926,32 @@ def list_groups(region=None, key=None, keyid=None, profile=None):
salt-call boto_asg.list_groups region=us-east-1
- '''
- return [a.name for a in get_all_groups(region=region, key=key, keyid=keyid, profile=profile)]
+ """
+ return [
+ a.name
+ for a in get_all_groups(region=region, key=key, keyid=keyid, profile=profile)
+ ]
-def get_instances(name, lifecycle_state="InService", health_status="Healthy",
- attribute="private_ip_address", attributes=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def get_instances(
+ name,
+ lifecycle_state="InService",
+ health_status="Healthy",
+ attribute="private_ip_address",
+ attributes=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
    Return the given attribute of all instances in the named autoscale group.
CLI example::
salt-call boto_asg.get_instances my_autoscale_group_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ec2_conn = _get_ec2_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
@@ -830,15 +960,17 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
asgs = conn.get_all_groups(names=[name])
break
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, retrying in 5 seconds...')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, retrying in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error(e)
return False
if len(asgs) != 1:
- log.debug("name '%s' returns multiple ASGs: %s", name, [asg.name for asg in asgs])
+ log.debug(
+ "name '%s' returns multiple ASGs: %s", name, [asg.name for asg in asgs]
+ )
return False
asg = asgs[0]
instance_ids = []
@@ -852,23 +984,40 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
# get full instance info, so that we can return the attribute
instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
if attributes:
- return [[_convert_attribute(instance, attr) for attr in attributes] for instance in instances]
+ return [
+ [_convert_attribute(instance, attr) for attr in attributes]
+ for instance in instances
+ ]
else:
# properly handle case when not all instances have the requested attribute
- return [_convert_attribute(instance, attribute) for instance in instances if getattr(instance, attribute)]
+ return [
+ _convert_attribute(instance, attribute)
+ for instance in instances
+ if getattr(instance, attribute)
+ ]
def _convert_attribute(instance, attribute):
if attribute == "tags":
tags = dict(getattr(instance, attribute))
- return {key.encode("utf-8"): value.encode("utf-8") for key, value in six.iteritems(tags)}
+ return {
+ key.encode("utf-8"): value.encode("utf-8")
+ for key, value in six.iteritems(tags)
+ }
return getattr(instance, attribute).encode("ascii")
-def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def enter_standby(
+ name,
+ instance_ids,
+ should_decrement_desired_capacity=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Switch desired instances to StandBy mode
.. versionadded:: 2016.11.0
@@ -877,24 +1026,36 @@ def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,
salt-call boto_asg.enter_standby my_autoscale_group_name '["i-xxxxxx"]'
- '''
- conn = _get_conn_autoscaling_boto3(region=region, key=key, keyid=keyid, profile=profile)
+ """
+ conn = _get_conn_autoscaling_boto3(
+ region=region, key=key, keyid=keyid, profile=profile
+ )
try:
response = conn.enter_standby(
InstanceIds=instance_ids,
AutoScalingGroupName=name,
- ShouldDecrementDesiredCapacity=should_decrement_desired_capacity)
+ ShouldDecrementDesiredCapacity=should_decrement_desired_capacity,
+ )
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': err}
- return all(activity['StatusCode'] != 'Failed' for activity in response['Activities'])
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": err}
+ return all(
+ activity["StatusCode"] != "Failed" for activity in response["Activities"]
+ )
-def exit_standby(name, instance_ids, should_decrement_desired_capacity=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def exit_standby(
+ name,
+ instance_ids,
+ should_decrement_desired_capacity=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
@@ -903,16 +1064,19 @@ def exit_standby(name, instance_ids, should_decrement_desired_capacity=False,
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]'
- '''
+ """
conn = _get_conn_autoscaling_boto3(
- region=region, key=key, keyid=keyid, profile=profile)
+ region=region, key=key, keyid=keyid, profile=profile
+ )
try:
response = conn.exit_standby(
- InstanceIds=instance_ids,
- AutoScalingGroupName=name)
+ InstanceIds=instance_ids, AutoScalingGroupName=name
+ )
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': err}
- return all(activity['StatusCode'] != 'Failed' for activity in response['Activities'])
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": err}
+ return all(
+ activity["StatusCode"] != "Failed" for activity in response["Activities"]
+ )
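A usage sketch for the standby round-trip (the group and instance id below are made up):

    # Both functions return True when no activity reports a "Failed"
    # StatusCode; a missing group yields {"exists": False}, and any other
    # ClientError yields {"error": ...} (see the except branches above).
    ok = enter_standby(
        "my-asg", ["i-0123456789abcdef0"], should_decrement_desired_capacity=True
    )
    if ok is True:
        ok = exit_standby("my-asg", ["i-0123456789abcdef0"])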
diff --git a/salt/modules/boto_cfn.py b/salt/modules/boto_cfn.py
index bb7bfcc33b9..cfe9ebc167d 100644
--- a/salt/modules/boto_cfn.py
+++ b/salt/modules/boto_cfn.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Cloud Formation
.. versionadded:: 2015.5.0
@@ -28,48 +28,54 @@ Connection module for Amazon Cloud Formation
cfn.region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.utils.versions
+
# Import Salt libs
from salt.ext import six
-import salt.utils.versions
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto.cloudformation
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from boto.exception import BotoServerError
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
return salt.utils.versions.check_boto_reqs(check_boto3=False)
def __init__(opts):
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'cfn', module='cloudformation', pack=__salt__)
+ __utils__["boto.assign_funcs"](
+ __name__, "cfn", module="cloudformation", pack=__salt__
+ )
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a stack exists.
CLI Example:
@@ -77,21 +83,21 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_cfn.exists mystack region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
# Returns an object if stack exists else an exception
exists = conn.describe_stacks(name)
- log.debug('Stack %s exists.', name)
+ log.debug("Stack %s exists.", name)
return True
except BotoServerError as e:
- log.debug('boto_cfn.exists raised an exception', exc_info=True)
+ log.debug("boto_cfn.exists raised an exception", exc_info=True)
return False
def describe(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Describe a stack.
.. versionadded:: 2015.8.0
@@ -101,7 +107,7 @@ def describe(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_cfn.describe mystack region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -109,34 +115,55 @@ def describe(name, region=None, key=None, keyid=None, profile=None):
r = conn.describe_stacks(name)
if r:
stack = r[0]
- log.debug('Found VPC: %s', stack.stack_id)
- keys = ('stack_id', 'description', 'stack_status', 'stack_status_reason', 'tags')
+ log.debug("Found VPC: %s", stack.stack_id)
+ keys = (
+ "stack_id",
+ "description",
+ "stack_status",
+ "stack_status_reason",
+ "tags",
+ )
ret = dict([(k, getattr(stack, k)) for k in keys if hasattr(stack, k)])
- o = getattr(stack, 'outputs')
- p = getattr(stack, 'parameters')
+ o = getattr(stack, "outputs")
+ p = getattr(stack, "parameters")
outputs = {}
parameters = {}
for i in o:
outputs[i.key] = i.value
- ret['outputs'] = outputs
+ ret["outputs"] = outputs
for j in p:
parameters[j.key] = j.value
- ret['parameters'] = parameters
+ ret["parameters"] = parameters
- return {'stack': ret}
+ return {"stack": ret}
- log.debug('Stack %s exists.', name)
+ log.debug("Stack %s exists.", name)
return True
except BotoServerError as e:
- log.warning('Could not describe stack %s.\n%s', name, e)
+ log.warning("Could not describe stack %s.\n%s", name, e)
return False
-def create(name, template_body=None, template_url=None, parameters=None, notification_arns=None, disable_rollback=None,
- timeout_in_minutes=None, capabilities=None, tags=None, on_failure=None, stack_policy_body=None,
- stack_policy_url=None, region=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ name,
+ template_body=None,
+ template_url=None,
+ parameters=None,
+ notification_arns=None,
+ disable_rollback=None,
+ timeout_in_minutes=None,
+ capabilities=None,
+ tags=None,
+ on_failure=None,
+ stack_policy_body=None,
+ stack_policy_url=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a CFN stack.
CLI Example:
@@ -145,24 +172,52 @@ def create(name, template_body=None, template_url=None, parameters=None, notific
salt myminion boto_cfn.create mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- return conn.create_stack(name, template_body, template_url, parameters, notification_arns, disable_rollback,
- timeout_in_minutes, capabilities, tags, on_failure, stack_policy_body, stack_policy_url)
+ return conn.create_stack(
+ name,
+ template_body,
+ template_url,
+ parameters,
+ notification_arns,
+ disable_rollback,
+ timeout_in_minutes,
+ capabilities,
+ tags,
+ on_failure,
+ stack_policy_body,
+ stack_policy_url,
+ )
except BotoServerError as e:
- msg = 'Failed to create stack {0}.\n{1}'.format(name, e)
+ msg = "Failed to create stack {0}.\n{1}".format(name, e)
log.error(msg)
log.debug(e)
return False
-def update_stack(name, template_body=None, template_url=None, parameters=None, notification_arns=None,
- disable_rollback=False, timeout_in_minutes=None, capabilities=None, tags=None,
- use_previous_template=None, stack_policy_during_update_body=None, stack_policy_during_update_url=None,
- stack_policy_body=None, stack_policy_url=None, region=None, key=None, keyid=None, profile=None):
- '''
+def update_stack(
+ name,
+ template_body=None,
+ template_url=None,
+ parameters=None,
+ notification_arns=None,
+ disable_rollback=False,
+ timeout_in_minutes=None,
+ capabilities=None,
+ tags=None,
+ use_previous_template=None,
+ stack_policy_during_update_body=None,
+ stack_policy_during_update_url=None,
+ stack_policy_body=None,
+ stack_policy_url=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update a CFN stack.
.. versionadded:: 2015.8.0
@@ -173,25 +228,37 @@ def update_stack(name, template_body=None, template_url=None, parameters=None, n
salt myminion boto_cfn.update_stack mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- update = conn.update_stack(name, template_body, template_url, parameters, notification_arns,
- disable_rollback, timeout_in_minutes, capabilities, tags, use_previous_template,
- stack_policy_during_update_body, stack_policy_during_update_url,
- stack_policy_body, stack_policy_url)
- log.debug('Updated result is : %s.', update)
+ update = conn.update_stack(
+ name,
+ template_body,
+ template_url,
+ parameters,
+ notification_arns,
+ disable_rollback,
+ timeout_in_minutes,
+ capabilities,
+ tags,
+ use_previous_template,
+ stack_policy_during_update_body,
+ stack_policy_during_update_url,
+ stack_policy_body,
+ stack_policy_url,
+ )
+ log.debug("Updated result is : %s.", update)
return update
except BotoServerError as e:
- msg = 'Failed to update stack {0}.'.format(name)
+ msg = "Failed to update stack {0}.".format(name)
log.debug(e)
log.error(msg)
return six.text_type(e)
def delete(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a CFN stack.
CLI Example:
@@ -199,20 +266,20 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_cfn.delete mystack region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_stack(name)
except BotoServerError as e:
- msg = 'Failed to create stack {0}.'.format(name)
+ msg = "Failed to create stack {0}.".format(name)
log.error(msg)
log.debug(e)
return six.text_type(e)
def get_template(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if attributes are set on a CFN stack.
CLI Example:
@@ -220,22 +287,29 @@ def get_template(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_cfn.get_template mystack
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
template = conn.get_template(name)
- log.info('Retrieved template for stack %s', name)
+ log.info("Retrieved template for stack %s", name)
return template
except BotoServerError as e:
log.debug(e)
- msg = 'Template {0} does not exist'.format(name)
+ msg = "Template {0} does not exist".format(name)
log.error(msg)
return six.text_type(e)
-def validate_template(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None):
- '''
+def validate_template(
+ template_body=None,
+ template_url=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Validate cloudformation template
.. versionadded:: 2015.8.0
@@ -245,7 +319,7 @@ def validate_template(template_body=None, template_url=None, region=None, key=No
.. code-block:: bash
salt myminion boto_cfn.validate_template mystack-template
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -253,6 +327,6 @@ def validate_template(template_body=None, template_url=None, region=None, key=No
return conn.validate_template(template_body, template_url)
except BotoServerError as e:
log.debug(e)
- msg = 'Error while trying to validate template {0}.'.format(template_body)
+ msg = "Error while trying to validate template {0}.".format(template_body)
log.error(msg)
return six.text_type(e)
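A short usage sketch for the CFN wrappers, assuming boto2's `create_stack` returns the new stack id on success (the stack name and URL are placeholders):

    stack_id = create(
        "mystack",
        template_url="https://s3.amazonaws.com/bucket/template.cft",
        region="us-east-1",
    )
    if stack_id is False:
        # create() logs the BotoServerError and returns False on failure.
        raise RuntimeError("stack creation failed; see the minion log")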
diff --git a/salt/modules/boto_cloudfront.py b/salt/modules/boto_cloudfront.py
index ea04e8d6b0b..1469cae3801 100644
--- a/salt/modules/boto_cloudfront.py
+++ b/salt/modules/boto_cloudfront.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon CloudFront
.. versionadded:: 2018.3.0
@@ -46,26 +46,28 @@ Connection module for Amazon CloudFront
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import sys
# Import Salt libs
import salt.ext.six as six
-from salt.utils.odict import OrderedDict
import salt.utils.versions
+from salt.utils.odict import OrderedDict
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
+
# pylint: enable=unused-import
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -74,46 +76,39 @@ log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto3 libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'cloudfront')
+ __utils__["boto3.assign_funcs"](__name__, "cloudfront")
return has_boto_reqs
def _list_distributions(
- conn,
- name=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ conn, name=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Private function that returns an iterator over all CloudFront distributions.
The caller is responsible for all boto-related error handling.
name
(Optional) Only yield the distribution with the given name
- '''
- for dl_ in conn.get_paginator('list_distributions').paginate():
- distribution_list = dl_['DistributionList']
- if 'Items' not in distribution_list:
+ """
+ for dl_ in conn.get_paginator("list_distributions").paginate():
+ distribution_list = dl_["DistributionList"]
+ if "Items" not in distribution_list:
# If there are no items, AWS omits the `Items` key for some reason
continue
- for partial_dist in distribution_list['Items']:
- tags = conn.list_tags_for_resource(Resource=partial_dist['ARN'])
- tags = dict(
- (kv['Key'], kv['Value']) for kv in tags['Tags']['Items']
- )
+ for partial_dist in distribution_list["Items"]:
+ tags = conn.list_tags_for_resource(Resource=partial_dist["ARN"])
+ tags = dict((kv["Key"], kv["Value"]) for kv in tags["Tags"]["Items"])
- id_ = partial_dist['Id']
- if 'Name' not in tags:
- log.warning('CloudFront distribution %s has no Name tag.', id_)
+ id_ = partial_dist["Id"]
+ if "Name" not in tags:
+ log.warning("CloudFront distribution %s has no Name tag.", id_)
continue
- distribution_name = tags.pop('Name', None)
+ distribution_name = tags.pop("Name", None)
if name is not None and distribution_name != name:
continue
@@ -126,7 +121,7 @@ def _list_distributions(
# Hence, we must call get_distribution() to get the full object,
# and we cache these objects to help lessen API calls.
distribution = _cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=distribution_name,
region=region,
key=key,
@@ -139,12 +134,12 @@ def _list_distributions(
dist_with_etag = conn.get_distribution(Id=id_)
distribution = {
- 'distribution': dist_with_etag['Distribution'],
- 'etag': dist_with_etag['ETag'],
- 'tags': tags,
+ "distribution": dist_with_etag["Distribution"],
+ "etag": dist_with_etag["ETag"],
+ "tags": tags,
}
_cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=distribution_name,
resource_id=distribution,
region=region,
@@ -156,7 +151,7 @@ def _list_distributions(
def get_distribution(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get information about a CloudFront distribution (configuration, tags) with a given name.
name
@@ -181,9 +176,9 @@ def get_distribution(name, region=None, key=None, keyid=None, profile=None):
salt myminion boto_cloudfront.get_distribution name=mydistribution profile=awsprofile
- '''
+ """
distribution = _cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=name,
region=region,
key=key,
@@ -191,17 +186,12 @@ def get_distribution(name, region=None, key=None, keyid=None, profile=None):
profile=profile,
)
if distribution:
- return {'result': distribution}
+ return {"result": distribution}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
for _, dist in _list_distributions(
- conn,
- name=name,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile,
+ conn, name=name, region=region, key=key, keyid=keyid, profile=profile,
):
# _list_distributions should only return the one distribution
# that we want (with the given name).
@@ -210,16 +200,16 @@ def get_distribution(name, region=None, key=None, keyid=None, profile=None):
# return the first one over and over again,
# so only the first result is useful.
if distribution is not None:
- msg = 'More than one distribution found with name {0}'
- return {'error': msg.format(name)}
+ msg = "More than one distribution found with name {0}"
+ return {"error": msg.format(name)}
distribution = dist
except botocore.exceptions.ClientError as err:
- return {'error': __utils__['boto3.get_error'](err)}
+ return {"error": __utils__["boto3.get_error"](err)}
if not distribution:
- return {'result': None}
+ return {"result": None}
_cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=name,
resource_id=distribution,
region=region,
@@ -227,11 +217,11 @@ def get_distribution(name, region=None, key=None, keyid=None, profile=None):
keyid=keyid,
profile=profile,
)
- return {'result': distribution}
+ return {"result": distribution}
def export_distributions(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get details of all CloudFront distributions.
Produces results that can be used to create an SLS file.
@@ -242,51 +232,37 @@ def export_distributions(region=None, key=None, keyid=None, profile=None):
salt-call boto_cloudfront.export_distributions --out=txt |\
sed "s/local: //" > cloudfront_distributions.sls
- '''
+ """
results = OrderedDict()
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
for name, distribution in _list_distributions(
- conn,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile,
+ conn, region=region, key=key, keyid=keyid, profile=profile,
):
- config = distribution['distribution']['DistributionConfig']
- tags = distribution['tags']
+ config = distribution["distribution"]["DistributionConfig"]
+ tags = distribution["tags"]
distribution_sls_data = [
- {'name': name},
- {'config': config},
- {'tags': tags},
+ {"name": name},
+ {"config": config},
+ {"tags": tags},
]
- results['Manage CloudFront distribution {0}'.format(name)] = {
- 'boto_cloudfront.present': distribution_sls_data,
+ results["Manage CloudFront distribution {0}".format(name)] = {
+ "boto_cloudfront.present": distribution_sls_data,
}
except botocore.exceptions.ClientError as err:
# Raise an exception, as this is meant to be user-invoked at the CLI
# as opposed to being called from execution or state modules
six.reraise(*sys.exc_info())
- dumper = __utils__['yaml.get_dumper']('IndentedSafeOrderedDumper')
- return __utils__['yaml.dump'](
- results,
- default_flow_style=False,
- Dumper=dumper,
- )
+ dumper = __utils__["yaml.get_dumper"]("IndentedSafeOrderedDumper")
+ return __utils__["yaml.dump"](results, default_flow_style=False, Dumper=dumper,)
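For a distribution whose Name tag is `mysite`, the dict serialized above comes out shaped like this (values abbreviated; illustrative, not captured output):

    expected = {
        "Manage CloudFront distribution mysite": {
            "boto_cloudfront.present": [
                {"name": "mysite"},
                {"config": {"Comment": "...", "Enabled": True}},
                {"tags": {"Owner": "web-team"}},
            ],
        },
    }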
def create_distribution(
- name,
- config,
- tags=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ name, config, tags=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Create a CloudFront distribution with the given name, config, and (optionally) tags.
name
@@ -317,28 +293,23 @@ def create_distribution(
salt myminion boto_cloudfront.create_distribution name=mydistribution profile=awsprofile \
config='{"Comment":"partial configuration","Enabled":true}'
- '''
+ """
if tags is None:
tags = {}
- if 'Name' in tags:
+ if "Name" in tags:
# Be lenient and silently accept if names match, else error
- if tags['Name'] != name:
- return {'error': 'Must not pass `Name` in `tags` but as `name`'}
- tags['Name'] = name
- tags = {
- 'Items': [{'Key': k, 'Value': v} for k, v in six.iteritems(tags)]
- }
+ if tags["Name"] != name:
+ return {"error": "Must not pass `Name` in `tags` but as `name`"}
+ tags["Name"] = name
+ tags = {"Items": [{"Key": k, "Value": v} for k, v in six.iteritems(tags)]}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.create_distribution_with_tags(
- DistributionConfigWithTags={
- 'DistributionConfig': config,
- 'Tags': tags,
- },
+ DistributionConfigWithTags={"DistributionConfig": config, "Tags": tags},
)
_cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=name,
invalidate=True,
region=region,
@@ -347,21 +318,15 @@ def create_distribution(
profile=profile,
)
except botocore.exceptions.ClientError as err:
- return {'error': __utils__['boto3.get_error'](err)}
+ return {"error": __utils__["boto3.get_error"](err)}
- return {'result': True}
+ return {"result": True}
def update_distribution(
- name,
- config,
- tags=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ name, config, tags=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Update the config (and optionally tags) for the CloudFront distribution with the given name.
name
@@ -392,61 +357,53 @@ def update_distribution(
salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \
config='{"Comment":"partial configuration","Enabled":true}'
- '''
+ """
distribution_ret = get_distribution(
- name,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile
+ name, region=region, key=key, keyid=keyid, profile=profile
)
- if 'error' in distribution_ret:
+ if "error" in distribution_ret:
return distribution_ret
- dist_with_tags = distribution_ret['result']
+ dist_with_tags = distribution_ret["result"]
- current_distribution = dist_with_tags['distribution']
- current_config = current_distribution['DistributionConfig']
- current_tags = dist_with_tags['tags']
- etag = dist_with_tags['etag']
+ current_distribution = dist_with_tags["distribution"]
+ current_config = current_distribution["DistributionConfig"]
+ current_tags = dist_with_tags["tags"]
+ etag = dist_with_tags["etag"]
- config_diff = __utils__['dictdiffer.deep_diff'](current_config, config)
+ config_diff = __utils__["dictdiffer.deep_diff"](current_config, config)
if tags:
- tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags)
+ tags_diff = __utils__["dictdiffer.deep_diff"](current_tags, tags)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- if 'old' in config_diff or 'new' in config_diff:
+ if "old" in config_diff or "new" in config_diff:
conn.update_distribution(
- DistributionConfig=config,
- Id=current_distribution['Id'],
- IfMatch=etag,
+ DistributionConfig=config, Id=current_distribution["Id"], IfMatch=etag,
)
if tags:
- arn = current_distribution['ARN']
- if 'new' in tags_diff:
+ arn = current_distribution["ARN"]
+ if "new" in tags_diff:
tags_to_add = {
- 'Items': [
- {'Key': k, 'Value': v}
- for k, v in six.iteritems(tags_diff['new'])
+ "Items": [
+ {"Key": k, "Value": v}
+ for k, v in six.iteritems(tags_diff["new"])
],
}
conn.tag_resource(
- Resource=arn,
- Tags=tags_to_add,
+ Resource=arn, Tags=tags_to_add,
)
- if 'old' in tags_diff:
+ if "old" in tags_diff:
tags_to_remove = {
- 'Items': list(tags_diff['old'].keys()),
+ "Items": list(tags_diff["old"].keys()),
}
conn.untag_resource(
- Resource=arn,
- TagKeys=tags_to_remove,
+ Resource=arn, TagKeys=tags_to_remove,
)
except botocore.exceptions.ClientError as err:
- return {'error': __utils__['boto3.get_error'](err)}
+ return {"error": __utils__["boto3.get_error"](err)}
finally:
_cache_id(
- 'cloudfront',
+ "cloudfront",
sub_resource=name,
invalidate=True,
region=region,
@@ -455,4 +412,4 @@ def update_distribution(
profile=profile,
)
- return {'result': True}
+ return {"result": True}
diff --git a/salt/modules/boto_cloudtrail.py b/salt/modules/boto_cloudtrail.py
index c7ac67d08d5..331f82dd534 100644
--- a/salt/modules/boto_cloudtrail.py
+++ b/salt/modules/boto_cloudtrail.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon CloudTrail
.. versionadded:: 2016.3.0
@@ -45,18 +45,20 @@ The dependencies listed above can be installed via package or pip.
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.utils.compat
+import salt.utils.versions
+
# Import Salt libs
from salt.ext import six
-import salt.utils.compat
-import salt.utils.versions
log = logging.getLogger(__name__)
@@ -64,12 +66,14 @@ log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -77,27 +81,24 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_cloudtrail execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.2.5'
- )
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.2.5")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'cloudtrail')
+ __utils__["boto3.assign_funcs"](__name__, "cloudtrail")
-def exists(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def exists(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a trail name, check to see if the given trail exists.
Returns True if the given trail exists and returns False if the given
@@ -109,30 +110,36 @@ def exists(Name,
salt myminion boto_cloudtrail.exists mytrail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.get_trail_status(Name=Name)
- return {'exists': True}
+ return {"exists": True}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
- return {'exists': False}
- return {'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "TrailNotFoundException":
+ return {"exists": False}
+ return {"error": err}
-def create(Name,
- S3BucketName, S3KeyPrefix=None,
- SnsTopicName=None,
- IncludeGlobalServiceEvents=None,
- IsMultiRegionTrail=None,
- EnableLogFileValidation=None,
- CloudWatchLogsLogGroupArn=None,
- CloudWatchLogsRoleArn=None,
- KmsKeyId=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ Name,
+ S3BucketName,
+ S3KeyPrefix=None,
+ SnsTopicName=None,
+ IncludeGlobalServiceEvents=None,
+ IsMultiRegionTrail=None,
+ EnableLogFileValidation=None,
+ CloudWatchLogsLogGroupArn=None,
+ CloudWatchLogsRoleArn=None,
+ KmsKeyId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create a trail.
Returns {created: true} if the trail was created and returns
@@ -144,34 +151,37 @@ def create(Name,
salt myminion boto_cloudtrail.create my_trail my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for arg in ('S3KeyPrefix', 'SnsTopicName', 'IncludeGlobalServiceEvents',
- 'IsMultiRegionTrail',
- 'EnableLogFileValidation', 'CloudWatchLogsLogGroupArn',
- 'CloudWatchLogsRoleArn', 'KmsKeyId'):
+ for arg in (
+ "S3KeyPrefix",
+ "SnsTopicName",
+ "IncludeGlobalServiceEvents",
+ "IsMultiRegionTrail",
+ "EnableLogFileValidation",
+ "CloudWatchLogsLogGroupArn",
+ "CloudWatchLogsRoleArn",
+ "KmsKeyId",
+ ):
if locals()[arg] is not None:
kwargs[arg] = locals()[arg]
- trail = conn.create_trail(Name=Name,
- S3BucketName=S3BucketName,
- **kwargs)
+ trail = conn.create_trail(Name=Name, S3BucketName=S3BucketName, **kwargs)
if trail:
- log.info('The newly created trail name is %s', trail['Name'])
+ log.info("The newly created trail name is %s", trail["Name"])
- return {'created': True, 'name': trail['Name']}
+ return {"created": True, "name": trail["Name"]}
else:
- log.warning('Trail was not created')
- return {'created': False}
+ log.warning("Trail was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a trail name, delete it.
Returns {deleted: true} if the trail was deleted and returns
@@ -183,19 +193,18 @@ def delete(Name,
salt myminion boto_cloudtrail.delete mytrail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_trail(Name=Name)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a trail name, describe its properties.
Returns a dictionary of interesting properties.
@@ -206,32 +215,39 @@ def describe(Name,
salt myminion boto_cloudtrail.describe mytrail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
trails = conn.describe_trails(trailNameList=[Name])
- if trails and len(trails.get('trailList', [])) > 0:
- keys = ('Name', 'S3BucketName', 'S3KeyPrefix',
- 'SnsTopicName', 'IncludeGlobalServiceEvents',
- 'IsMultiRegionTrail',
- 'HomeRegion', 'TrailARN',
- 'LogFileValidationEnabled', 'CloudWatchLogsLogGroupArn',
- 'CloudWatchLogsRoleArn', 'KmsKeyId')
- trail = trails['trailList'].pop()
- return {'trail': dict([(k, trail.get(k)) for k in keys])}
+ if trails and len(trails.get("trailList", [])) > 0:
+ keys = (
+ "Name",
+ "S3BucketName",
+ "S3KeyPrefix",
+ "SnsTopicName",
+ "IncludeGlobalServiceEvents",
+ "IsMultiRegionTrail",
+ "HomeRegion",
+ "TrailARN",
+ "LogFileValidationEnabled",
+ "CloudWatchLogsLogGroupArn",
+ "CloudWatchLogsRoleArn",
+ "KmsKeyId",
+ )
+ trail = trails["trailList"].pop()
+ return {"trail": dict([(k, trail.get(k)) for k in keys])}
else:
- return {'trail': None}
+ return {"trail": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
- return {'trail': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "TrailNotFoundException":
+ return {"trail": None}
+ return {"error": __utils__["boto3.get_error"](e)}
-def status(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def status(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a trail name, describe its current status.
Returns a dictionary of interesting properties.
@@ -242,36 +258,43 @@ def status(Name,
salt myminion boto_cloudtrail.status mytrail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
trail = conn.get_trail_status(Name=Name)
if trail:
- keys = ('IsLogging', 'LatestDeliveryError', 'LatestNotificationError',
- 'LatestDeliveryTime', 'LatestNotificationTime',
- 'StartLoggingTime', 'StopLoggingTime',
- 'LatestCloudWatchLogsDeliveryError',
- 'LatestCloudWatchLogsDeliveryTime',
- 'LatestDigestDeliveryTime', 'LatestDigestDeliveryError',
- 'LatestDeliveryAttemptTime',
- 'LatestNotificationAttemptTime',
- 'LatestNotificationAttemptSucceeded',
- 'LatestDeliveryAttemptSucceeded',
- 'TimeLoggingStarted',
- 'TimeLoggingStopped')
- return {'trail': dict([(k, trail.get(k)) for k in keys])}
+ keys = (
+ "IsLogging",
+ "LatestDeliveryError",
+ "LatestNotificationError",
+ "LatestDeliveryTime",
+ "LatestNotificationTime",
+ "StartLoggingTime",
+ "StopLoggingTime",
+ "LatestCloudWatchLogsDeliveryError",
+ "LatestCloudWatchLogsDeliveryTime",
+ "LatestDigestDeliveryTime",
+ "LatestDigestDeliveryError",
+ "LatestDeliveryAttemptTime",
+ "LatestNotificationAttemptTime",
+ "LatestNotificationAttemptSucceeded",
+ "LatestDeliveryAttemptSucceeded",
+ "TimeLoggingStarted",
+ "TimeLoggingStopped",
+ )
+ return {"trail": dict([(k, trail.get(k)) for k in keys])}
else:
- return {'trail': None}
+ return {"trail": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
- return {'trail': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "TrailNotFoundException":
+ return {"trail": None}
+ return {"error": __utils__["boto3.get_error"](e)}
def list(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List all trails
Returns list of trails
@@ -284,28 +307,34 @@ def list(region=None, key=None, keyid=None, profile=None):
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
trails = conn.describe_trails()
- if not bool(trails.get('trailList')):
- log.warning('No trails found')
- return {'trails': trails.get('trailList', [])}
+ if not bool(trails.get("trailList")):
+ log.warning("No trails found")
+ return {"trails": trails.get("trailList", [])}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def update(Name,
- S3BucketName, S3KeyPrefix=None,
- SnsTopicName=None,
- IncludeGlobalServiceEvents=None,
- IsMultiRegionTrail=None,
- EnableLogFileValidation=None,
- CloudWatchLogsLogGroupArn=None,
- CloudWatchLogsRoleArn=None,
- KmsKeyId=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update(
+ Name,
+ S3BucketName,
+ S3KeyPrefix=None,
+ SnsTopicName=None,
+ IncludeGlobalServiceEvents=None,
+ IsMultiRegionTrail=None,
+ EnableLogFileValidation=None,
+ CloudWatchLogsLogGroupArn=None,
+ CloudWatchLogsRoleArn=None,
+ KmsKeyId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update a trail.
Returns {updated: true} if the trail was updated and returns
@@ -317,34 +346,37 @@ def update(Name,
salt myminion boto_cloudtrail.update my_trail my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for arg in ('S3KeyPrefix', 'SnsTopicName', 'IncludeGlobalServiceEvents',
- 'IsMultiRegionTrail',
- 'EnableLogFileValidation', 'CloudWatchLogsLogGroupArn',
- 'CloudWatchLogsRoleArn', 'KmsKeyId'):
+ for arg in (
+ "S3KeyPrefix",
+ "SnsTopicName",
+ "IncludeGlobalServiceEvents",
+ "IsMultiRegionTrail",
+ "EnableLogFileValidation",
+ "CloudWatchLogsLogGroupArn",
+ "CloudWatchLogsRoleArn",
+ "KmsKeyId",
+ ):
if locals()[arg] is not None:
kwargs[arg] = locals()[arg]
- trail = conn.update_trail(Name=Name,
- S3BucketName=S3BucketName,
- **kwargs)
+ trail = conn.update_trail(Name=Name, S3BucketName=S3BucketName, **kwargs)
if trail:
- log.info('The updated trail name is %s', trail['Name'])
+ log.info("The updated trail name is %s", trail["Name"])
- return {'updated': True, 'name': trail['Name']}
+ return {"updated": True, "name": trail["Name"]}
else:
- log.warning('Trail was not created')
- return {'updated': False}
+ log.warning("Trail was not created")
+ return {"updated": False}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def start_logging(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def start_logging(Name, region=None, key=None, keyid=None, profile=None):
+ """
Start logging for a trail
Returns {started: true} if the trail was started and returns
@@ -356,19 +388,18 @@ def start_logging(Name,
salt myminion boto_cloudtrail.start_logging my_trail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.start_logging(Name=Name)
- return {'started': True}
+ return {"started": True}
except ClientError as e:
- return {'started': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"started": False, "error": __utils__["boto3.get_error"](e)}
-def stop_logging(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def stop_logging(Name, region=None, key=None, keyid=None, profile=None):
+ """
Stop logging for a trail
Returns {stopped: true} if the trail was stopped and returns
@@ -380,33 +411,32 @@ def stop_logging(Name,
salt myminion boto_cloudtrail.stop_logging my_trail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.stop_logging(Name=Name)
- return {'stopped': True}
+ return {"stopped": True}
except ClientError as e:
- return {'stopped': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"stopped": False, "error": __utils__["boto3.get_error"](e)}
def _get_trail_arn(name, region=None, key=None, keyid=None, profile=None):
- if name.startswith('arn:aws:cloudtrail:'):
+ if name.startswith("arn:aws:cloudtrail:"):
return name
- account_id = __salt__['boto_iam.get_account_id'](
+ account_id = __salt__["boto_iam.get_account_id"](
region=region, key=key, keyid=keyid, profile=profile
)
- if profile and 'region' in profile:
- region = profile['region']
+ if profile and "region" in profile:
+ region = profile["region"]
if region is None:
- region = 'us-east-1'
- return 'arn:aws:cloudtrail:{0}:{1}:trail/{2}'.format(region, account_id, name)
+ region = "us-east-1"
+ return "arn:aws:cloudtrail:{0}:{1}:trail/{2}".format(region, account_id, name)
-def add_tags(Name,
- region=None, key=None, keyid=None, profile=None, **kwargs):
- '''
+def add_tags(Name, region=None, key=None, keyid=None, profile=None, **kwargs):
+ """
Add tags to a trail
Returns {tagged: true} if the trail was tagged and returns
@@ -418,26 +448,28 @@ def add_tags(Name,
salt myminion boto_cloudtrail.add_tags my_trail tag_a=tag_value tag_b=tag_value
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
- if six.text_type(k).startswith('__'):
+ if six.text_type(k).startswith("__"):
continue
- tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
- conn.add_tags(ResourceId=_get_trail_arn(Name,
- region=region, key=key, keyid=keyid,
- profile=profile), TagsList=tagslist)
- return {'tagged': True}
+ tagslist.append({"Key": six.text_type(k), "Value": six.text_type(v)})
+ conn.add_tags(
+ ResourceId=_get_trail_arn(
+ Name, region=region, key=key, keyid=keyid, profile=profile
+ ),
+ TagsList=tagslist,
+ )
+ return {"tagged": True}
except ClientError as e:
- return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"tagged": False, "error": __utils__["boto3.get_error"](e)}
-def remove_tags(Name,
- region=None, key=None, keyid=None, profile=None, **kwargs):
- '''
+def remove_tags(Name, region=None, key=None, keyid=None, profile=None, **kwargs):
+ """
Remove tags from a trail
Returns {tagged: true} if the trail was tagged and returns
@@ -449,26 +481,28 @@ def remove_tags(Name,
salt myminion boto_cloudtrail.remove_tags my_trail tag_a=tag_value tag_b=tag_value
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
- if six.text_type(k).startswith('__'):
+ if six.text_type(k).startswith("__"):
continue
- tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
- conn.remove_tags(ResourceId=_get_trail_arn(Name,
- region=region, key=key, keyid=keyid,
- profile=profile), TagsList=tagslist)
- return {'tagged': True}
+ tagslist.append({"Key": six.text_type(k), "Value": six.text_type(v)})
+ conn.remove_tags(
+ ResourceId=_get_trail_arn(
+ Name, region=region, key=key, keyid=keyid, profile=profile
+ ),
+ TagsList=tagslist,
+ )
+ return {"tagged": True}
except ClientError as e:
- return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"tagged": False, "error": __utils__["boto3.get_error"](e)}
-def list_tags(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_tags(Name, region=None, key=None, keyid=None, profile=None):
+ """
List tags of a trail
Returns:
@@ -482,18 +516,16 @@ def list_tags(Name,
salt myminion boto_cloudtrail.list_tags my_trail
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- rid = _get_trail_arn(Name,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ rid = _get_trail_arn(Name, region=region, key=key, keyid=keyid, profile=profile)
ret = conn.list_tags(ResourceIdList=[rid])
- tlist = ret.get('ResourceTagList', []).pop().get('TagsList')
+ tlist = ret.get("ResourceTagList", []).pop().get("TagsList")
tagdict = {}
for tag in tlist:
- tagdict[tag.get('Key')] = tag.get('Value')
- return {'tags': tagdict}
+ tagdict[tag.get("Key")] = tag.get("Value")
+ return {"tags": tagdict}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_cloudwatch.py b/salt/modules/boto_cloudwatch.py
index e32e606b8ac..91a60327eff 100644
--- a/salt/modules/boto_cloudwatch.py
+++ b/salt/modules/boto_cloudwatch.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon CloudWatch
.. versionadded:: 2014.7.0
@@ -40,21 +40,22 @@ Connection module for Amazon CloudWatch
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
+
+import salt.utils.json
+import salt.utils.odict as odict
+import salt.utils.versions
import yaml # pylint: disable=blacklisted-import
# Import Salt libs
from salt.ext import six
-import salt.utils.json
-import salt.utils.odict as odict
-import salt.utils.versions
log = logging.getLogger(__name__)
@@ -64,32 +65,33 @@ try:
import boto.ec2.cloudwatch
import boto.ec2.cloudwatch.listelement
import boto.ec2.cloudwatch.dimension
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs(check_boto3=False)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'cloudwatch',
- module='ec2.cloudwatch',
- pack=__salt__)
+ __utils__["boto.assign_funcs"](
+ __name__, "cloudwatch", module="ec2.cloudwatch", pack=__salt__
+ )
return has_boto_reqs
def get_alarm(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get alarm details. Can also be used to check whether an alarm exists.
CLI example::
salt myminion boto_cloudwatch.get_alarm myalarm region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
alarms = conn.describe_alarms(alarm_names=[name])
@@ -101,32 +103,36 @@ def get_alarm(name, region=None, key=None, keyid=None, profile=None):
def _safe_dump(data):
- '''
+ """
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
- '''
- custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper')
+ """
+ custom_dumper = __utils__["yaml.get_dumper"]("SafeOrderedDumper")
def boto_listelement_presenter(dumper, data):
return dumper.represent_list(list(data))
- yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement,
- boto_listelement_presenter,
- Dumper=custom_dumper)
+ yaml.add_representer(
+ boto.ec2.cloudwatch.listelement.ListElement,
+ boto_listelement_presenter,
+ Dumper=custom_dumper,
+ )
def dimension_presenter(dumper, data):
return dumper.represent_dict(dict(data))
- yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension,
- dimension_presenter, Dumper=custom_dumper)
+ yaml.add_representer(
+ boto.ec2.cloudwatch.dimension.Dimension,
+ dimension_presenter,
+ Dumper=custom_dumper,
+ )
- return __utils__['yaml.dump'](data, Dumper=custom_dumper)
+ return __utils__["yaml.dump"](data, Dumper=custom_dumper)
-def get_all_alarms(region=None, prefix=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_alarms(region=None, prefix=None, key=None, keyid=None, profile=None):
+ """
Get all alarm details. Produces results that can be used to create an sls
file.
@@ -156,7 +162,7 @@ def get_all_alarms(region=None, prefix=None, key=None, keyid=None,
CLI example::
salt myminion boto_cloudwatch.get_all_alarms region=us-east-1 --out=txt
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
alarms = conn.describe_alarms()
@@ -170,19 +176,32 @@ def get_all_alarms(region=None, prefix=None, key=None, keyid=None,
name = prefix + alarm["name"]
del alarm["name"]
alarm_sls = [{"name": name}, {"attributes": alarm}]
- results["manage alarm " + name] = {"boto_cloudwatch_alarm.present":
- alarm_sls}
+ results["manage alarm " + name] = {"boto_cloudwatch_alarm.present": alarm_sls}
return _safe_dump(results)
def create_or_update_alarm(
- connection=None, name=None, metric=None, namespace=None,
- statistic=None, comparison=None, threshold=None, period=None,
- evaluation_periods=None, unit=None, description='',
- dimensions=None, alarm_actions=None,
- insufficient_data_actions=None, ok_actions=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+ connection=None,
+ name=None,
+ metric=None,
+ namespace=None,
+ statistic=None,
+ comparison=None,
+ threshold=None,
+ period=None,
+ evaluation_periods=None,
+ unit=None,
+ description="",
+ dimensions=None,
+ alarm_actions=None,
+ insufficient_data_actions=None,
+ ok_actions=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create or update a cloudwatch alarm.
Params are the same as:
@@ -207,7 +226,7 @@ def create_or_update_alarm(
CLI example:
salt myminion boto_cloudwatch.create_alarm name=myalarm ... region=us-east-1
- '''
+ """
# clean up argument types, so that CLI works
if threshold:
threshold = float(threshold)
@@ -218,7 +237,10 @@ def create_or_update_alarm(
if isinstance(dimensions, six.string_types):
dimensions = salt.utils.json.loads(dimensions)
if not isinstance(dimensions, dict):
- log.error("could not parse dimensions argument: must be json encoding of a dict: '%s'", dimensions)
+ log.error(
+ "could not parse dimensions argument: must be json encoding of a dict: '%s'",
+ dimensions,
+ )
return False
if isinstance(alarm_actions, six.string_types):
alarm_actions = alarm_actions.split(",")
@@ -229,23 +251,21 @@ def create_or_update_alarm(
# convert provided action names into ARN's
if alarm_actions:
- alarm_actions = convert_to_arn(alarm_actions,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile)
+ alarm_actions = convert_to_arn(
+ alarm_actions, region=region, key=key, keyid=keyid, profile=profile
+ )
if insufficient_data_actions:
- insufficient_data_actions = convert_to_arn(insufficient_data_actions,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile)
+ insufficient_data_actions = convert_to_arn(
+ insufficient_data_actions,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if ok_actions:
- ok_actions = convert_to_arn(ok_actions,
- region=region,
- key=key,
- keyid=keyid,
- profile=profile)
+ ok_actions = convert_to_arn(
+ ok_actions, region=region, key=key, keyid=keyid, profile=profile
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -264,22 +284,22 @@ def create_or_update_alarm(
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
- ok_actions=ok_actions
+ ok_actions=ok_actions,
)
conn.create_alarm(alarm)
- log.info('Created/updated alarm %s', name)
+ log.info("Created/updated alarm %s", name)
return True
def convert_to_arn(arns, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Convert a list of strings into actual arns. Converts convenience names such
as 'scaling_policy:...'
CLI Example::
salt '*' convert_to_arn 'scaling_policy:'
- '''
+ """
results = []
for arn in arns:
if arn.startswith("scaling_policy:"):
@@ -290,37 +310,49 @@ def convert_to_arn(arns, region=None, key=None, keyid=None, profile=None):
if policy_arn:
results.append(policy_arn)
else:
- log.error('Could not convert: %s', arn)
+ log.error("Could not convert: %s", arn)
else:
results.append(arn)
return results
def delete_alarm(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a cloudwatch alarm
CLI example to delete an alarm::
salt myminion boto_cloudwatch.delete_alarm myalarm region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_alarms([name])
- log.info('Deleted alarm %s', name)
+ log.info("Deleted alarm %s", name)
return True
def _metric_alarm_to_dict(alarm):
- '''
+ """
Convert a boto.ec2.cloudwatch.alarm.MetricAlarm into a dict. Convenience
for pretty printing.
- '''
+ """
d = odict.OrderedDict()
- fields = ['name', 'metric', 'namespace', 'statistic', 'comparison',
- 'threshold', 'period', 'evaluation_periods', 'unit',
- 'description', 'dimensions', 'alarm_actions',
- 'insufficient_data_actions', 'ok_actions']
+ fields = [
+ "name",
+ "metric",
+ "namespace",
+ "statistic",
+ "comparison",
+ "threshold",
+ "period",
+ "evaluation_periods",
+ "unit",
+ "description",
+ "dimensions",
+ "alarm_actions",
+ "insufficient_data_actions",
+ "ok_actions",
+ ]
for f in fields:
if hasattr(alarm, f):
d[f] = getattr(alarm, f)
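The _safe_dump helper above works because PyYAML lets you register a per-type presenter on a specific Dumper class; without one, a safe dumper rejects unknown types with a RepresenterError. A self-contained sketch of the same trick, with ListLike standing in for boto's ListElement (the module itself registers on a dedicated class from __utils__['yaml.get_dumper'] rather than mutating SafeDumper globally)::

    import yaml

    class ListLike(list):
        pass  # stand-in for boto.ec2.cloudwatch.listelement.ListElement

    def listlike_presenter(dumper, data):
        # Serialise the custom type as a plain YAML sequence.
        return dumper.represent_list(list(data))

    yaml.add_representer(ListLike, listlike_presenter, Dumper=yaml.SafeDumper)

    print(yaml.dump({"dimensions": ListLike(["i-123", "i-456"])},
                    Dumper=yaml.SafeDumper))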
diff --git a/salt/modules/boto_cloudwatch_event.py b/salt/modules/boto_cloudwatch_event.py
index 529401df029..49daad93137 100644
--- a/salt/modules/boto_cloudwatch_event.py
+++ b/salt/modules/boto_cloudwatch_event.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon CloudWatch Events
.. versionadded:: 2016.11.0
@@ -40,9 +40,9 @@ Connection module for Amazon CloudWatch Events
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
@@ -53,41 +53,42 @@ import logging
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
+from salt.ext import six
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError as e:
HAS_BOTO = False
# pylint: enable=import-error
-from salt.ext import six
-
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
return salt.utils.versions.check_boto_reqs()
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'events')
+ __utils__["boto3.assign_funcs"](__name__, "events")
def exists(Name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
@@ -96,30 +97,35 @@ def exists(Name, region=None, key=None, keyid=None, profile=None):
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
events = conn.list_rules(NamePrefix=Name)
if len(events) == 0:
- return {'exists': False}
- for rule in events.get('Rules', []):
- if rule.get('Name', None) == Name:
- return {'exists': True}
- return {'exists': False}
+ return {"exists": False}
+ for rule in events.get("Rules", []):
+ if rule.get("Name", None) == Name:
+ return {"exists": True}
+ return {"exists": False}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- return {'error': err}
+ err = __utils__["boto3.get_error"](e)
+ return {"error": err}
-def create_or_update(Name,
- ScheduleExpression=None,
- EventPattern=None,
- Description=None,
- RoleArn=None,
- State=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_or_update(
+ Name,
+ ScheduleExpression=None,
+ EventPattern=None,
+ Description=None,
+ RoleArn=None,
+ State=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create an event rule.
Returns {created: true} if the rule was created and returns
@@ -131,31 +137,34 @@ def create_or_update(Name,
salt myminion boto_cloudwatch_event.create_or_update my_rule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for arg in ('ScheduleExpression', 'EventPattern', 'State',
- 'Description', 'RoleArn'):
+ for arg in (
+ "ScheduleExpression",
+ "EventPattern",
+ "State",
+ "Description",
+ "RoleArn",
+ ):
if locals()[arg] is not None:
kwargs[arg] = locals()[arg]
- rule = conn.put_rule(Name=Name,
- **kwargs)
+ rule = conn.put_rule(Name=Name, **kwargs)
if rule:
- log.info('The newly created event rule is %s', rule.get('RuleArn'))
+ log.info("The newly created event rule is %s", rule.get("RuleArn"))
- return {'created': True, 'arn': rule.get('RuleArn')}
+ return {"created": True, "arn": rule.get("RuleArn")}
else:
- log.warning('Event rule was not created')
- return {'created': False}
+ log.warning("Event rule was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, delete it.
Returns {deleted: true} if the rule was deleted and returns
@@ -167,19 +176,18 @@ def delete(Name,
salt myminion boto_cloudwatch_event.delete myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_rule(Name=Name)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe(Name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe(Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, describe its properties.
Returns a dictionary of interesting properties.
@@ -190,52 +198,56 @@ def describe(Name,
salt myminion boto_cloudwatch_event.describe myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
rule = conn.describe_rule(Name=Name)
if rule:
- keys = ('Name', 'Arn', 'EventPattern',
- 'ScheduleExpression', 'State',
- 'Description',
- 'RoleArn')
- return {'rule': dict([(k, rule.get(k)) for k in keys])}
+ keys = (
+ "Name",
+ "Arn",
+ "EventPattern",
+ "ScheduleExpression",
+ "State",
+ "Description",
+ "RoleArn",
+ )
+ return {"rule": dict([(k, rule.get(k)) for k in keys])}
else:
- return {'rule': None}
+ return {"rule": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'RuleNotFoundException':
- return {'error': "Rule {0} not found".format(Rule)}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "RuleNotFoundException":
+ return {"error": "Rule {0} not found".format(Rule)}
+ return {"error": __utils__["boto3.get_error"](e)}
def list_rules(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List, with details, all Cloudwatch Event rules visible in the current scope.
CLI example::
salt myminion boto_cloudwatch_event.list_rules region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = []
- NextToken = ''
+ NextToken = ""
while NextToken is not None:
- args = {'NextToken': NextToken} if NextToken else {}
+ args = {"NextToken": NextToken} if NextToken else {}
r = conn.list_rules(**args)
- ret += r.get('Rules', [])
- NextToken = r.get('NextToken')
+ ret += r.get("Rules", [])
+ NextToken = r.get("NextToken")
return ret
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def list_targets(Rule,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_targets(Rule, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, list the targets of that rule.
Returns a dictionary of interesting properties.
@@ -246,29 +258,27 @@ def list_targets(Rule,
salt myminion boto_cloudwatch_event.list_targets myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
targets = conn.list_targets_by_rule(Rule=Rule)
ret = []
- if targets and 'Targets' in targets:
- keys = ('Id', 'Arn', 'Input',
- 'InputPath')
- for target in targets.get('Targets'):
+ if targets and "Targets" in targets:
+ keys = ("Id", "Arn", "Input", "InputPath")
+ for target in targets.get("Targets"):
ret.append(dict([(k, target.get(k)) for k in keys if k in target]))
- return {'targets': ret}
+ return {"targets": ret}
else:
- return {'targets': None}
+ return {"targets": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'RuleNotFoundException':
- return {'error': "Rule {0} not found".format(Rule)}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "RuleNotFoundException":
+ return {"error": "Rule {0} not found".format(Rule)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def put_targets(Rule, Targets,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_targets(Rule, Targets, region=None, key=None, keyid=None, profile=None):
+ """
Add the given targets to the given rule
Returns a dictionary describing any failures.
@@ -279,26 +289,25 @@ def put_targets(Rule, Targets,
salt myminion boto_cloudwatch_event.put_targets myrule [{'Id': 'target1', 'Arn': 'arn:***'}]
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(Targets, six.string_types):
Targets = salt.utils.json.loads(Targets)
failures = conn.put_targets(Rule=Rule, Targets=Targets)
- if failures and failures.get('FailedEntryCount', 0) > 0:
- return {'failures': failures.get('FailedEntries')}
+ if failures and failures.get("FailedEntryCount", 0) > 0:
+ return {"failures": failures.get("FailedEntries")}
else:
- return {'failures': None}
+ return {"failures": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'RuleNotFoundException':
- return {'error': "Rule {0} not found".format(Rule)}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "RuleNotFoundException":
+ return {"error": "Rule {0} not found".format(Rule)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def remove_targets(Rule, Ids,
- region=None, key=None, keyid=None, profile=None):
- '''
+def remove_targets(Rule, Ids, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, remove the named targets from the target list.
Returns a dictionary describing any failures.
@@ -309,18 +318,18 @@ def remove_targets(Rule, Ids,
salt myminion boto_cloudwatch_event.remove_targets myrule ['Target1']
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(Ids, six.string_types):
Ids = salt.utils.json.loads(Ids)
failures = conn.remove_targets(Rule=Rule, Ids=Ids)
- if failures and failures.get('FailedEntryCount', 0) > 0:
- return {'failures': failures.get('FailedEntries', 1)}
+ if failures and failures.get("FailedEntryCount", 0) > 0:
+ return {"failures": failures.get("FailedEntries", 1)}
else:
- return {'failures': None}
+ return {"failures": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'RuleNotFoundException':
- return {'error': "Rule {0} not found".format(Rule)}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "RuleNotFoundException":
+ return {"error": "Rule {0} not found".format(Rule)}
+ return {"error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_cognitoidentity.py b/salt/modules/boto_cognitoidentity.py
index 63377cad658..017bcbdc981 100644
--- a/salt/modules/boto_cognitoidentity.py
+++ b/salt/modules/boto_cognitoidentity.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon CognitoIdentity
.. versionadded:: 2016.11.0
@@ -72,12 +72,13 @@ Connection module for Amazon CognitoIdentity
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import Salt libs
@@ -93,10 +94,12 @@ try:
# pylint: disable=unused-import
import boto
import boto3
+
# pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -104,47 +107,54 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_cognitoidentity execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
- return salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- boto3_ver='1.2.1'
- )
+ return salt.utils.versions.check_boto_reqs(boto_ver="2.8.0", boto3_ver="1.2.1")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'cognito-identity')
+ __utils__["boto3.assign_funcs"](__name__, "cognito-identity")
def _find_identity_pool_ids(name, pool_id, conn):
- '''
+ """
Given an identity pool name (or, optionally, a pool_id, in which case the name
will be ignored), find and return the list of matching identity pool IDs.
- '''
+ """
ids = []
if pool_id is None:
- for pools in __utils__['boto3.paged_call'](conn.list_identity_pools,
- marker_flag='NextToken', marker_arg='NextToken', MaxResults=25):
- for pool in pools['IdentityPools']:
- if pool['IdentityPoolName'] == name:
- ids.append(pool['IdentityPoolId'])
+ for pools in __utils__["boto3.paged_call"](
+ conn.list_identity_pools,
+ marker_flag="NextToken",
+ marker_arg="NextToken",
+ MaxResults=25,
+ ):
+ for pool in pools["IdentityPools"]:
+ if pool["IdentityPoolName"] == name:
+ ids.append(pool["IdentityPoolId"])
else:
ids.append(pool_id)
return ids
-def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
- region=None, key=None, keyid=None, profile=None):
+def describe_identity_pools(
+ IdentityPoolName,
+ IdentityPoolId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
- '''
+ """
Given an identity pool name (or, optionally, an identity pool id, in which case
the given name will be ignored)
@@ -157,7 +167,7 @@ def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
salt myminion boto_cognitoidentity.describe_identity_pools my_id_pool_name
salt myminion boto_cognitoidentity.describe_identity_pools '' IdentityPoolId=my_id_pool_id
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -167,22 +177,27 @@ def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
results = []
for pool_id in ids:
response = conn.describe_identity_pool(IdentityPoolId=pool_id)
- response.pop('ResponseMetadata', None)
+ response.pop("ResponseMetadata", None)
results.append(response)
- return {'identity_pools': results}
+ return {"identity_pools": results}
else:
- return {'identity_pools': None}
+ return {"identity_pools": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_identity_pool(IdentityPoolName,
- AllowUnauthenticatedIdentities=False,
- SupportedLoginProviders=None,
- DeveloperProviderName=None,
- OpenIdConnectProviderARNs=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_identity_pool(
+ IdentityPoolName,
+ AllowUnauthenticatedIdentities=False,
+ SupportedLoginProviders=None,
+ DeveloperProviderName=None,
+ OpenIdConnectProviderARNs=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a new identity pool. All parameters except IdentityPoolName are optional.
SupportedLoginProviders should be a dictionary mapping provider names to provider app
IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider ARNs.
@@ -196,31 +211,43 @@ def create_identity_pool(IdentityPoolName,
salt myminion boto_cognitoidentity.create_identity_pool my_id_pool_name \
DeveloperProviderName=custom_developer_provider
- '''
- SupportedLoginProviders = dict() if SupportedLoginProviders is None else SupportedLoginProviders
- OpenIdConnectProviderARNs = list() if OpenIdConnectProviderARNs is None else OpenIdConnectProviderARNs
+ """
+ SupportedLoginProviders = (
+ dict() if SupportedLoginProviders is None else SupportedLoginProviders
+ )
+ OpenIdConnectProviderARNs = (
+ list() if OpenIdConnectProviderARNs is None else OpenIdConnectProviderARNs
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- request_params = dict(IdentityPoolName=IdentityPoolName,
- AllowUnauthenticatedIdentities=AllowUnauthenticatedIdentities,
- SupportedLoginProviders=SupportedLoginProviders,
- OpenIdConnectProviderARNs=OpenIdConnectProviderARNs)
+ request_params = dict(
+ IdentityPoolName=IdentityPoolName,
+ AllowUnauthenticatedIdentities=AllowUnauthenticatedIdentities,
+ SupportedLoginProviders=SupportedLoginProviders,
+ OpenIdConnectProviderARNs=OpenIdConnectProviderARNs,
+ )
if DeveloperProviderName:
- request_params['DeveloperProviderName'] = DeveloperProviderName
+ request_params["DeveloperProviderName"] = DeveloperProviderName
response = conn.create_identity_pool(**request_params)
- response.pop('ResponseMetadata', None)
+ response.pop("ResponseMetadata", None)
- return {'created': True, 'identity_pool': response}
+ return {"created": True, "identity_pool": response}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_identity_pools(
+ IdentityPoolName,
+ IdentityPoolId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an identity pool name (or, optionally, an identity pool id, in which case
the given name will be ignored)
@@ -234,7 +261,7 @@ def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
salt myminion boto_cognitoidentity.delete_identity_pools my_id_pool_name
salt myminion boto_cognitoidentity.delete_identity_pools '' IdentityPoolId=my_id_pool_id
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -245,16 +272,22 @@ def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
for pool_id in ids:
conn.delete_identity_pool(IdentityPoolId=pool_id)
count += 1
- return {'deleted': True, 'count': count}
+ return {"deleted": True, "count": count}
else:
- return {'deleted': False, 'count': count}
+ return {"deleted": False, "count": count}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_identity_pool_roles(
+ IdentityPoolName,
+ IdentityPoolId=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an identity pool name (or, optionally, an identity pool id, in which case
the given name will be ignored)
@@ -267,7 +300,7 @@ def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
salt myminion boto_cognitoidentity.get_identity_pool_roles my_id_pool_name
salt myminion boto_cognitoidentity.get_identity_pool_roles '' IdentityPoolId=my_id_pool_id
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -277,31 +310,38 @@ def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
results = []
for pool_id in ids:
response = conn.get_identity_pool_roles(IdentityPoolId=pool_id)
- response.pop('ResponseMetadata', None)
+ response.pop("ResponseMetadata", None)
results.append(response)
- return {'identity_pool_roles': results}
+ return {"identity_pool_roles": results}
else:
- return {'identity_pool_roles': None}
+ return {"identity_pool_roles": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def _get_role_arn(name, **conn_params):
- '''
+ """
Helper function to turn a name into an arn string,
returns None if not able to resolve
- '''
- if name.startswith('arn:aws:iam'):
+ """
+ if name.startswith("arn:aws:iam"):
return name
- role = __salt__['boto_iam.describe_role'](name, **conn_params)
- rolearn = role.get('arn') if role else None
+ role = __salt__["boto_iam.describe_role"](name, **conn_params)
+ rolearn = role.get("arn") if role else None
return rolearn
-def set_identity_pool_roles(IdentityPoolId, AuthenticatedRole=None, UnauthenticatedRole=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def set_identity_pool_roles(
+ IdentityPoolId,
+ AuthenticatedRole=None,
+ UnauthenticatedRole=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an identity pool id, set the given AuthenticatedRole and UnauthenticatedRole (the Role
can be an IAM ARN or a role name). If AuthenticatedRole or UnauthenticatedRole is not given,
the authenticated and/or the unauthenticated role associated previously with the pool will be
@@ -321,7 +361,7 @@ def set_identity_pool_roles(IdentityPoolId, AuthenticatedRole=None, Unauthentica
salt myminion boto_cognitoidentity.set_identity_pool_roles my_id_pool_id \
UnauthenticatedRole=my_unauth_role # this will set the unauth role and clear the auth role
- '''
+ """
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
conn = _get_conn(**conn_params)
@@ -329,36 +369,49 @@ def set_identity_pool_roles(IdentityPoolId, AuthenticatedRole=None, Unauthentica
if AuthenticatedRole:
role_arn = _get_role_arn(AuthenticatedRole, **conn_params)
if role_arn is None:
- return {'set': False, 'error': 'invalid AuthenticatedRole {0}'.format(AuthenticatedRole)}
+ return {
+ "set": False,
+ "error": "invalid AuthenticatedRole {0}".format(AuthenticatedRole),
+ }
AuthenticatedRole = role_arn
if UnauthenticatedRole:
role_arn = _get_role_arn(UnauthenticatedRole, **conn_params)
if role_arn is None:
- return {'set': False, 'error': 'invalid UnauthenticatedRole {0}'.format(UnauthenticatedRole)}
+ return {
+ "set": False,
+ "error": "invalid UnauthenticatedRole {0}".format(
+ UnauthenticatedRole
+ ),
+ }
UnauthenticatedRole = role_arn
Roles = dict()
if AuthenticatedRole:
- Roles['authenticated'] = AuthenticatedRole
+ Roles["authenticated"] = AuthenticatedRole
if UnauthenticatedRole:
- Roles['unauthenticated'] = UnauthenticatedRole
+ Roles["unauthenticated"] = UnauthenticatedRole
conn.set_identity_pool_roles(IdentityPoolId=IdentityPoolId, Roles=Roles)
- return {'set': True, 'roles': Roles}
+ return {"set": True, "roles": Roles}
except ClientError as e:
- return {'set': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"set": False, "error": __utils__["boto3.get_error"](e)}
-def update_identity_pool(IdentityPoolId,
- IdentityPoolName=None,
- AllowUnauthenticatedIdentities=False,
- SupportedLoginProviders=None,
- DeveloperProviderName=None,
- OpenIdConnectProviderARNs=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update_identity_pool(
+ IdentityPoolId,
+ IdentityPoolName=None,
+ AllowUnauthenticatedIdentities=False,
+ SupportedLoginProviders=None,
+ DeveloperProviderName=None,
+ OpenIdConnectProviderARNs=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Updates the given IdentityPoolId's properties. All parameters except IdentityPoolId
are optional. SupportedLoginProviders should be a dictionary mapping provider names to
provider app IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider
@@ -379,44 +432,53 @@ def update_identity_pool(IdentityPoolId,
salt myminion boto_cognitoidentity.update_identity_pool my_id_pool_id my_id_pool_name \
DeveloperProviderName=custom_developer_provider
- '''
+ """
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
- response = describe_identity_pools('', IdentityPoolId=IdentityPoolId, **conn_params)
- error = response.get('error')
+ response = describe_identity_pools("", IdentityPoolId=IdentityPoolId, **conn_params)
+ error = response.get("error")
if error is None:
- error = 'No matching pool' if response.get('identity_pools') is None else None
+ error = "No matching pool" if response.get("identity_pools") is None else None
if error:
- return {'updated': False, 'error': error}
+ return {"updated": False, "error": error}
- id_pool = response.get('identity_pools')[0]
+ id_pool = response.get("identity_pools")[0]
request_params = id_pool.copy()
# IdentityPoolName and AllowUnauthenticatedIdentities are required for the call to update_identity_pool
- if IdentityPoolName is not None and IdentityPoolName != request_params.get('IdentityPoolName'):
- request_params['IdentityPoolName'] = IdentityPoolName
+ if IdentityPoolName is not None and IdentityPoolName != request_params.get(
+ "IdentityPoolName"
+ ):
+ request_params["IdentityPoolName"] = IdentityPoolName
- if AllowUnauthenticatedIdentities != request_params.get('AllowUnauthenticatedIdentities'):
- request_params['AllowUnauthenticatedIdentities'] = AllowUnauthenticatedIdentities
+ if AllowUnauthenticatedIdentities != request_params.get(
+ "AllowUnauthenticatedIdentities"
+ ):
+ request_params[
+ "AllowUnauthenticatedIdentities"
+ ] = AllowUnauthenticatedIdentities
- current_val = request_params.pop('SupportedLoginProviders', None)
+ current_val = request_params.pop("SupportedLoginProviders", None)
if SupportedLoginProviders is not None and SupportedLoginProviders != current_val:
- request_params['SupportedLoginProviders'] = SupportedLoginProviders
+ request_params["SupportedLoginProviders"] = SupportedLoginProviders
# we can only set DeveloperProviderName one time per AWS.
- current_val = request_params.pop('DeveloperProviderName', None)
+ current_val = request_params.pop("DeveloperProviderName", None)
if current_val is None and DeveloperProviderName is not None:
- request_params['DeveloperProviderName'] = DeveloperProviderName
+ request_params["DeveloperProviderName"] = DeveloperProviderName
- current_val = request_params.pop('OpenIdConnectProviderARNs', None)
- if OpenIdConnectProviderARNs is not None and OpenIdConnectProviderARNs != current_val:
- request_params['OpenIdConnectProviderARNs'] = OpenIdConnectProviderARNs
+ current_val = request_params.pop("OpenIdConnectProviderARNs", None)
+ if (
+ OpenIdConnectProviderARNs is not None
+ and OpenIdConnectProviderARNs != current_val
+ ):
+ request_params["OpenIdConnectProviderARNs"] = OpenIdConnectProviderARNs
conn = _get_conn(**conn_params)
try:
response = conn.update_identity_pool(**request_params)
- response.pop('ResponseMetadata', None)
+ response.pop("ResponseMetadata", None)
- return {'updated': True, 'identity_pool': response}
+ return {"updated": True, "identity_pool": response}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_datapipeline.py b/salt/modules/boto_datapipeline.py
index cc62541b79c..42a421c3836 100644
--- a/salt/modules/boto_datapipeline.py
+++ b/salt/modules/boto_datapipeline.py
@@ -1,41 +1,44 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Data Pipeline
.. versionadded:: 2016.3.0
:depends: boto3
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.utils.versions
+
# Import Salt libs
from salt.ext import six
-import salt.utils.versions
log = logging.getLogger(__name__)
try:
import boto3
import botocore.exceptions
+
boto3.set_stream_logger(level=logging.CRITICAL)
- logging.getLogger('botocore').setLevel(logging.CRITICAL)
+ logging.getLogger("botocore").setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
- '''
+ """
Only load if boto3 libraries exists.
- '''
+ """
return salt.utils.versions.check_boto_reqs(check_boto=False)
def activate_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Start processing pipeline tasks. This function is idempotent.
CLI example:
@@ -43,20 +46,21 @@ def activate_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=No
.. code-block:: bash
salt myminion boto_datapipeline.activate_pipeline my_pipeline_id
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
client.activate_pipeline(pipelineId=pipeline_id)
- r['result'] = True
+ r["result"] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
-def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_pipeline(
+ name, unique_id, description="", region=None, key=None, keyid=None, profile=None
+):
+ """
Create a new, empty pipeline. This function is idempotent.
CLI example:
@@ -64,23 +68,21 @@ def create_pipeline(name, unique_id, description='', region=None, key=None, keyi
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
response = client.create_pipeline(
- name=name,
- uniqueId=unique_id,
- description=description,
+ name=name, uniqueId=unique_id, description=description,
)
- r['result'] = response['pipelineId']
+ r["result"] = response["pipelineId"]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a pipeline, its pipeline definition, and its run history. This function is idempotent.
CLI example:
@@ -88,19 +90,19 @@ def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None
.. code-block:: bash
salt myminion boto_datapipeline.delete_pipeline my_pipeline_id
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
client.delete_pipeline(pipelineId=pipeline_id)
- r['result'] = True
+ r["result"] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
def describe_pipelines(pipeline_ids, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Retrieve metadata about one or more pipelines.
CLI example:
@@ -108,19 +110,20 @@ def describe_pipelines(pipeline_ids, region=None, key=None, keyid=None, profile=
.. code-block:: bash
salt myminion boto_datapipeline.describe_pipelines ['my_pipeline_id']
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
- r['result'] = client.describe_pipelines(pipelineIds=pipeline_ids)
+ r["result"] = client.describe_pipelines(pipelineIds=pipeline_ids)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
-def get_pipeline_definition(pipeline_id, version='latest', region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_pipeline_definition(
+ pipeline_id, version="latest", region=None, key=None, keyid=None, profile=None
+):
+ """
Get the definition of the specified pipeline.
CLI example:
@@ -128,21 +131,20 @@ def get_pipeline_definition(pipeline_id, version='latest', region=None, key=None
.. code-block:: bash
salt myminion boto_datapipeline.get_pipeline_definition my_pipeline_id
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
- r['result'] = client.get_pipeline_definition(
- pipelineId=pipeline_id,
- version=version,
+ r["result"] = client.get_pipeline_definition(
+ pipelineId=pipeline_id, version=version,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
def list_pipelines(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get a list of pipeline ids and names for all pipelines.
CLI Example:
@@ -150,22 +152,22 @@ def list_pipelines(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_datapipeline.list_pipelines profile=myprofile
- '''
+ """
client = _get_client(region, key, keyid, profile)
r = {}
try:
- paginator = client.get_paginator('list_pipelines')
+ paginator = client.get_paginator("list_pipelines")
pipelines = []
for page in paginator.paginate():
- pipelines += page['pipelineIdList']
- r['result'] = pipelines
+ pipelines += page["pipelineIdList"]
+ r["result"] = pipelines
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
def pipeline_id_from_name(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the pipeline id, if it exists, for the given name.
CLI example:
@@ -173,23 +175,31 @@ def pipeline_id_from_name(name, region=None, key=None, keyid=None, profile=None)
.. code-block:: bash
salt myminion boto_datapipeline.pipeline_id_from_name my_pipeline_name
- '''
+ """
r = {}
result_pipelines = list_pipelines()
- if 'error' in result_pipelines:
+ if "error" in result_pipelines:
return result_pipelines
- for pipeline in result_pipelines['result']:
- if pipeline['name'] == name:
- r['result'] = pipeline['id']
+ for pipeline in result_pipelines["result"]:
+ if pipeline["name"] == name:
+ r["result"] = pipeline["id"]
return r
- r['error'] = 'No pipeline found with name={0}'.format(name)
+ r["error"] = "No pipeline found with name={0}".format(name)
return r
-def put_pipeline_definition(pipeline_id, pipeline_objects, parameter_objects=None,
- parameter_values=None, region=None, key=None, keyid=None, profile=None):
- '''
+def put_pipeline_definition(
+ pipeline_id,
+ pipeline_objects,
+ parameter_objects=None,
+ parameter_values=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Add tasks, schedules, and preconditions to the specified pipeline. This function is
idempotent and will replace an existing definition.
@@ -198,7 +208,7 @@ def put_pipeline_definition(pipeline_id, pipeline_objects, parameter_objects=Non
.. code-block:: bash
salt myminion boto_datapipeline.put_pipeline_definition my_pipeline_id my_pipeline_objects
- '''
+ """
parameter_objects = parameter_objects or []
parameter_values = parameter_values or []
client = _get_client(region, key, keyid, profile)
@@ -210,48 +220,46 @@ def put_pipeline_definition(pipeline_id, pipeline_objects, parameter_objects=Non
parameterObjects=parameter_objects,
parameterValues=parameter_values,
)
- if response['errored']:
- r['error'] = response['validationErrors']
+ if response["errored"]:
+ r["error"] = response["validationErrors"]
else:
- r['result'] = response
+ r["result"] = response
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- r['error'] = six.text_type(e)
+ r["error"] = six.text_type(e)
return r
def _get_client(region, key, keyid, profile):
- '''
+ """
Get a boto connection to Data Pipeline.
- '''
+ """
session = _get_session(region, key, keyid, profile)
if not session:
log.error("Failed to get datapipeline client.")
return None
- return session.client('datapipeline')
+ return session.client("datapipeline")
def _get_session(region, key, keyid, profile):
- '''
+ """
Get a boto3 session
- '''
+ """
if profile:
if isinstance(profile, six.string_types):
- _profile = __salt__['config.option'](profile)
+ _profile = __salt__["config.option"](profile)
elif isinstance(profile, dict):
_profile = profile
- key = _profile.get('key', None)
- keyid = _profile.get('keyid', None)
- region = _profile.get('region', None)
+ key = _profile.get("key", None)
+ keyid = _profile.get("keyid", None)
+ region = _profile.get("region", None)
- if not region and __salt__['config.option']('datapipeline.region'):
- region = __salt__['config.option']('datapipeline.region')
+ if not region and __salt__["config.option"]("datapipeline.region"):
+ region = __salt__["config.option"]("datapipeline.region")
if not region:
- region = 'us-east-1'
+ region = "us-east-1"
return boto3.session.Session(
- region_name=region,
- aws_secret_access_key=key,
- aws_access_key_id=keyid,
+ region_name=region, aws_secret_access_key=key, aws_access_key_id=keyid,
)
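
Reviewer note on boto_datapipeline above: _get_session resolves credentials with a fixed precedence. A profile dict (or a profile name resolved through __salt__["config.option"]) overrides key/keyid/region; failing that, the datapipeline.region config option is consulted; us-east-1 is the hard-coded default. A hedged sketch of that precedence outside Salt, using a plain config dict in place of the __salt__ lookup and skipping the string-profile case:

.. code-block:: python

    import boto3

    def build_session(config, profile=None, key=None, keyid=None, region=None):
        # A dict profile overrides any individually passed credentials.
        if isinstance(profile, dict):
            key = profile.get("key")
            keyid = profile.get("keyid")
            region = profile.get("region")
        # Fall back to module config, then to the documented default region.
        region = region or config.get("datapipeline.region") or "us-east-1"
        return boto3.session.Session(
            region_name=region, aws_secret_access_key=key, aws_access_key_id=keyid
        )
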
diff --git a/salt/modules/boto_dynamodb.py b/salt/modules/boto_dynamodb.py
index ad1c0256e46..a2fd2251f16 100644
--- a/salt/modules/boto_dynamodb.py
+++ b/salt/modules/boto_dynamodb.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon DynamoDB
.. versionadded:: 2015.5.0
@@ -40,54 +40,74 @@ Connection module for Amazon DynamoDB
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
-logger = logging.getLogger(__name__)
-logging.getLogger('boto').setLevel(logging.INFO)
+import salt.utils.versions
+from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
-from salt.exceptions import SaltInvocationError
-import salt.utils.versions
+
+logger = logging.getLogger(__name__)
+logging.getLogger("boto").setLevel(logging.INFO)
+
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto.dynamodb2
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from boto.dynamodb2.fields import HashKey, RangeKey
- from boto.dynamodb2.fields import AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex
+ from boto.dynamodb2.fields import (
+ AllIndex,
+ GlobalAllIndex,
+ GlobalIncludeIndex,
+ GlobalKeysOnlyIndex,
+ )
from boto.dynamodb2.table import Table
from boto.exception import JSONResponseError
+
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs(check_boto3=False)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'dynamodb2', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "dynamodb2", pack=__salt__)
return has_boto_reqs
-def create_table(table_name, region=None, key=None, keyid=None, profile=None,
- read_capacity_units=None, write_capacity_units=None,
- hash_key=None, hash_key_data_type=None, range_key=None,
- range_key_data_type=None, local_indexes=None,
- global_indexes=None):
- '''
+def create_table(
+ table_name,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ read_capacity_units=None,
+ write_capacity_units=None,
+ hash_key=None,
+ hash_key_data_type=None,
+ range_key=None,
+ range_key_data_type=None,
+ local_indexes=None,
+ global_indexes=None,
+):
+ """
Creates a DynamoDB table.
CLI Example:
@@ -102,10 +122,10 @@ def create_table(table_name, region=None, key=None, keyid=None, profile=None,
range_key_data_type=N /
read_capacity_units=1 /
write_capacity_units=1
- '''
+ """
schema = []
primary_index_fields = []
- primary_index_name = ''
+ primary_index_name = ""
if hash_key:
hash_key_obj = HashKey(hash_key, data_type=hash_key_data_type)
schema.append(hash_key_obj)
@@ -115,13 +135,10 @@ def create_table(table_name, region=None, key=None, keyid=None, profile=None,
range_key_obj = RangeKey(range_key, data_type=range_key_data_type)
schema.append(range_key_obj)
primary_index_fields.append(range_key_obj)
- primary_index_name += '_'
+ primary_index_name += "_"
primary_index_name += range_key
- primary_index_name += '_index'
- throughput = {
- 'read': read_capacity_units,
- 'write': write_capacity_units
- }
+ primary_index_name += "_index"
+ throughput = {"read": read_capacity_units, "write": write_capacity_units}
local_table_indexes = []
if local_indexes:
for index in local_indexes:
@@ -129,9 +146,7 @@ def create_table(table_name, region=None, key=None, keyid=None, profile=None,
global_table_indexes = []
if global_indexes:
for index in global_indexes:
- global_table_indexes.append(
- extract_index(index, global_index=True)
- )
+ global_table_indexes.append(extract_index(index, global_index=True))
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -141,28 +156,22 @@ def create_table(table_name, region=None, key=None, keyid=None, profile=None,
throughput=throughput,
indexes=local_table_indexes,
global_indexes=global_table_indexes,
- connection=conn
+ connection=conn,
)
# Table creation can take several seconds to propagate.
# We will check MAX_ATTEMPTS times.
MAX_ATTEMPTS = 30
for i in range(MAX_ATTEMPTS):
- if exists(
- table_name,
- region,
- key,
- keyid,
- profile
- ):
+ if exists(table_name, region, key, keyid, profile):
return True
else:
- time.sleep(1) # sleep for one second and try again
+ time.sleep(1) # sleep for one second and try again
return False
def exists(table_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a table exists.
CLI Example:
@@ -170,12 +179,12 @@ def exists(table_name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_dynamodb.exists table_name region=us-east-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.describe_table(table_name)
except JSONResponseError as e:
- if e.error_code == 'ResourceNotFoundException':
+ if e.error_code == "ResourceNotFoundException":
return False
raise
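
Reviewer note on the exists() hunk above: returning False is reserved for ResourceNotFoundException, and every other JSONResponseError is re-raised, which is what makes the thirty-attempt polling loops in create_table and delete safe. A usage sketch of the same polling, with the predicate passed in so the snippet stays self-contained:

.. code-block:: python

    import time

    def wait_for_table(exists_fn, table_name, attempts=30):
        """Poll an exists()-style predicate until the table materializes."""
        for _ in range(attempts):
            if exists_fn(table_name):  # False can only mean "not found"
                return True
            time.sleep(1)  # table creation takes seconds to propagate
        return False
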
@@ -183,7 +192,7 @@ def exists(table_name, region=None, key=None, keyid=None, profile=None):
def delete(table_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a DynamoDB table.
CLI Example:
@@ -191,7 +200,7 @@ def delete(table_name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_dynamodb.delete table_name region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
@@ -204,27 +213,35 @@ def delete(table_name, region=None, key=None, keyid=None, profile=None):
if not exists(table_name, region, key, keyid, profile):
return True
else:
- time.sleep(1) # sleep for one second and try again
+ time.sleep(1) # sleep for one second and try again
return False
-def update(table_name, throughput=None, global_indexes=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update(
+ table_name,
+ throughput=None,
+ global_indexes=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.update table_name region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.update(throughput=throughput, global_indexes=global_indexes)
-def create_global_secondary_index(table_name, global_index, region=None,
- key=None, keyid=None, profile=None):
- '''
+def create_global_secondary_index(
+ table_name, global_index, region=None, key=None, keyid=None, profile=None
+):
+ """
Creates a single global secondary index on a DynamoDB table.
CLI Example:
@@ -232,15 +249,16 @@ def create_global_secondary_index(table_name, global_index, region=None,
salt myminion boto_dynamodb.create_global_secondary_index table_name /
index_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.create_global_secondary_index(global_index)
-def update_global_secondary_index(table_name, global_indexes, region=None,
- key=None, keyid=None, profile=None):
- '''
+def update_global_secondary_index(
+ table_name, global_indexes, region=None, key=None, keyid=None, profile=None
+):
+ """
Updates the throughput of the given global secondary indexes.
CLI Example:
@@ -248,106 +266,104 @@ def update_global_secondary_index(table_name, global_indexes, region=None,
salt myminion boto_dynamodb.update_global_secondary_index table_name /
indexes
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.update_global_secondary_index(global_indexes)
def describe(table_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.describe()
def extract_index(index_data, global_index=False):
- '''
+ """
Instantiates and returns an AllIndex object given a valid index
configuration
CLI Example:
salt myminion boto_dynamodb.extract_index index
- '''
+ """
parsed_data = {}
keys = []
for key, value in six.iteritems(index_data):
for item in value:
for field, data in six.iteritems(item):
- if field == 'hash_key':
- parsed_data['hash_key'] = data
- elif field == 'hash_key_data_type':
- parsed_data['hash_key_data_type'] = data
- elif field == 'range_key':
- parsed_data['range_key'] = data
- elif field == 'range_key_data_type':
- parsed_data['range_key_data_type'] = data
- elif field == 'name':
- parsed_data['name'] = data
- elif field == 'read_capacity_units':
- parsed_data['read_capacity_units'] = data
- elif field == 'write_capacity_units':
- parsed_data['write_capacity_units'] = data
- elif field == 'includes':
- parsed_data['includes'] = data
- elif field == 'keys_only':
- parsed_data['keys_only'] = True
+ if field == "hash_key":
+ parsed_data["hash_key"] = data
+ elif field == "hash_key_data_type":
+ parsed_data["hash_key_data_type"] = data
+ elif field == "range_key":
+ parsed_data["range_key"] = data
+ elif field == "range_key_data_type":
+ parsed_data["range_key_data_type"] = data
+ elif field == "name":
+ parsed_data["name"] = data
+ elif field == "read_capacity_units":
+ parsed_data["read_capacity_units"] = data
+ elif field == "write_capacity_units":
+ parsed_data["write_capacity_units"] = data
+ elif field == "includes":
+ parsed_data["includes"] = data
+ elif field == "keys_only":
+ parsed_data["keys_only"] = True
- if parsed_data['hash_key']:
+ if parsed_data["hash_key"]:
keys.append(
HashKey(
- parsed_data['hash_key'],
- data_type=parsed_data['hash_key_data_type']
+ parsed_data["hash_key"], data_type=parsed_data["hash_key_data_type"]
)
)
- if parsed_data.get('range_key'):
+ if parsed_data.get("range_key"):
keys.append(
RangeKey(
- parsed_data['range_key'],
- data_type=parsed_data['range_key_data_type']
+ parsed_data["range_key"], data_type=parsed_data["range_key_data_type"]
)
)
if (
- global_index and
- parsed_data['read_capacity_units'] and
- parsed_data['write_capacity_units']):
- parsed_data['throughput'] = {
- 'read': parsed_data['read_capacity_units'],
- 'write': parsed_data['write_capacity_units']
+ global_index
+ and parsed_data["read_capacity_units"]
+ and parsed_data["write_capacity_units"]
+ ):
+ parsed_data["throughput"] = {
+ "read": parsed_data["read_capacity_units"],
+ "write": parsed_data["write_capacity_units"],
}
- if parsed_data['name'] and len(keys) > 0:
+ if parsed_data["name"] and len(keys) > 0:
if global_index:
- if parsed_data.get('keys_only') and parsed_data.get('includes'):
- raise SaltInvocationError('Only one type of GSI projection can be used.')
-
- if parsed_data.get('includes'):
- return GlobalIncludeIndex(
- parsed_data['name'],
- parts=keys,
- throughput=parsed_data['throughput'],
- includes=parsed_data['includes']
+ if parsed_data.get("keys_only") and parsed_data.get("includes"):
+ raise SaltInvocationError(
+ "Only one type of GSI projection can be used."
)
- elif parsed_data.get('keys_only'):
- return GlobalKeysOnlyIndex(
- parsed_data['name'],
+
+ if parsed_data.get("includes"):
+ return GlobalIncludeIndex(
+ parsed_data["name"],
parts=keys,
- throughput=parsed_data['throughput'],
+ throughput=parsed_data["throughput"],
+ includes=parsed_data["includes"],
+ )
+ elif parsed_data.get("keys_only"):
+ return GlobalKeysOnlyIndex(
+ parsed_data["name"],
+ parts=keys,
+ throughput=parsed_data["throughput"],
)
else:
return GlobalAllIndex(
- parsed_data['name'],
+ parsed_data["name"],
parts=keys,
- throughput=parsed_data['throughput']
+ throughput=parsed_data["throughput"],
)
else:
- return AllIndex(
- parsed_data['name'],
- parts=keys
- )
+ return AllIndex(parsed_data["name"], parts=keys)
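
Reviewer note on the extract_index rewrite above: the triple loop walks a mapping whose value is a list of single-key dicts, which is easy to misread in diff form. An input in the shape the loop expects, with field names taken from the branches above (the index and attribute names themselves are hypothetical):

.. code-block:: python

    index_data = {
        "users_gsi": [
            {"name": "users_gsi"},
            {"hash_key": "user_id"},
            {"hash_key_data_type": "N"},
            {"range_key": "created"},
            {"range_key_data_type": "N"},
            {"read_capacity_units": 1},
            {"write_capacity_units": 1},
        ]
    }
    # With both capacities set and neither keys_only nor includes,
    # extract_index(index_data, global_index=True) builds the throughput
    # dict and returns a GlobalAllIndex over the two keys.
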
diff --git a/salt/modules/boto_ec2.py b/salt/modules/boto_ec2.py
index 27566ffb8b0..30081f3448d 100644
--- a/salt/modules/boto_ec2.py
+++ b/salt/modules/boto_ec2.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon EC2
.. versionadded:: 2015.8.0
@@ -39,12 +39,13 @@ as a passed in dict, or as a string to pull from pillars or minion config:
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
@@ -53,18 +54,23 @@ import salt.utils.compat
import salt.utils.data
import salt.utils.json
import salt.utils.versions
+from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
from salt.ext.six.moves import map
-from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
try:
# pylint: disable=unused-import
import boto
import boto.ec2
+
# pylint: enable=unused-import
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
- from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection
+ from boto.ec2.networkinterface import (
+ NetworkInterfaceSpecification,
+ NetworkInterfaceCollection,
+ )
+
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
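
Reviewer note on the import hunk above: this is the module's optional-dependency guard, so a minion without boto still loads the file and gets a clean answer from __virtual__ instead of an ImportError. The generic shape of the pattern, reduced to a sketch (the reason string is illustrative; the real module delegates to salt.utils.versions.check_boto_reqs):

.. code-block:: python

    try:
        import boto  # noqa: F401  (only the import's success matters)

        HAS_BOTO = True
    except ImportError:
        HAS_BOTO = False

    def __virtual__():
        # Salt skips the module when this returns False; the second
        # element of the tuple is the reason surfaced to the user.
        if not HAS_BOTO:
            return False, "boto is required for the boto_ec2 module"
        return True
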
@@ -74,31 +80,31 @@ log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_ec2 execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
has_boto_reqs = salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- check_boto3=False
+ boto_ver="2.8.0", check_boto3=False
)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'ec2', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "ec2", pack=__salt__)
return has_boto_reqs
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'ec2')
+ __utils__["boto.assign_funcs"](__name__, "ec2")
-def _get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def _get_all_eip_addresses(
+ addresses=None, allocation_ids=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get all EIP's associated with the current credentials.
addresses
@@ -110,19 +116,22 @@ def _get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
returns
(list) - The requested Addresses as a list of :class:`boto.ec2.address.Address`
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- return conn.get_all_addresses(addresses=addresses, allocation_ids=allocation_ids)
+ return conn.get_all_addresses(
+ addresses=addresses, allocation_ids=allocation_ids
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return []
-def get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def get_all_eip_addresses(
+ addresses=None, allocation_ids=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get public addresses of some, or all EIPs associated with the current account.
addresses
@@ -142,14 +151,19 @@ def get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
salt-call boto_ec2.get_all_eip_addresses
.. versionadded:: 2016.3.0
- '''
- return [x.public_ip for x in _get_all_eip_addresses(addresses, allocation_ids, region,
- key, keyid, profile)]
+ """
+ return [
+ x.public_ip
+ for x in _get_all_eip_addresses(
+ addresses, allocation_ids, region, key, keyid, profile
+ )
+ ]
-def get_unassociated_eip_address(domain='standard', region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_unassociated_eip_address(
+ domain="standard", region=None, key=None, keyid=None, profile=None
+):
+ """
Return the first unassociated EIP
domain
@@ -163,40 +177,49 @@ def get_unassociated_eip_address(domain='standard', region=None, key=None,
salt-call boto_ec2.get_unassociated_eip_address
.. versionadded:: 2016.3.0
- '''
+ """
eip = None
- for address in get_all_eip_addresses(region=region, key=key, keyid=keyid,
- profile=profile):
- address_info = get_eip_address_info(addresses=address, region=region,
- key=key, keyid=keyid,
- profile=profile)[0]
- if address_info['instance_id']:
- log.debug('%s is already associated with the instance %s',
- address, address_info['instance_id'])
+ for address in get_all_eip_addresses(
+ region=region, key=key, keyid=keyid, profile=profile
+ ):
+ address_info = get_eip_address_info(
+ addresses=address, region=region, key=key, keyid=keyid, profile=profile
+ )[0]
+ if address_info["instance_id"]:
+ log.debug(
+ "%s is already associated with the instance %s",
+ address,
+ address_info["instance_id"],
+ )
continue
- if address_info['network_interface_id']:
- log.debug('%s is already associated with the network interface %s',
- address, address_info['network_interface_id'])
+ if address_info["network_interface_id"]:
+ log.debug(
+ "%s is already associated with the network interface %s",
+ address,
+ address_info["network_interface_id"],
+ )
continue
- if address_info['domain'] == domain:
+ if address_info["domain"] == domain:
log.debug(
"The first unassociated EIP address in the domain '%s' is %s",
- domain, address
+ domain,
+ address,
)
eip = address
break
if not eip:
- log.debug('No unassociated Elastic IP found!')
+ log.debug("No unassociated Elastic IP found!")
return eip
-def get_eip_address_info(addresses=None, allocation_ids=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_eip_address_info(
+ addresses=None, allocation_ids=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get 'interesting' info about some, or all EIPs associated with the current account.
addresses
@@ -216,24 +239,37 @@ def get_eip_address_info(addresses=None, allocation_ids=None, region=None, key=N
salt-call boto_ec2.get_eip_address_info addresses=52.4.2.15
.. versionadded:: 2016.3.0
- '''
- if type(addresses) == (type('string')):
+ """
+ if type(addresses) == (type("string")):
addresses = [addresses]
- if type(allocation_ids) == (type('string')):
+ if type(allocation_ids) == (type("string")):
allocation_ids = [allocation_ids]
- ret = _get_all_eip_addresses(addresses=addresses, allocation_ids=allocation_ids,
- region=region, key=key, keyid=keyid, profile=profile)
+ ret = _get_all_eip_addresses(
+ addresses=addresses,
+ allocation_ids=allocation_ids,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
- interesting = ['allocation_id', 'association_id', 'domain', 'instance_id',
- 'network_interface_id', 'network_interface_owner_id', 'public_ip',
- 'private_ip_address']
+ interesting = [
+ "allocation_id",
+ "association_id",
+ "domain",
+ "instance_id",
+ "network_interface_id",
+ "network_interface_owner_id",
+ "public_ip",
+ "private_ip_address",
+ ]
return [dict([(x, getattr(address, x)) for x in interesting]) for address in ret]
def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Allocate a new Elastic IP address and associate it with your account.
domain
@@ -253,9 +289,11 @@ def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile
salt-call boto_ec2.allocate_eip_address domain=vpc
.. versionadded:: 2016.3.0
- '''
- if domain and domain != 'vpc':
- raise SaltInvocationError('The only permitted value for the \'domain\' param is \'vpc\'.')
+ """
+ if domain and domain != "vpc":
+ raise SaltInvocationError(
+ "The only permitted value for the 'domain' param is 'vpc'."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -265,16 +303,24 @@ def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile
log.error(e)
return False
- interesting = ['allocation_id', 'association_id', 'domain', 'instance_id',
- 'network_interface_id', 'network_interface_owner_id', 'public_ip',
- 'private_ip_address']
+ interesting = [
+ "allocation_id",
+ "association_id",
+ "domain",
+ "instance_id",
+ "network_interface_id",
+ "network_interface_owner_id",
+ "public_ip",
+ "private_ip_address",
+ ]
return dict([(x, getattr(address, x)) for x in interesting])
-def release_eip_address(public_ip=None, allocation_id=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def release_eip_address(
+ public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Free an Elastic IP address. Pass either a public IP address to release an
EC2 Classic EIP, or an AllocationId to release a VPC EIP.
@@ -293,10 +339,11 @@ def release_eip_address(public_ip=None, allocation_id=None, region=None, key=Non
salt myminion boto_ec2.release_eip_address allocation_id=eipalloc-ef382c8a
.. versionadded:: 2016.3.0
- '''
+ """
if not salt.utils.data.exactly_one((public_ip, allocation_id)):
- raise SaltInvocationError("Exactly one of 'public_ip' OR "
- "'allocation_id' must be provided")
+ raise SaltInvocationError(
+ "Exactly one of 'public_ip' OR " "'allocation_id' must be provided"
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -307,12 +354,21 @@ def release_eip_address(public_ip=None, allocation_id=None, region=None, key=Non
return False
-def associate_eip_address(instance_id=None, instance_name=None, public_ip=None,
- allocation_id=None, network_interface_id=None,
- network_interface_name=None, private_ip_address=None,
- allow_reassociation=False, region=None, key=None,
- keyid=None, profile=None):
- '''
+def associate_eip_address(
+ instance_id=None,
+ instance_name=None,
+ public_ip=None,
+ allocation_id=None,
+ network_interface_id=None,
+ network_interface_name=None,
+ private_ip_address=None,
+ allow_reassociation=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Associate an Elastic IP address with a currently running instance or a network interface.
This requires exactly one of either 'public_ip' or 'allocation_id', depending
on whether you’re associating a VPC address or a plain EC2 address.
@@ -344,58 +400,71 @@ def associate_eip_address(instance_id=None, instance_name=None, public_ip=None,
salt myminion boto_ec2.associate_eip_address instance_name=bubba.ho.tep allocation_id=eipalloc-ef382c8a
.. versionadded:: 2016.3.0
- '''
- if not salt.utils.data.exactly_one((instance_id, instance_name,
- network_interface_id,
- network_interface_name)):
- raise SaltInvocationError("Exactly one of 'instance_id', "
- "'instance_name', 'network_interface_id', "
- "'network_interface_name' must be provided")
+ """
+ if not salt.utils.data.exactly_one(
+ (instance_id, instance_name, network_interface_id, network_interface_name)
+ ):
+ raise SaltInvocationError(
+ "Exactly one of 'instance_id', "
+ "'instance_name', 'network_interface_id', "
+ "'network_interface_name' must be provided"
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if instance_name:
try:
- instance_id = get_id(name=instance_name, region=region, key=key,
- keyid=keyid, profile=profile)
+ instance_id = get_id(
+ name=instance_name, region=region, key=key, keyid=keyid, profile=profile
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
if not instance_id:
log.error(
"Given instance_name '%s' cannot be mapped to an instance_id",
- instance_name
+ instance_name,
)
return False
if network_interface_name:
try:
network_interface_id = get_network_interface_id(
- network_interface_name, region=region, key=key, keyid=keyid,
- profile=profile)
+ network_interface_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
if not network_interface_id:
- log.error("Given network_interface_name '%s' cannot be mapped to "
- "an network_interface_id", network_interface_name)
+ log.error(
+ "Given network_interface_name '%s' cannot be mapped to "
+ "an network_interface_id",
+ network_interface_name,
+ )
return False
try:
- return conn.associate_address(instance_id=instance_id,
- public_ip=public_ip,
- allocation_id=allocation_id,
- network_interface_id=network_interface_id,
- private_ip_address=private_ip_address,
- allow_reassociation=allow_reassociation)
+ return conn.associate_address(
+ instance_id=instance_id,
+ public_ip=public_ip,
+ allocation_id=allocation_id,
+ network_interface_id=network_interface_id,
+ private_ip_address=private_ip_address,
+ allow_reassociation=allow_reassociation,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
-def disassociate_eip_address(public_ip=None, association_id=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def disassociate_eip_address(
+ public_ip=None, association_id=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Disassociate an Elastic IP address from a currently running instance. This
requires exactly one of either 'association_id' or 'public_ip', depending
on whether you’re dealing with a VPC or EC2 Classic address.
@@ -415,7 +484,7 @@ def disassociate_eip_address(public_ip=None, association_id=None, region=None,
salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16
.. versionadded:: 2016.3.0
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -425,11 +494,18 @@ def disassociate_eip_address(public_ip=None, association_id=None, region=None,
return False
-def assign_private_ip_addresses(network_interface_name=None, network_interface_id=None,
- private_ip_addresses=None, secondary_private_ip_address_count=None,
- allow_reassignment=False, region=None, key=None,
- keyid=None, profile=None):
- '''
+def assign_private_ip_addresses(
+ network_interface_name=None,
+ network_interface_id=None,
+ private_ip_addresses=None,
+ secondary_private_ip_address_count=None,
+ allow_reassignment=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Assigns one or more secondary private IP addresses to a network interface.
network_interface_id
@@ -454,41 +530,57 @@ def assign_private_ip_addresses(network_interface_name=None, network_interface_i
salt myminion boto_ec2.assign_private_ip_addresses network_interface_name=my_eni secondary_private_ip_address_count=2
.. versionadded:: 2017.7.0
- '''
- if not salt.utils.data.exactly_one((network_interface_name,
- network_interface_id)):
- raise SaltInvocationError("Exactly one of 'network_interface_name', "
- "'network_interface_id' must be provided")
+ """
+ if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
+ raise SaltInvocationError(
+ "Exactly one of 'network_interface_name', "
+ "'network_interface_id' must be provided"
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if network_interface_name:
try:
network_interface_id = get_network_interface_id(
- network_interface_name, region=region, key=key, keyid=keyid,
- profile=profile)
+ network_interface_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
if not network_interface_id:
- log.error("Given network_interface_name '%s' cannot be mapped to "
- "an network_interface_id", network_interface_name)
+ log.error(
+ "Given network_interface_name '%s' cannot be mapped to "
+ "an network_interface_id",
+ network_interface_name,
+ )
return False
try:
- return conn.assign_private_ip_addresses(network_interface_id=network_interface_id,
- private_ip_addresses=private_ip_addresses,
- secondary_private_ip_address_count=secondary_private_ip_address_count,
- allow_reassignment=allow_reassignment)
+ return conn.assign_private_ip_addresses(
+ network_interface_id=network_interface_id,
+ private_ip_addresses=private_ip_addresses,
+ secondary_private_ip_address_count=secondary_private_ip_address_count,
+ allow_reassignment=allow_reassignment,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
-def unassign_private_ip_addresses(network_interface_name=None, network_interface_id=None,
- private_ip_addresses=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def unassign_private_ip_addresses(
+ network_interface_name=None,
+ network_interface_id=None,
+ private_ip_addresses=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Unassigns one or more secondary private IP addresses from a network interface
network_interface_id
@@ -508,37 +600,47 @@ def unassign_private_ip_addresses(network_interface_name=None, network_interface
salt myminion boto_ec2.unassign_private_ip_addresses network_interface_name=my_eni private_ip_addresses=private_ip
.. versionadded:: 2017.7.0
- '''
- if not salt.utils.data.exactly_one((network_interface_name,
- network_interface_id)):
- raise SaltInvocationError("Exactly one of 'network_interface_name', "
- "'network_interface_id' must be provided")
+ """
+ if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
+ raise SaltInvocationError(
+ "Exactly one of 'network_interface_name', "
+ "'network_interface_id' must be provided"
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if network_interface_name:
try:
network_interface_id = get_network_interface_id(
- network_interface_name, region=region, key=key, keyid=keyid,
- profile=profile)
+ network_interface_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
if not network_interface_id:
- log.error("Given network_interface_name '%s' cannot be mapped to "
- "an network_interface_id", network_interface_name)
+ log.error(
+ "Given network_interface_name '%s' cannot be mapped to "
+ "an network_interface_id",
+ network_interface_name,
+ )
return False
try:
- return conn.unassign_private_ip_addresses(network_interface_id=network_interface_id,
- private_ip_addresses=private_ip_addresses)
+ return conn.unassign_private_ip_addresses(
+ network_interface_id=network_interface_id,
+ private_ip_addresses=private_ip_addresses,
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
def get_zones(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get a list of AZs for the configured region.
CLI Example:
@@ -546,17 +648,26 @@ def get_zones(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_ec2.get_zones
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
return [z.name for z in conn.get_all_zones()]
-def find_instances(instance_id=None, name=None, tags=None, region=None,
- key=None, keyid=None, profile=None, return_objs=False,
- in_states=None, filters=None):
+def find_instances(
+ instance_id=None,
+ name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ return_objs=False,
+ in_states=None,
+ filters=None,
+):
- '''
+ """
Given instance properties, find and return matching instance ids
CLI Examples:
@@ -568,35 +679,38 @@ def find_instances(instance_id=None, name=None, tags=None, region=None,
salt myminion boto_ec2.find_instances tags='{"mytag": "value"}'
salt myminion boto_ec2.find_instances filters='{"vpc-id": "vpc-12345678"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if instance_id:
- filter_parameters['instance_ids'] = [instance_id]
+ filter_parameters["instance_ids"] = [instance_id]
if name:
- filter_parameters['filters']['tag:Name'] = name
+ filter_parameters["filters"]["tag:Name"] = name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
if filters:
- filter_parameters['filters'].update(filters)
+ filter_parameters["filters"].update(filters)
reservations = conn.get_all_reservations(**filter_parameters)
instances = [i for r in reservations for i in r.instances]
- log.debug('The filters criteria %s matched the following '
- 'instances:%s', filter_parameters, instances)
+ log.debug(
+ "The filters criteria %s matched the following " "instances:%s",
+ filter_parameters,
+ instances,
+ )
if in_states:
instances = [i for i in instances if i.state in in_states]
log.debug(
- 'Limiting instance matches to those in the requested states: %s',
- instances
+ "Limiting instance matches to those in the requested states: %s",
+ instances,
)
if instances:
if return_objs:
@@ -609,10 +723,21 @@ def find_instances(instance_id=None, name=None, tags=None, region=None,
return []
-def create_image(ami_name, instance_id=None, instance_name=None, tags=None, region=None,
- key=None, keyid=None, profile=None, description=None, no_reboot=False,
- dry_run=False, filters=None):
- '''
+def create_image(
+ ami_name,
+ instance_id=None,
+ instance_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ description=None,
+ no_reboot=False,
+ dry_run=False,
+ filters=None,
+):
+ """
Given instance properties that define exactly one instance, create AMI and return AMI-id.
CLI Examples:
@@ -622,32 +747,53 @@ def create_image(ami_name, instance_id=None, instance_name=None, tags=None, regi
salt myminion boto_ec2.create_image ami_name instance_name=myinstance
salt myminion boto_ec2.create_image another_ami_name tags='{"mytag": "value"}' description='this is my ami'
- '''
+ """
- instances = find_instances(instance_id=instance_id, name=instance_name, tags=tags,
- region=region, key=key, keyid=keyid, profile=profile,
- return_objs=True, filters=filters)
+ instances = find_instances(
+ instance_id=instance_id,
+ name=instance_name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ return_objs=True,
+ filters=filters,
+ )
if not instances:
- log.error('Source instance not found')
+ log.error("Source instance not found")
return False
if len(instances) > 1:
- log.error('Multiple instances found, must match exactly only one instance to create an image from')
+ log.error(
+ "Multiple instances found, must match exactly only one instance to create an image from"
+ )
return False
instance = instances[0]
try:
- return instance.create_image(ami_name, description=description,
- no_reboot=no_reboot, dry_run=dry_run)
+ return instance.create_image(
+ ami_name, description=description, no_reboot=no_reboot, dry_run=dry_run
+ )
except boto.exception.BotoServerError as exc:
log.error(exc)
return False
-def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None, tags=None,
- region=None, key=None, keyid=None, profile=None, return_objs=False):
+def find_images(
+ ami_name=None,
+ executable_by=None,
+ owners=None,
+ image_ids=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ return_objs=False,
+):
- '''
+ """
Given image properties, find and return matching AMI ids
CLI Examples:
@@ -656,26 +802,29 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
- '''
+ """
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if image_ids:
- filter_parameters['image_ids'] = [image_ids]
+ filter_parameters["image_ids"] = [image_ids]
if executable_by:
- filter_parameters['executable_by'] = [executable_by]
+ filter_parameters["executable_by"] = [executable_by]
if owners:
- filter_parameters['owners'] = [owners]
+ filter_parameters["owners"] = [owners]
if ami_name:
- filter_parameters['filters']['name'] = ami_name
+ filter_parameters["filters"]["name"] = ami_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
- log.debug('The filters criteria %s matched the following '
- 'images:%s', filter_parameters, images)
+ log.debug(
+ "The filters criteria %s matched the following " "images:%s",
+ filter_parameters,
+ images,
+ )
if images:
if return_objs:
return images
@@ -683,19 +832,26 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
else:
return False
except boto.exception.BotoServerError as exc:
- if exc.error_code == 'Throttling':
+ if exc.error_code == "Throttling":
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
- log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
+ log.error("Failed to convert AMI name `%s` to an AMI ID: %s", ami_name, exc)
return False
return False
-def terminate(instance_id=None, name=None, region=None,
- key=None, keyid=None, profile=None, filters=None):
- '''
+def terminate(
+ instance_id=None,
+ name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ filters=None,
+):
+ """
Terminate the instance described by instance_id or name.
CLI Example:
@@ -704,11 +860,17 @@ def terminate(instance_id=None, name=None, region=None,
salt myminion boto_ec2.terminate name=myinstance
salt myminion boto_ec2.terminate instance_id=i-a46b9f
- '''
- instances = find_instances(instance_id=instance_id, name=name,
- region=region, key=key, keyid=keyid,
- profile=profile, return_objs=True,
- filters=filters)
+ """
+ instances = find_instances(
+ instance_id=instance_id,
+ name=name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ return_objs=True,
+ filters=filters,
+ )
if instances in (False, None, []):
return instances
@@ -716,14 +878,22 @@ def terminate(instance_id=None, name=None, region=None,
instances[0].terminate()
return True
else:
- log.warning('Refusing to terminate multiple instances at once')
+ log.warning("Refusing to terminate multiple instances at once")
return False
-def get_id(name=None, tags=None, region=None, key=None,
- keyid=None, profile=None, in_states=None, filters=None):
+def get_id(
+ name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ in_states=None,
+ filters=None,
+):
- '''
+ """
Given instance properties, return the instance id if it exists.
CLI Example:
@@ -732,25 +902,32 @@ def get_id(name=None, tags=None, region=None, key=None,
salt myminion boto_ec2.get_id myinstance
- '''
- instance_ids = find_instances(name=name, tags=tags, region=region, key=key,
- keyid=keyid, profile=profile, in_states=in_states,
- filters=filters)
+ """
+ instance_ids = find_instances(
+ name=name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ in_states=in_states,
+ filters=filters,
+ )
if instance_ids:
log.info("Instance ids: %s", " ".join(instance_ids))
if len(instance_ids) == 1:
return instance_ids[0]
else:
- raise CommandExecutionError('Found more than one instance '
- 'matching the criteria.')
+ raise CommandExecutionError(
+ "Found more than one instance " "matching the criteria."
+ )
else:
- log.warning('Could not find instance.')
+ log.warning("Could not find instance.")
return None
-def get_tags(instance_id=None, keyid=None, key=None, profile=None,
- region=None):
- '''
+def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):
+ """
Given an instance_id, return a list of tags associated with that instance.
returns
@@ -761,7 +938,7 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None,
.. code-block:: bash
salt myminion boto_ec2.get_tags instance_id
- '''
+ """
tags = []
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
result = client.get_all_tags(filters={"resource-id": instance_id})
@@ -773,9 +950,18 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None,
return tags
-def exists(instance_id=None, name=None, tags=None, region=None, key=None,
- keyid=None, profile=None, in_states=None, filters=None):
- '''
+def exists(
+ instance_id=None,
+ name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ in_states=None,
+ filters=None,
+):
+ """
Given an instance id, check to see if the given instance id exists.
Returns True if the given instance with the given id, name, or tags
@@ -786,20 +972,28 @@ def exists(instance_id=None, name=None, tags=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_ec2.exists myinstance
- '''
- instances = find_instances(instance_id=instance_id, name=name, tags=tags,
- region=region, key=key, keyid=keyid,
- profile=profile, in_states=in_states, filters=filters)
+ """
+ instances = find_instances(
+ instance_id=instance_id,
+ name=name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ in_states=in_states,
+ filters=filters,
+ )
if instances:
- log.info('Instance exists.')
+ log.info("Instance exists.")
return True
else:
- log.warning('Instance does not exist.')
+ log.warning("Instance does not exist.")
return False
def _to_blockdev_map(thing):
- '''
+ """
Convert a string, or a json payload, or a dict in the right
format, into a boto.ec2.blockdevicemapping.BlockDeviceMapping as
needed by instance_present(). The following YAML is a direct
@@ -821,7 +1015,7 @@ def _to_blockdev_map(thing):
size: 20
volume_type: gp2
- '''
+ """
if not thing:
return None
if isinstance(thing, BlockDeviceMapping):
@@ -829,41 +1023,73 @@ def _to_blockdev_map(thing):
if isinstance(thing, six.string_types):
thing = salt.utils.json.loads(thing)
if not isinstance(thing, dict):
- log.error("Can't convert '%s' of type %s to a "
- "boto.ec2.blockdevicemapping.BlockDeviceMapping", thing, type(thing))
+ log.error(
+ "Can't convert '%s' of type %s to a "
+ "boto.ec2.blockdevicemapping.BlockDeviceMapping",
+ thing,
+ type(thing),
+ )
return None
bdm = BlockDeviceMapping()
for d, t in six.iteritems(thing):
- bdt = BlockDeviceType(ephemeral_name=t.get('ephemeral_name'),
- no_device=t.get('no_device', False),
- volume_id=t.get('volume_id'),
- snapshot_id=t.get('snapshot_id'),
- status=t.get('status'),
- attach_time=t.get('attach_time'),
- delete_on_termination=t.get('delete_on_termination', False),
- size=t.get('size'),
- volume_type=t.get('volume_type'),
- iops=t.get('iops'),
- encrypted=t.get('encrypted'))
+ bdt = BlockDeviceType(
+ ephemeral_name=t.get("ephemeral_name"),
+ no_device=t.get("no_device", False),
+ volume_id=t.get("volume_id"),
+ snapshot_id=t.get("snapshot_id"),
+ status=t.get("status"),
+ attach_time=t.get("attach_time"),
+ delete_on_termination=t.get("delete_on_termination", False),
+ size=t.get("size"),
+ volume_type=t.get("volume_type"),
+ iops=t.get("iops"),
+ encrypted=t.get("encrypted"),
+ )
bdm[d] = bdt
return bdm
-def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
- user_data=None, instance_type='m1.small', placement=None,
- kernel_id=None, ramdisk_id=None, monitoring_enabled=None, vpc_id=None,
- vpc_name=None, subnet_id=None, subnet_name=None, private_ip_address=None,
- block_device_map=None, disable_api_termination=None,
- instance_initiated_shutdown_behavior=None, placement_group=None,
- client_token=None, security_group_ids=None, security_group_names=None,
- additional_info=None, tenancy=None, instance_profile_arn=None,
- instance_profile_name=None, ebs_optimized=None,
- network_interface_id=None, network_interface_name=None,
- region=None, key=None, keyid=None, profile=None, network_interfaces=None):
- #TODO: support multi-instance reservations
- '''
+def run(
+ image_id,
+ name=None,
+ tags=None,
+ key_name=None,
+ security_groups=None,
+ user_data=None,
+ instance_type="m1.small",
+ placement=None,
+ kernel_id=None,
+ ramdisk_id=None,
+ monitoring_enabled=None,
+ vpc_id=None,
+ vpc_name=None,
+ subnet_id=None,
+ subnet_name=None,
+ private_ip_address=None,
+ block_device_map=None,
+ disable_api_termination=None,
+ instance_initiated_shutdown_behavior=None,
+ placement_group=None,
+ client_token=None,
+ security_group_ids=None,
+ security_group_names=None,
+ additional_info=None,
+ tenancy=None,
+ instance_profile_arn=None,
+ instance_profile_name=None,
+ ebs_optimized=None,
+ network_interface_id=None,
+ network_interface_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ network_interfaces=None,
+):
+ # TODO: support multi-instance reservations
+ """
Create and start an EC2 instance.
Returns True if the instance was created; otherwise False.
@@ -980,109 +1206,133 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
network_interface_name
(string) - Name of the network interface to attach to the instance
- '''
+ """
if all((subnet_id, subnet_name)):
- raise SaltInvocationError('Only one of subnet_name or subnet_id may be '
- 'provided.')
+ raise SaltInvocationError(
+ "Only one of subnet_name or subnet_id may be " "provided."
+ )
if subnet_name:
- r = __salt__['boto_vpc.get_resource_id']('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
- if 'id' not in r:
- log.warning('Couldn\'t resolve subnet name %s.', subnet_name)
+ r = __salt__["boto_vpc.get_resource_id"](
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "id" not in r:
+ log.warning("Couldn't resolve subnet name %s.", subnet_name)
return False
- subnet_id = r['id']
+ subnet_id = r["id"]
if all((security_group_ids, security_group_names)):
- raise SaltInvocationError('Only one of security_group_ids or '
- 'security_group_names may be provided.')
+ raise SaltInvocationError(
+ "Only one of security_group_ids or " "security_group_names may be provided."
+ )
if security_group_names:
security_group_ids = []
for sgn in security_group_names:
- r = __salt__['boto_secgroup.get_group_id'](sgn, vpc_name=vpc_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ r = __salt__["boto_secgroup.get_group_id"](
+ sgn,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not r:
- log.warning('Couldn\'t resolve security group name %s', sgn)
+ log.warning("Couldn't resolve security group name %s", sgn)
return False
security_group_ids += [r]
- network_interface_args = list(map(int, [network_interface_id is not None,
- network_interface_name is not None,
- network_interfaces is not None]))
+ network_interface_args = list(
+ map(
+ int,
+ [
+ network_interface_id is not None,
+ network_interface_name is not None,
+ network_interfaces is not None,
+ ],
+ )
+ )
if sum(network_interface_args) > 1:
- raise SaltInvocationError('Only one of network_interface_id, '
- 'network_interface_name or '
- 'network_interfaces may be provided.')
+ raise SaltInvocationError(
+ "Only one of network_interface_id, "
+ "network_interface_name or "
+ "network_interfaces may be provided."
+ )
if network_interface_name:
- result = get_network_interface_id(network_interface_name,
- region=region, key=key,
- keyid=keyid,
- profile=profile)
- network_interface_id = result['result']
+ result = get_network_interface_id(
+ network_interface_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ network_interface_id = result["result"]
if not network_interface_id:
log.warning(
"Given network_interface_name '%s' cannot be mapped to an "
- "network_interface_id", network_interface_name
+ "network_interface_id",
+ network_interface_name,
)
if network_interface_id:
interface = NetworkInterfaceSpecification(
- network_interface_id=network_interface_id,
- device_index=0)
+ network_interface_id=network_interface_id, device_index=0
+ )
else:
interface = NetworkInterfaceSpecification(
- subnet_id=subnet_id,
- groups=security_group_ids,
- device_index=0)
+ subnet_id=subnet_id, groups=security_group_ids, device_index=0
+ )
if network_interfaces:
- interfaces_specs = [NetworkInterfaceSpecification(**x) for x in network_interfaces]
+ interfaces_specs = [
+ NetworkInterfaceSpecification(**x) for x in network_interfaces
+ ]
interfaces = NetworkInterfaceCollection(*interfaces_specs)
else:
interfaces = NetworkInterfaceCollection(interface)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- reservation = conn.run_instances(image_id, key_name=key_name, security_groups=security_groups,
- user_data=user_data, instance_type=instance_type,
- placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id,
- monitoring_enabled=monitoring_enabled,
- private_ip_address=private_ip_address,
- block_device_map=_to_blockdev_map(block_device_map),
- disable_api_termination=disable_api_termination,
- instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
- placement_group=placement_group, client_token=client_token,
- additional_info=additional_info,
- tenancy=tenancy, instance_profile_arn=instance_profile_arn,
- instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized,
- network_interfaces=interfaces)
+ reservation = conn.run_instances(
+ image_id,
+ key_name=key_name,
+ security_groups=security_groups,
+ user_data=user_data,
+ instance_type=instance_type,
+ placement=placement,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ monitoring_enabled=monitoring_enabled,
+ private_ip_address=private_ip_address,
+ block_device_map=_to_blockdev_map(block_device_map),
+ disable_api_termination=disable_api_termination,
+ instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
+ placement_group=placement_group,
+ client_token=client_token,
+ additional_info=additional_info,
+ tenancy=tenancy,
+ instance_profile_arn=instance_profile_arn,
+ instance_profile_name=instance_profile_name,
+ ebs_optimized=ebs_optimized,
+ network_interfaces=interfaces,
+ )
if not reservation:
- log.warning('Instance could not be reserved')
+ log.warning("Instance could not be reserved")
return False
instance = reservation.instances[0]
- status = 'pending'
- while status == 'pending':
+ status = "pending"
+ while status == "pending":
time.sleep(5)
status = instance.update()
- if status == 'running':
+ if status == "running":
if name:
- instance.add_tag('Name', name)
+ instance.add_tag("Name", name)
if tags:
instance.add_tags(tags)
- return {'instance_id': instance.id}
+ return {"instance_id": instance.id}
else:
- log.warning(
- 'Instance could not be started -- status is "%s"',
- status
- )
+ log.warning('Instance could not be started -- status is "%s"', status)
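
For readers unfamiliar with the boto 2 API used by run_instances above: one or more NetworkInterfaceSpecification objects are wrapped in a NetworkInterfaceCollection and handed to the launch call. A minimal sketch, assuming legacy boto 2 is installed and credentials are configured; all IDs are placeholders:

.. code-block:: python

    import boto.ec2
    from boto.ec2.networkinterface import (
        NetworkInterfaceCollection,
        NetworkInterfaceSpecification,
    )

    conn = boto.ec2.connect_to_region("us-east-1")
    # Primary interface on a known subnet with a single security group.
    spec = NetworkInterfaceSpecification(
        subnet_id="subnet-12345678",  # placeholder
        groups=["sg-12345678"],  # placeholder
        device_index=0,
    )
    reservation = conn.run_instances(
        "ami-12345678",  # placeholder AMI
        network_interfaces=NetworkInterfaceCollection(spec),
    )
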
def get_key(key_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a key exists. Returns the fingerprint and name if
it does, and False if it doesn't.
CLI Example:
@@ -1090,7 +1340,7 @@ def get_key(key_name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_ec2.get_key mykey
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1104,9 +1354,8 @@ def get_key(key_name, region=None, key=None, keyid=None, profile=None):
return False
-def create_key(key_name, save_path, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_key(key_name, save_path, region=None, key=None, keyid=None, profile=None):
+ """
Creates a key and saves it to a given path.
Returns the private key.
@@ -1115,7 +1364,7 @@ def create_key(key_name, save_path, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_ec2.create_key mykey /root/
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1128,9 +1377,10 @@ def create_key(key_name, save_path, region=None, key=None, keyid=None,
return False
-def import_key(key_name, public_key_material, region=None, key=None,
- keyid=None, profile=None):
- '''
+def import_key(
+ key_name, public_key_material, region=None, key=None, keyid=None, profile=None
+):
+ """
Imports the public key from an RSA key pair that you created with a third-party tool.
Supported formats:
- OpenSSH public key format (e.g., the format in ~/.ssh/authorized_keys)
@@ -1144,7 +1394,7 @@ def import_key(key_name, public_key_material, region=None, key=None,
.. code-block:: bash
salt myminion boto_ec2.import_key mykey publickey
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1157,7 +1407,7 @@ def import_key(key_name, public_key_material, region=None, key=None,
def delete_key(key_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Deletes a key. Always returns True.
CLI Example:
@@ -1165,7 +1415,7 @@ def delete_key(key_name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_ec2.delete_key mykey
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1177,9 +1427,10 @@ def delete_key(key_name, region=None, key=None, keyid=None, profile=None):
return False
-def get_keys(keynames=None, filters=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_keys(
+ keynames=None, filters=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Gets all keys or filters them by name and returns a list.
keynames (list):: A list of the names of keypairs to retrieve.
If not provided, all key pairs will be returned.
@@ -1194,7 +1445,7 @@ def get_keys(keynames=None, filters=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_ec2.get_keys
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1210,9 +1461,17 @@ def get_keys(keynames=None, filters=None, region=None, key=None,
return False
-def get_attribute(attribute, instance_name=None, instance_id=None, region=None, key=None,
- keyid=None, profile=None, filters=None):
- '''
+def get_attribute(
+ attribute,
+ instance_name=None,
+ instance_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ filters=None,
+):
+ """
Get an EC2 instance attribute.
CLI Example:
@@ -1235,28 +1494,52 @@ def get_attribute(attribute, instance_name=None, instance_id=None, region=None,
* groupSet
* ebsOptimized
* sriovNetSupport
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- attribute_list = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination',
- 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'productCodes',
- 'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport']
+ attribute_list = [
+ "instanceType",
+ "kernel",
+ "ramdisk",
+ "userData",
+ "disableApiTermination",
+ "instanceInitiatedShutdownBehavior",
+ "rootDeviceName",
+ "blockDeviceMapping",
+ "productCodes",
+ "sourceDestCheck",
+ "groupSet",
+ "ebsOptimized",
+ "sriovNetSupport",
+ ]
if not any((instance_name, instance_id)):
- raise SaltInvocationError('At least one of the following must be specified: '
- 'instance_name or instance_id.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: "
+ "instance_name or instance_id."
+ )
if instance_name and instance_id:
- raise SaltInvocationError('Both instance_name and instance_id can not be specified in the same command.')
+ raise SaltInvocationError(
+ "Both instance_name and instance_id can not be specified in the same command."
+ )
if attribute not in attribute_list:
- raise SaltInvocationError('Attribute must be one of: {0}.'.format(attribute_list))
+ raise SaltInvocationError(
+ "Attribute must be one of: {0}.".format(attribute_list)
+ )
try:
if instance_name:
- instances = find_instances(name=instance_name, region=region, key=key, keyid=keyid, profile=profile,
- filters=filters)
+ instances = find_instances(
+ name=instance_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ filters=filters,
+ )
if len(instances) > 1:
- log.error('Found more than one EC2 instance matching the criteria.')
+ log.error("Found more than one EC2 instance matching the criteria.")
return False
elif len(instances) < 1:
- log.error('Found no EC2 instance matching the criteria.')
+ log.error("Found no EC2 instance matching the criteria.")
return False
instance_id = instances[0]
instance_attribute = conn.get_instance_attribute(instance_id, attribute)
@@ -1268,9 +1551,18 @@ def get_attribute(attribute, instance_name=None, instance_id=None, region=None,
return False
-def set_attribute(attribute, attribute_value, instance_name=None, instance_id=None, region=None, key=None, keyid=None,
- profile=None, filters=None):
- '''
+def set_attribute(
+ attribute,
+ attribute_value,
+ instance_name=None,
+ instance_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ filters=None,
+):
+ """
Set an EC2 instance attribute.
Returns whether the operation succeeded or not.
@@ -1294,26 +1586,54 @@ def set_attribute(attribute, attribute_value, instance_name=None, instance_id=No
* groupSet
* ebsOptimized
* sriovNetSupport
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- attribute_list = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination',
- 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'productCodes',
- 'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport']
+ attribute_list = [
+ "instanceType",
+ "kernel",
+ "ramdisk",
+ "userData",
+ "disableApiTermination",
+ "instanceInitiatedShutdownBehavior",
+ "rootDeviceName",
+ "blockDeviceMapping",
+ "productCodes",
+ "sourceDestCheck",
+ "groupSet",
+ "ebsOptimized",
+ "sriovNetSupport",
+ ]
if not any((instance_name, instance_id)):
- raise SaltInvocationError('At least one of the following must be specified: instance_name or instance_id.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: instance_name or instance_id."
+ )
if instance_name and instance_id:
- raise SaltInvocationError('Both instance_name and instance_id can not be specified in the same command.')
+ raise SaltInvocationError(
+ "Both instance_name and instance_id can not be specified in the same command."
+ )
if attribute not in attribute_list:
- raise SaltInvocationError('Attribute must be one of: {0}.'.format(attribute_list))
+ raise SaltInvocationError(
+ "Attribute must be one of: {0}.".format(attribute_list)
+ )
try:
if instance_name:
- instances = find_instances(name=instance_name, region=region, key=key, keyid=keyid, profile=profile,
- filters=filters)
+ instances = find_instances(
+ name=instance_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ filters=filters,
+ )
if len(instances) != 1:
- raise CommandExecutionError('Found more than one EC2 instance matching the criteria.')
+ raise CommandExecutionError(
+ "Found more than one EC2 instance matching the criteria."
+ )
instance_id = instances[0]
- attribute = conn.modify_instance_attribute(instance_id, attribute, attribute_value)
+ attribute = conn.modify_instance_attribute(
+ instance_id, attribute, attribute_value
+ )
if not attribute:
return False
return attribute
@@ -1322,9 +1642,8 @@ def set_attribute(attribute, attribute_value, instance_name=None, instance_id=No
return False
-def get_network_interface_id(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_network_interface_id(name, region=None, key=None, keyid=None, profile=None):
+ """
Get an Elastic Network Interface id from its name tag.
.. versionadded:: 2016.3.0
@@ -1334,26 +1653,32 @@ def get_network_interface_id(name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_ec2.get_network_interface_id name=my_eni
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
- enis = conn.get_all_network_interfaces(filters={'tag:Name': name})
+ enis = conn.get_all_network_interfaces(filters={"tag:Name": name})
if not enis:
- r['error'] = {'message': 'No ENIs found.'}
+ r["error"] = {"message": "No ENIs found."}
elif len(enis) > 1:
- r['error'] = {'message': 'Name specified is tagged on multiple ENIs.'}
+ r["error"] = {"message": "Name specified is tagged on multiple ENIs."}
else:
eni = enis[0]
- r['result'] = eni.id
+ r["result"] = eni.id
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def get_network_interface(name=None, network_interface_id=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def get_network_interface(
+ name=None,
+ network_interface_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1363,17 +1688,17 @@ def get_network_interface(name=None, network_interface_id=None, region=None,
.. code-block:: bash
salt myminion boto_ec2.get_network_interface name=my_eni
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
result = _get_network_interface(conn, name, network_interface_id)
- if 'error' in result:
- if result['error']['message'] == 'No ENIs found.':
- r['result'] = None
+ if "error" in result:
+ if result["error"]["message"] == "No ENIs found.":
+ r["result"] = None
return r
return result
- eni = result['result']
- r['result'] = _describe_network_interface(eni)
+ eni = result["result"]
+ r["result"] = _describe_network_interface(eni)
return r
@@ -1381,59 +1706,91 @@ def _get_network_interface(conn, name=None, network_interface_id=None):
r = {}
if not (name or network_interface_id):
raise SaltInvocationError(
- 'Either name or network_interface_id must be provided.'
+ "Either name or network_interface_id must be provided."
)
try:
if network_interface_id:
enis = conn.get_all_network_interfaces([network_interface_id])
else:
- enis = conn.get_all_network_interfaces(filters={'tag:Name': name})
+ enis = conn.get_all_network_interfaces(filters={"tag:Name": name})
if not enis:
- r['error'] = {'message': 'No ENIs found.'}
+ r["error"] = {"message": "No ENIs found."}
elif len(enis) > 1:
- r['error'] = {'message': 'Name specified is tagged on multiple ENIs.'}
+ r["error"] = {"message": "Name specified is tagged on multiple ENIs."}
else:
eni = enis[0]
- r['result'] = eni
+ r["result"] = eni
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def _describe_network_interface(eni):
r = {}
- for attr in ['status', 'description', 'availability_zone', 'requesterId',
- 'requester_managed', 'mac_address', 'private_ip_address',
- 'vpc_id', 'id', 'source_dest_check', 'owner_id', 'tags',
- 'subnet_id', 'associationId', 'publicDnsName', 'owner_id',
- 'ipOwnerId', 'publicIp', 'allocationId']:
+ for attr in [
+ "status",
+ "description",
+ "availability_zone",
+ "requesterId",
+ "requester_managed",
+ "mac_address",
+ "private_ip_address",
+ "vpc_id",
+ "id",
+ "source_dest_check",
+ "owner_id",
+ "tags",
+ "subnet_id",
+ "associationId",
+ "publicDnsName",
+ "owner_id",
+ "ipOwnerId",
+ "publicIp",
+ "allocationId",
+ ]:
if hasattr(eni, attr):
r[attr] = getattr(eni, attr)
- r['region'] = eni.region.name
- r['groups'] = []
+ r["region"] = eni.region.name
+ r["groups"] = []
for group in eni.groups:
- r['groups'].append({'name': group.name, 'id': group.id})
- r['private_ip_addresses'] = []
+ r["groups"].append({"name": group.name, "id": group.id})
+ r["private_ip_addresses"] = []
for address in eni.private_ip_addresses:
- r['private_ip_addresses'].append(
- {'private_ip_address': address.private_ip_address,
- 'primary': address.primary}
+ r["private_ip_addresses"].append(
+ {
+ "private_ip_address": address.private_ip_address,
+ "primary": address.primary,
+ }
)
- r['attachment'] = {}
- for attr in ['status', 'attach_time', 'device_index',
- 'delete_on_termination', 'instance_id',
- 'instance_owner_id', 'id']:
+ r["attachment"] = {}
+ for attr in [
+ "status",
+ "attach_time",
+ "device_index",
+ "delete_on_termination",
+ "instance_id",
+ "instance_owner_id",
+ "id",
+ ]:
if hasattr(eni.attachment, attr):
- r['attachment'][attr] = getattr(eni.attachment, attr)
+ r["attachment"][attr] = getattr(eni.attachment, attr)
return r
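
_describe_network_interface flattens a boto ENI object into a plain dict by probing attributes with hasattr/getattr, which tolerates fields boto may leave unset. The same pattern in isolation, with a stand-in class rather than boto's:

.. code-block:: python

    class FakeENI:
        # Stand-in for boto.ec2.networkinterface.NetworkInterface.
        status = "available"
        mac_address = "0a:00:27:00:00:00"

    def to_dict(obj, attrs):
        # Copy only the attributes the object actually exposes.
        return {a: getattr(obj, a) for a in attrs if hasattr(obj, a)}

    print(to_dict(FakeENI(), ["status", "mac_address", "vpc_id"]))
    # {'status': 'available', 'mac_address': '0a:00:27:00:00:00'}
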
-def create_network_interface(name, subnet_id=None, subnet_name=None,
- private_ip_address=None, description=None,
- groups=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_network_interface(
+ name,
+ subnet_id=None,
+ subnet_name=None,
+ private_ip_address=None,
+ description=None,
+ groups=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1443,58 +1800,62 @@ def create_network_interface(name, subnet_id=None, subnet_name=None,
.. code-block:: bash
salt myminion boto_ec2.create_network_interface my_eni subnet-12345 description=my_eni groups=['my_group']
- '''
+ """
if not salt.utils.data.exactly_one((subnet_id, subnet_name)):
- raise SaltInvocationError('One (but not both) of subnet_id or '
- 'subnet_name must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of subnet_id or " "subnet_name must be provided."
+ )
if subnet_name:
- resource = __salt__['boto_vpc.get_resource_id']('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid,
- profile=profile)
- if 'id' not in resource:
- log.warning('Couldn\'t resolve subnet name %s.', subnet_name)
+ resource = __salt__["boto_vpc.get_resource_id"](
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "id" not in resource:
+ log.warning("Couldn't resolve subnet name %s.", subnet_name)
return False
- subnet_id = resource['id']
+ subnet_id = resource["id"]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
result = _get_network_interface(conn, name)
- if 'result' in result:
- r['error'] = {'message': 'An ENI with this Name tag already exists.'}
+ if "result" in result:
+ r["error"] = {"message": "An ENI with this Name tag already exists."}
return r
- vpc_id = __salt__['boto_vpc.get_subnet_association'](
+ vpc_id = __salt__["boto_vpc.get_subnet_association"](
[subnet_id], region=region, key=key, keyid=keyid, profile=profile
)
- vpc_id = vpc_id.get('vpc_id')
+ vpc_id = vpc_id.get("vpc_id")
if not vpc_id:
- msg = 'subnet_id {0} does not map to a valid vpc id.'.format(subnet_id)
- r['error'] = {'message': msg}
+ msg = "subnet_id {0} does not map to a valid vpc id.".format(subnet_id)
+ r["error"] = {"message": msg}
return r
- _groups = __salt__['boto_secgroup.convert_to_group_ids'](
- groups, vpc_id=vpc_id, region=region, key=key,
- keyid=keyid, profile=profile
+ _groups = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile
)
try:
eni = conn.create_network_interface(
subnet_id,
private_ip_address=private_ip_address,
description=description,
- groups=_groups
+ groups=_groups,
)
- eni.add_tag('Name', name)
+ eni.add_tag("Name", name)
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
- r['result'] = _describe_network_interface(eni)
+ r["result"] = _describe_network_interface(eni)
return r
def delete_network_interface(
- name=None, network_interface_id=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+ name=None,
+ network_interface_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1504,34 +1865,42 @@ def delete_network_interface(
.. code-block:: bash
salt myminion boto_ec2.delete_network_interface my_eni
- '''
+ """
if not (name or network_interface_id):
raise SaltInvocationError(
- 'Either name or network_interface_id must be provided.'
+ "Either name or network_interface_id must be provided."
)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
result = _get_network_interface(conn, name, network_interface_id)
- if 'error' in result:
+ if "error" in result:
return result
- eni = result['result']
+ eni = result["result"]
try:
info = _describe_network_interface(eni)
- network_interface_id = info['id']
+ network_interface_id = info["id"]
except KeyError:
- r['error'] = {'message': 'ID not found for this network interface.'}
+ r["error"] = {"message": "ID not found for this network interface."}
return r
try:
- r['result'] = conn.delete_network_interface(network_interface_id)
+ r["result"] = conn.delete_network_interface(network_interface_id)
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def attach_network_interface(device_index, name=None, network_interface_id=None,
- instance_name=None, instance_id=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_network_interface(
+ device_index,
+ name=None,
+ network_interface_id=None,
+ instance_name=None,
+ instance_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Attach an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1541,7 +1910,7 @@ def attach_network_interface(device_index, name=None, network_interface_id=None,
.. code-block:: bash
salt myminion boto_ec2.attach_network_interface my_eni instance_name=salt-master device_index=0
- '''
+ """
if not salt.utils.data.exactly_one((name, network_interface_id)):
raise SaltInvocationError(
"Exactly one (but not both) of 'name' or 'network_interface_id' "
@@ -1557,37 +1926,45 @@ def attach_network_interface(device_index, name=None, network_interface_id=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
result = _get_network_interface(conn, name, network_interface_id)
- if 'error' in result:
+ if "error" in result:
return result
- eni = result['result']
+ eni = result["result"]
try:
info = _describe_network_interface(eni)
- network_interface_id = info['id']
+ network_interface_id = info["id"]
except KeyError:
- r['error'] = {'message': 'ID not found for this network interface.'}
+ r["error"] = {"message": "ID not found for this network interface."}
return r
if instance_name:
try:
- instance_id = get_id(name=instance_name, region=region, key=key,
- keyid=keyid, profile=profile)
+ instance_id = get_id(
+ name=instance_name, region=region, key=key, keyid=keyid, profile=profile
+ )
except boto.exception.BotoServerError as e:
log.error(e)
return False
try:
- r['result'] = conn.attach_network_interface(
+ r["result"] = conn.attach_network_interface(
network_interface_id, instance_id, device_index
)
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def detach_network_interface(
- name=None, network_interface_id=None, attachment_id=None,
- force=False, region=None, key=None, keyid=None, profile=None):
- '''
+ name=None,
+ network_interface_id=None,
+ attachment_id=None,
+ force=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Detach an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1597,36 +1974,42 @@ def detach_network_interface(
.. code-block:: bash
salt myminion boto_ec2.detach_network_interface my_eni
- '''
+ """
if not (name or network_interface_id or attachment_id):
raise SaltInvocationError(
- 'Either name or network_interface_id or attachment_id must be'
- ' provided.'
+ "Either name or network_interface_id or attachment_id must be" " provided."
)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
if not attachment_id:
result = _get_network_interface(conn, name, network_interface_id)
- if 'error' in result:
+ if "error" in result:
return result
- eni = result['result']
+ eni = result["result"]
info = _describe_network_interface(eni)
try:
- attachment_id = info['attachment']['id']
+ attachment_id = info["attachment"]["id"]
except KeyError:
- r['error'] = {'message': 'Attachment id not found for this ENI.'}
+ r["error"] = {"message": "Attachment id not found for this ENI."}
return r
try:
- r['result'] = conn.detach_network_interface(attachment_id, force)
+ r["result"] = conn.detach_network_interface(attachment_id, force)
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def modify_network_interface_attribute(
- name=None, network_interface_id=None, attr=None,
- value=None, region=None, key=None, keyid=None, profile=None):
- '''
+ name=None,
+ network_interface_id=None,
+ attr=None,
+ value=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Modify an attribute of an Elastic Network Interface.
.. versionadded:: 2016.3.0
@@ -1636,67 +2019,77 @@ def modify_network_interface_attribute(
.. code-block:: bash
salt myminion boto_ec2.modify_network_interface_attribute my_eni attr=description value='example description'
- '''
+ """
if not (name or network_interface_id):
raise SaltInvocationError(
- 'Either name or network_interface_id must be provided.'
+ "Either name or network_interface_id must be provided."
)
if attr is None and value is None:
- raise SaltInvocationError(
- 'attr and value must be provided.'
- )
+ raise SaltInvocationError("attr and value must be provided.")
r = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
result = _get_network_interface(conn, name, network_interface_id)
- if 'error' in result:
+ if "error" in result:
return result
- eni = result['result']
+ eni = result["result"]
info = _describe_network_interface(eni)
- network_interface_id = info['id']
+ network_interface_id = info["id"]
# munge attr into what the API requires
- if attr == 'groups':
- _attr = 'groupSet'
- elif attr == 'source_dest_check':
- _attr = 'sourceDestCheck'
- elif attr == 'delete_on_termination':
- _attr = 'deleteOnTermination'
+ if attr == "groups":
+ _attr = "groupSet"
+ elif attr == "source_dest_check":
+ _attr = "sourceDestCheck"
+ elif attr == "delete_on_termination":
+ _attr = "deleteOnTermination"
else:
_attr = attr
_value = value
- if info.get('vpc_id') and _attr == 'groupSet':
- _value = __salt__['boto_secgroup.convert_to_group_ids'](
- value, vpc_id=info.get('vpc_id'), region=region, key=key,
- keyid=keyid, profile=profile
+ if info.get("vpc_id") and _attr == "groupSet":
+ _value = __salt__["boto_secgroup.convert_to_group_ids"](
+ value,
+ vpc_id=info.get("vpc_id"),
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
)
if not _value:
- r['error'] = {
- 'message': ('Security groups do not map to valid security'
- ' group ids')
+ r["error"] = {
+ "message": ("Security groups do not map to valid security" " group ids")
}
return r
_attachment_id = None
- if _attr == 'deleteOnTermination':
+ if _attr == "deleteOnTermination":
try:
- _attachment_id = info['attachment']['id']
+ _attachment_id = info["attachment"]["id"]
except KeyError:
- r['error'] = {
- 'message': ('No attachment id found for this ENI. The ENI must'
- ' be attached before delete_on_termination can be'
- ' modified')
+ r["error"] = {
+ "message": (
+ "No attachment id found for this ENI. The ENI must"
+ " be attached before delete_on_termination can be"
+ " modified"
+ )
}
return r
try:
- r['result'] = conn.modify_network_interface_attribute(
+ r["result"] = conn.modify_network_interface_attribute(
network_interface_id, _attr, _value, attachment_id=_attachment_id
)
except boto.exception.EC2ResponseError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
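
The if/elif chain above that translates Salt-style attribute names into the EC2 API's camelCase spellings could equally be a lookup table; an equivalent formulation, not what this diff does:

.. code-block:: python

    _ATTR_MAP = {
        "groups": "groupSet",
        "source_dest_check": "sourceDestCheck",
        "delete_on_termination": "deleteOnTermination",
    }

    def munge_attr(attr):
        # Names that need no translation fall through unchanged.
        return _ATTR_MAP.get(attr, attr)

    assert munge_attr("groups") == "groupSet"
    assert munge_attr("description") == "description"
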
-def get_all_volumes(volume_ids=None, filters=None, return_objs=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_all_volumes(
+ volume_ids=None,
+ filters=None,
+ return_objs=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get a list of all EBS volumes, optionally filtered by the provided 'filters' param
.. versionadded:: 2016.11.0
@@ -1738,7 +2131,7 @@ def get_all_volumes(volume_ids=None, filters=None, return_objs=False,
salt-call boto_ec2.get_all_volumes filters='{"tag:Name": "myVolume01"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1749,9 +2142,16 @@ def get_all_volumes(volume_ids=None, filters=None, return_objs=False,
return []
-def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def set_volumes_tags(
+ tag_maps,
+ authoritative=False,
+ dry_run=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
.. versionadded:: 2016.11.0
tag_maps (list)
@@ -1802,82 +2202,114 @@ def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
returns (dict)
A dict describing status and any changes.
- '''
- ret = {'success': True, 'comment': '', 'changes': {}}
- running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
+ """
+ ret = {"success": True, "comment": "", "changes": {}}
+ running_states = ("pending", "rebooting", "running", "stopping", "stopped")
### First create a dictionary mapping all changes for a given volume to its volume ID...
tag_sets = {}
for tm in tag_maps:
- filters = dict(tm.get('filters', {}))
- tags = dict(tm.get('tags', {}))
- args = {'return_objs': True, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
+ filters = dict(tm.get("filters", {}))
+ tags = dict(tm.get("tags", {}))
+ args = {
+ "return_objs": True,
+ "region": region,
+ "key": key,
+ "keyid": keyid,
+ "profile": profile,
+ }
new_filters = {}
- log.debug('got filters: %s', filters)
+ log.debug("got filters: %s", filters)
instance_id = None
- in_states = tm.get('in_states', running_states)
+ in_states = tm.get("in_states", running_states)
try:
for k, v in filters.items():
- if k == 'volume_ids':
- args['volume_ids'] = v
- elif k == 'instance_name':
- instance_id = get_id(name=v, in_states=in_states, region=region, key=key,
- keyid=keyid, profile=profile)
+ if k == "volume_ids":
+ args["volume_ids"] = v
+ elif k == "instance_name":
+ instance_id = get_id(
+ name=v,
+ in_states=in_states,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not instance_id:
msg = "Couldn't resolve instance Name {0} to an ID.".format(v)
raise CommandExecutionError(msg)
- new_filters['attachment.instance_id'] = instance_id
+ new_filters["attachment.instance_id"] = instance_id
else:
new_filters[k] = v
except CommandExecutionError as e:
log.warning(e)
continue  # Hmm, abort or do what we can...? Guess the latter for now.
- args['filters'] = new_filters
+ args["filters"] = new_filters
volumes = get_all_volumes(**args)
- log.debug('got volume list: %s', volumes)
+ log.debug("got volume list: %s", volumes)
for vol in volumes:
- tag_sets.setdefault(vol.id.replace('-', '_'), {'vol': vol, 'tags': tags.copy()})['tags'].update(tags.copy())
- log.debug('tag_sets after munging: %s', tag_sets)
+ tag_sets.setdefault(
+ vol.id.replace("-", "_"), {"vol": vol, "tags": tags.copy()}
+ )["tags"].update(tags.copy())
+ log.debug("tag_sets after munging: %s", tag_sets)
### ...then loop through all those volume->tag pairs and apply them.
- changes = {'old': {}, 'new': {}}
+ changes = {"old": {}, "new": {}}
for volume in tag_sets.values():
- vol, tags = volume['vol'], volume['tags']
- log.debug('current tags on vol.id %s: %s', vol.id, dict(getattr(vol, 'tags', {})))
- curr = set(dict(getattr(vol, 'tags', {})).keys())
- log.debug('requested tags on vol.id %s: %s', vol.id, tags)
+ vol, tags = volume["vol"], volume["tags"]
+ log.debug(
+ "current tags on vol.id %s: %s", vol.id, dict(getattr(vol, "tags", {}))
+ )
+ curr = set(dict(getattr(vol, "tags", {})).keys())
+ log.debug("requested tags on vol.id %s: %s", vol.id, tags)
req = set(tags.keys())
add = list(req - curr)
update = [r for r in (req & curr) if vol.tags[r] != tags[r]]
remove = list(curr - req)
if add or update or (authoritative and remove):
- changes['old'][vol.id] = dict(getattr(vol, 'tags', {}))
- changes['new'][vol.id] = tags
+ changes["old"][vol.id] = dict(getattr(vol, "tags", {}))
+ changes["new"][vol.id] = tags
else:
- log.debug('No changes needed for vol.id %s', vol.id)
+ log.debug("No changes needed for vol.id %s", vol.id)
if add:
d = dict((k, tags[k]) for k in add)
- log.debug('New tags for vol.id %s: %s', vol.id, d)
+ log.debug("New tags for vol.id %s: %s", vol.id, d)
if update:
d = dict((k, tags[k]) for k in update)
- log.debug('Updated tags for vol.id %s: %s', vol.id, d)
+ log.debug("Updated tags for vol.id %s: %s", vol.id, d)
if not dry_run:
- if not create_tags(vol.id, tags, region=region, key=key, keyid=keyid, profile=profile):
- ret['success'] = False
- ret['comment'] = "Failed to set tags on vol.id {0}: {1}".format(vol.id, tags)
+ if not create_tags(
+ vol.id, tags, region=region, key=key, keyid=keyid, profile=profile
+ ):
+ ret["success"] = False
+ ret["comment"] = "Failed to set tags on vol.id {0}: {1}".format(
+ vol.id, tags
+ )
return ret
if authoritative:
if remove:
- log.debug('Removed tags for vol.id %s: %s', vol.id, remove)
- if not delete_tags(vol.id, remove, region=region, key=key, keyid=keyid, profile=profile):
- ret['success'] = False
- ret['comment'] = "Failed to remove tags on vol.id {0}: {1}".format(vol.id, remove)
+ log.debug("Removed tags for vol.id %s: %s", vol.id, remove)
+ if not delete_tags(
+ vol.id,
+ remove,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ):
+ ret["success"] = False
+ ret[
+ "comment"
+ ] = "Failed to remove tags on vol.id {0}: {1}".format(
+ vol.id, remove
+ )
return ret
- ret['changes'].update(changes) if changes['old'] or changes['new'] else None # pylint: disable=W0106
+ if changes["old"] or changes["new"]:
+ ret["changes"].update(changes)
return ret
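
The reconciliation loop in set_volumes_tags reduces to three set operations: tags to add (requested but absent), to update (present with a different value), and to remove (present but not requested, honored only when authoritative=True). A standalone sketch over plain dicts:

.. code-block:: python

    def diff_tags(current, requested):
        curr, req = set(current), set(requested)
        add = req - curr
        update = {k for k in req & curr if current[k] != requested[k]}
        remove = curr - req  # applied only in authoritative mode
        return add, update, remove

    add, update, remove = diff_tags(
        {"Name": "old", "Env": "prod"}, {"Name": "new", "Team": "ops"}
    )
    print(sorted(add), sorted(update), sorted(remove))
    # ['Team'] ['Name'] ['Env']
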
def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
@@ -1894,7 +2326,7 @@ def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_all_tags(filters)
@@ -1910,7 +2342,7 @@ def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
def create_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Create new metadata tags for the specified resource ids.
.. versionadded:: 2016.11.0
@@ -1929,7 +2361,7 @@ def create_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=N
salt-call boto_ec2.create_tags vol-12345678 '{"Name": "myVolume01"}'
- '''
+ """
if not isinstance(resource_ids, list):
resource_ids = [resource_ids]
@@ -1943,7 +2375,7 @@ def create_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=N
def delete_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete metadata tags for the specified resource ids.
.. versionadded:: 2016.11.0
@@ -1966,7 +2398,7 @@ def delete_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=N
salt-call boto_ec2.delete_tags vol-12345678 '{"Name": "myVolume01"}'
salt-call boto_ec2.delete_tags vol-12345678 '["Name","MountPoint"]'
- '''
+ """
if not isinstance(resource_ids, list):
resource_ids = [resource_ids]
@@ -1979,9 +2411,18 @@ def delete_tags(resource_ids, tags, region=None, key=None, keyid=None, profile=N
return False
-def detach_volume(volume_id, instance_id=None, device=None, force=False,
- wait_for_detachement=False, region=None, key=None, keyid=None, profile=None):
- '''
+def detach_volume(
+ volume_id,
+ instance_id=None,
+ device=None,
+ force=False,
+ wait_for_detachement=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Detach an EBS volume from an EC2 instance.
.. versionadded:: 2016.11.0
@@ -2010,11 +2451,15 @@ def detach_volume(volume_id, instance_id=None, device=None, force=False,
salt-call boto_ec2.detach_volume vol-12345678 i-87654321
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.detach_volume(volume_id, instance_id, device, force)
- if ret and wait_for_detachement and not _wait_for_volume_available(conn, volume_id):
+ if (
+ ret
+ and wait_for_detachement
+ and not _wait_for_volume_available(conn, volume_id)
+ ):
timeout_msg = 'Timed out waiting for the volume status "available".'
log.error(timeout_msg)
return False
@@ -2024,9 +2469,17 @@ def detach_volume(volume_id, instance_id=None, device=None, force=False,
return False
-def delete_volume(volume_id, instance_id=None, device=None, force=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_volume(
+ volume_id,
+ instance_id=None,
+ device=None,
+ force=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete an EBS volume.
.. versionadded:: 2016.11.0
@@ -2045,7 +2498,7 @@ def delete_volume(volume_id, instance_id=None, device=None, force=False,
salt-call boto_ec2.delete_volume vol-12345678
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_volume(volume_id)
@@ -2070,15 +2523,16 @@ def _wait_for_volume_available(conn, volume_id, retries=5, interval=5):
if len(vols) != 1:
return False
vol = vols[0]
- if vol.status == 'available':
+ if vol.status == "available":
return True
if i > retries:
return False
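
The hunk above shows only fragments of _wait_for_volume_available; the underlying shape is a bounded poll-and-sleep loop. A generic reconstruction of that pattern under stated assumptions (wait_until and the status lambda are illustrative, not the source function):

.. code-block:: python

    import time

    def wait_until(check, retries=5, interval=5):
        # Poll `check` up to retries+1 times, sleeping `interval` seconds
        # between failed attempts; True on success, False on timeout.
        for attempt in range(retries + 1):
            if check():
                return True
            if attempt < retries:
                time.sleep(interval)
        return False

    # e.g. wait_until(lambda: volume_status("vol-12345678") == "available")
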
-def attach_volume(volume_id, instance_id, device,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_volume(
+ volume_id, instance_id, device, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach an EBS volume to an EC2 instance.
..
@@ -2098,7 +2552,7 @@ def attach_volume(volume_id, instance_id, device,
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.attach_volume(volume_id, instance_id, device)
@@ -2107,10 +2561,21 @@ def attach_volume(volume_id, instance_id, device,
return False
-def create_volume(zone_name, size=None, snapshot_id=None, volume_type=None,
- iops=None, encrypted=False, kms_key_id=None, wait_for_creation=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_volume(
+ zone_name,
+ size=None,
+ snapshot_id=None,
+ volume_type=None,
+ iops=None,
+ encrypted=False,
+ kms_key_id=None,
+ wait_for_creation=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an EBS volume to an availability zone.
..
@@ -2147,23 +2612,27 @@ def create_volume(zone_name, size=None, snapshot_id=None, volume_type=None,
salt-call boto_ec2.create_volume us-east-1a size=10
salt-call boto_ec2.create_volume us-east-1a snapshot_id=snap-0123abcd
- '''
+ """
if size is None and snapshot_id is None:
- raise SaltInvocationError(
- 'Size must be provided if not created from snapshot.'
- )
+ raise SaltInvocationError("Size must be provided if not created from snapshot.")
ret = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- vol = conn.create_volume(size=size, zone=zone_name, snapshot=snapshot_id,
- volume_type=volume_type, iops=iops, encrypted=encrypted,
- kms_key_id=kms_key_id)
+ vol = conn.create_volume(
+ size=size,
+ zone=zone_name,
+ snapshot=snapshot_id,
+ volume_type=volume_type,
+ iops=iops,
+ encrypted=encrypted,
+ kms_key_id=kms_key_id,
+ )
if wait_for_creation and not _wait_for_volume_available(conn, vol.id):
timeout_msg = 'Timed out waiting for the volume status "available".'
log.error(timeout_msg)
- ret['error'] = timeout_msg
+ ret["error"] = timeout_msg
else:
- ret['result'] = vol.id
+ ret["result"] = vol.id
except boto.exception.BotoServerError as error:
- ret['error'] = __utils__['boto.get_error'](error)
+ ret["error"] = __utils__["boto.get_error"](error)
return ret
diff --git a/salt/modules/boto_efs.py b/salt/modules/boto_efs.py
index 64930a6dd36..225403d5fbf 100644
--- a/salt/modules/boto_efs.py
+++ b/salt/modules/boto_efs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon EFS
.. versionadded:: 2017.7.0
@@ -46,47 +46,43 @@ Connection module for Amazon EFS
region: us-east-1
:depends: boto3
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+# Import salt libs
+import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
+
try:
import boto3
+
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
-# Import salt libs
-import salt.utils.versions
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto3 libraries exist and if boto3 libraries are greater than
a given version.
- '''
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.0.0',
- check_boto=False
- )
+ """
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.0.0", check_boto=False)
-def _get_conn(key=None,
- keyid=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs):
+ """
Create a boto3 client connection to EFS
- '''
+ """
client = None
if profile:
if isinstance(profile, six.string_types):
@@ -97,39 +93,41 @@ def _get_conn(key=None,
elif key or keyid or region:
profile = {}
if key:
- profile['key'] = key
+ profile["key"] = key
if keyid:
- profile['keyid'] = keyid
+ profile["keyid"] = keyid
if region:
- profile['region'] = region
+ profile["region"] = region
if isinstance(profile, dict):
- if 'region' in profile:
- profile['region_name'] = profile['region']
- profile.pop('region', None)
- if 'key' in profile:
- profile['aws_secret_access_key'] = profile['key']
- profile.pop('key', None)
- if 'keyid' in profile:
- profile['aws_access_key_id'] = profile['keyid']
- profile.pop('keyid', None)
+ if "region" in profile:
+ profile["region_name"] = profile["region"]
+ profile.pop("region", None)
+ if "key" in profile:
+ profile["aws_secret_access_key"] = profile["key"]
+ profile.pop("key", None)
+ if "keyid" in profile:
+ profile["aws_access_key_id"] = profile["keyid"]
+ profile.pop("keyid", None)
- client = boto3.client('efs', **profile)
+ client = boto3.client("efs", **profile)
else:
- client = boto3.client('efs')
+ client = boto3.client("efs")
return client
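
_get_conn for EFS renames Salt's profile keys (region, key, keyid) to the keyword names boto3.client expects. The translation in isolation; the dict munging itself makes no AWS call:

.. code-block:: python

    _RENAMES = {
        "region": "region_name",
        "key": "aws_secret_access_key",
        "keyid": "aws_access_key_id",
    }

    def to_boto3_kwargs(profile):
        # Rename Salt's keys to boto3's; pass anything else through.
        return {_RENAMES.get(k, k): v for k, v in profile.items()}

    print(to_boto3_kwargs({"region": "us-east-1", "keyid": "AKIAEXAMPLE"}))
    # {'region_name': 'us-east-1', 'aws_access_key_id': 'AKIAEXAMPLE'}
    # usage: boto3.client("efs", **to_boto3_kwargs(profile))
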
-def create_file_system(name,
- performance_mode='generalPurpose',
- keyid=None,
- key=None,
- profile=None,
- region=None,
- creation_token=None,
- **kwargs):
- '''
+def create_file_system(
+ name,
+ performance_mode="generalPurpose",
+ keyid=None,
+ key=None,
+ profile=None,
+ region=None,
+ creation_token=None,
+ **kwargs
+):
+ """
Creates a new, empty file system.
name
@@ -151,7 +149,7 @@ def create_file_system(name,
.. code-block:: bash
salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose
- '''
+ """
if creation_token is None:
creation_token = name
@@ -160,28 +158,31 @@ def create_file_system(name,
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
- response = client.create_file_system(CreationToken=creation_token,
- PerformanceMode=performance_mode)
+ response = client.create_file_system(
+ CreationToken=creation_token, PerformanceMode=performance_mode
+ )
- if 'FileSystemId' in response:
- client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags)
+ if "FileSystemId" in response:
+ client.create_tags(FileSystemId=response["FileSystemId"], Tags=tags)
- if 'Name' in response:
- response['Name'] = name
+ if "Name" in response:
+ response["Name"] = name
return response
-def create_mount_target(filesystemid,
- subnetid,
- ipaddress=None,
- securitygroups=None,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def create_mount_target(
+ filesystemid,
+ subnetid,
+ ipaddress=None,
+ securitygroups=None,
+ keyid=None,
+ key=None,
+ profile=None,
+ region=None,
+ **kwargs
+):
+ """
Creates a mount target for a file system.
You can then mount the file system on EC2 instances via the mount target.
@@ -217,37 +218,34 @@ def create_mount_target(filesystemid,
.. code-block:: bash
salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if ipaddress is None and securitygroups is None:
- return client.create_mount_target(FileSystemId=filesystemid,
- SubnetId=subnetid)
+ return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid)
if ipaddress is None:
- return client.create_mount_target(FileSystemId=filesystemid,
- SubnetId=subnetid,
- SecurityGroups=securitygroups)
+ return client.create_mount_target(
+ FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups
+ )
if securitygroups is None:
- return client.create_mount_target(FileSystemId=filesystemid,
- SubnetId=subnetid,
- IpAddress=ipaddress)
+ return client.create_mount_target(
+ FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress
+ )
- return client.create_mount_target(FileSystemId=filesystemid,
- SubnetId=subnetid,
- IpAddress=ipaddress,
- SecurityGroups=securitygroups)
+ return client.create_mount_target(
+ FileSystemId=filesystemid,
+ SubnetId=subnetid,
+ IpAddress=ipaddress,
+ SecurityGroups=securitygroups,
+ )
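
The four-way branch in create_mount_target exists presumably because boto3's parameter validation rejects explicit None values for IpAddress and SecurityGroups. An alternative sketch, not what the source does, is to build the kwargs dict incrementally and make a single call:

.. code-block:: python

    def mount_target_kwargs(filesystemid, subnetid, ipaddress=None, securitygroups=None):
        # Only include optional parameters that were actually supplied.
        kwargs = {"FileSystemId": filesystemid, "SubnetId": subnetid}
        if ipaddress is not None:
            kwargs["IpAddress"] = ipaddress
        if securitygroups is not None:
            kwargs["SecurityGroups"] = securitygroups
        return kwargs

    # usage: client.create_mount_target(**mount_target_kwargs("fs-1234", "subnet-1234"))
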
-def create_tags(filesystemid,
- tags,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def create_tags(
+ filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs
+):
+ """
Creates or overwrites tags associated with a file system.
Each tag is a key-value pair. If a tag key specified in the request
already exists on the file system, this operation overwrites
@@ -264,24 +262,21 @@ def create_tags(filesystemid,
.. code-block:: bash
salt 'my-minion' boto_efs.create_tags
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
new_tags = []
for k, v in six.iteritems(tags):
- new_tags.append({'Key': k, 'Value': v})
+ new_tags.append({"Key": k, "Value": v})
client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
-def delete_file_system(filesystemid,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def delete_file_system(
+ filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs
+):
+ """
Deletes a file system, permanently severing access to its contents.
Upon return, the file system no longer exists and you can't access
any contents of the deleted file system. You can't delete a file system
@@ -296,20 +291,17 @@ def delete_file_system(filesystemid,
.. code-block:: bash
salt 'my-minion' boto_efs.delete_file_system filesystemid
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_file_system(FileSystemId=filesystemid)
-def delete_mount_target(mounttargetid,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def delete_mount_target(
+ mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs
+):
+ """
Deletes the specified mount target.
This operation forcibly breaks any mounts of the file system via the
@@ -330,21 +322,17 @@ def delete_mount_target(mounttargetid,
.. code-block:: bash
salt 'my-minion' boto_efs.delete_mount_target mounttargetid
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_mount_target(MountTargetId=mounttargetid)
-def delete_tags(filesystemid,
- tags,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def delete_tags(
+ filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs
+):
+ """
Deletes the specified tags from a file system.
filesystemid
@@ -358,21 +346,23 @@ def delete_tags(filesystemid,
.. code-block:: bash
salt 'my-minion' boto_efs.delete_tags
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_tags(FileSystemId=filesystemid, Tags=tags)
-def get_file_systems(filesystemid=None,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- creation_token=None,
- **kwargs):
- '''
+def get_file_systems(
+ filesystemid=None,
+ keyid=None,
+ key=None,
+ profile=None,
+ region=None,
+ creation_token=None,
+ **kwargs
+):
+ """
Get the properties of all EFS file systems, or of a specific file system
if filesystemid is specified
@@ -393,14 +383,15 @@ def get_file_systems(filesystemid=None,
.. code-block:: bash
salt 'my-minion' boto_efs.get_file_systems efs-id
- '''
+ """
result = None
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if filesystemid and creation_token:
- response = client.describe_file_systems(FileSystemId=filesystemid,
- CreationToken=creation_token)
+ response = client.describe_file_systems(
+ FileSystemId=filesystemid, CreationToken=creation_token
+ )
result = response["FileSystems"]
elif filesystemid:
response = client.describe_file_systems(FileSystemId=filesystemid)
@@ -414,21 +405,22 @@ def get_file_systems(filesystemid=None,
result = response["FileSystems"]
while "NextMarker" in response:
- response = client.describe_file_systems(
- Marker=response["NextMarker"])
+ response = client.describe_file_systems(Marker=response["NextMarker"])
result.extend(response["FileSystems"])
return result
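
The NextMarker loop above is standard EFS pagination: each response may carry a marker keying the next page. The same loop against a stubbed client (the stub stands in for boto3.client("efs")):

.. code-block:: python

    def all_file_systems(client):
        response = client.describe_file_systems()
        result = response["FileSystems"]
        while "NextMarker" in response:
            response = client.describe_file_systems(Marker=response["NextMarker"])
            result.extend(response["FileSystems"])
        return result

    class StubClient:
        _pages = [
            {"FileSystems": [{"FileSystemId": "fs-1"}], "NextMarker": "m1"},
            {"FileSystems": [{"FileSystemId": "fs-2"}]},
        ]

        def describe_file_systems(self, Marker=None):
            return self._pages[0] if Marker is None else self._pages[1]

    assert [f["FileSystemId"] for f in all_file_systems(StubClient())] == ["fs-1", "fs-2"]
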
-def get_mount_targets(filesystemid=None,
- mounttargetid=None,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def get_mount_targets(
+ filesystemid=None,
+ mounttargetid=None,
+ keyid=None,
+ key=None,
+ profile=None,
+ region=None,
+ **kwargs
+):
+ """
Get all the EFS mount target properties for a specific filesystemid, or
the properties for a specific mounttargetid. One or the other must be
specified.
@@ -449,7 +441,7 @@ def get_mount_targets(filesystemid=None,
.. code-block:: bash
salt 'my-minion' boto_efs.get_mount_targets
- '''
+ """
result = None
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
@@ -458,8 +450,9 @@ def get_mount_targets(filesystemid=None,
response = client.describe_mount_targets(FileSystemId=filesystemid)
result = response["MountTargets"]
while "NextMarker" in response:
- response = client.describe_mount_targets(FileSystemId=filesystemid,
- Marker=response["NextMarker"])
+ response = client.describe_mount_targets(
+ FileSystemId=filesystemid, Marker=response["NextMarker"]
+ )
result.extend(response["MountTargets"])
elif mounttargetid:
response = client.describe_mount_targets(MountTargetId=mounttargetid)
@@ -468,13 +461,8 @@ def get_mount_targets(filesystemid=None,
return result
-def get_tags(filesystemid,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs):
+ """
Return the tags associated with an EFS instance.
filesystemid
@@ -488,27 +476,30 @@ def get_tags(filesystemid,
.. code-block:: bash
salt 'my-minion' boto_efs.get_tags efs-id
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
response = client.describe_tags(FileSystemId=filesystemid)
result = response["Tags"]
while "NextMarker" in response:
- response = client.describe_tags(FileSystemId=filesystemid,
- Marker=response["NextMarker"])
+ response = client.describe_tags(
+ FileSystemId=filesystemid, Marker=response["NextMarker"]
+ )
result.extend(response["Tags"])
return result
-def set_security_groups(mounttargetid,
- securitygroup,
- keyid=None,
- key=None,
- profile=None,
- region=None,
- **kwargs):
- '''
+def set_security_groups(
+ mounttargetid,
+ securitygroup,
+ keyid=None,
+ key=None,
+ profile=None,
+ region=None,
+ **kwargs
+):
+ """
Modifies the set of security groups in effect for a mount target
mounttargetid
@@ -522,8 +513,9 @@ def set_security_groups(mounttargetid,
.. code-block:: bash
salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group
- '''
+ """
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
- client.modify_mount_target_security_groups(MountTargetId=mounttargetid,
- SecurityGroups=securitygroup)
+ client.modify_mount_target_security_groups(
+ MountTargetId=mounttargetid, SecurityGroups=securitygroup
+ )
diff --git a/salt/modules/boto_elasticache.py b/salt/modules/boto_elasticache.py
index 8f9cdf8c7fc..f2008da77dc 100644
--- a/salt/modules/boto_elasticache.py
+++ b/salt/modules/boto_elasticache.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Elasticache
.. versionadded:: 2014.7.0
@@ -40,9 +40,9 @@ Connection module for Amazon Elasticache
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
@@ -50,11 +50,12 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
-# Import Salt libs
-from salt.ext import six
-from salt.exceptions import SaltInvocationError
import salt.utils.odict as odict
import salt.utils.versions
+from salt.exceptions import SaltInvocationError
+
+# Import Salt libs
+from salt.ext import six
log = logging.getLogger(__name__)
@@ -63,35 +64,35 @@ try:
# pylint: disable=unused-import
import boto
import boto.elasticache
+
# pylint: enable=unused-import
import boto.utils
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
- has_boto_reqs = salt.utils.versions.check_boto_reqs(
- check_boto3=False
- )
+ """
+ has_boto_reqs = salt.utils.versions.check_boto_reqs(check_boto3=False)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "elasticache", pack=__salt__)
return has_boto_reqs
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a cache cluster exists.
CLI example::
salt myminion boto_elasticache.exists myelasticache
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -103,13 +104,13 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
def group_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a replication group exists.
CLI example::
salt myminion boto_elasticache.group_exists myelasticache
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -120,73 +121,82 @@ def group_exists(name, region=None, key=None, keyid=None, profile=None):
return False
-def create_replication_group(name, primary_cluster_id, replication_group_description,
- wait=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_replication_group(
+ name,
+ primary_cluster_id,
+ replication_group_description,
+ wait=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create replication group.
CLI example::
salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
try:
- cc = conn.create_replication_group(name, primary_cluster_id,
- replication_group_description)
+ cc = conn.create_replication_group(
+ name, primary_cluster_id, replication_group_description
+ )
if not wait:
- log.info('Created cache cluster %s.', name)
+ log.info("Created cache cluster %s.", name)
return True
while True:
time.sleep(3)
config = describe_replication_group(name, region, key, keyid, profile)
if not config:
return True
- if config['status'] == 'available':
+ if config["status"] == "available":
return True
except boto.exception.BotoServerError as e:
- msg = 'Failed to create replication group {0}.'.format(name)
+ msg = "Failed to create replication group {0}.".format(name)
log.error(msg)
log.debug(e)
return {}
def delete_replication_group(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an ElastiCache replication group.
CLI example::
salt myminion boto_elasticache.delete_replication_group my-replication-group \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
try:
conn.delete_replication_group(name)
- msg = 'Deleted ElastiCache replication group {0}.'.format(name)
+ msg = "Deleted ElastiCache replication group {0}.".format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to delete ElastiCache replication group {0}'.format(name)
+ msg = "Failed to delete ElastiCache replication group {0}".format(name)
log.error(msg)
return False
-def describe_replication_group(name, region=None, key=None, keyid=None,
- profile=None, parameter=None):
- '''
+def describe_replication_group(
+ name, region=None, key=None, keyid=None, profile=None, parameter=None
+):
+ """
Get replication group information.
CLI example::
salt myminion boto_elasticache.describe_replication_group mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
@@ -194,46 +204,52 @@ def describe_replication_group(name, region=None, key=None, keyid=None,
try:
cc = conn.describe_replication_groups(name)
except boto.exception.BotoServerError as e:
- msg = 'Failed to get config for cache cluster {0}.'.format(name)
+ msg = "Failed to get config for cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return {}
ret = odict.OrderedDict()
- cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']
- cc = cc['ReplicationGroups'][0]
+ cc = cc["DescribeReplicationGroupsResponse"]["DescribeReplicationGroupsResult"]
+ cc = cc["ReplicationGroups"][0]
- attrs = ['status', 'description', 'primary_endpoint',
- 'member_clusters', 'replication_group_id',
- 'pending_modified_values', 'primary_cluster_id',
- 'node_groups']
+ attrs = [
+ "status",
+ "description",
+ "primary_endpoint",
+ "member_clusters",
+ "replication_group_id",
+ "pending_modified_values",
+ "primary_cluster_id",
+ "node_groups",
+ ]
for key, val in six.iteritems(cc):
_key = boto.utils.pythonize_name(key)
- if _key == 'status':
+ if _key == "status":
if val:
ret[_key] = val
else:
ret[_key] = None
- if _key == 'description':
+ if _key == "description":
if val:
ret[_key] = val
else:
ret[_key] = None
- if _key == 'replication_group_id':
+ if _key == "replication_group_id":
if val:
ret[_key] = val
else:
ret[_key] = None
- if _key == 'member_clusters':
+ if _key == "member_clusters":
if val:
ret[_key] = val
else:
ret[_key] = None
- if _key == 'node_groups':
+ if _key == "node_groups":
if val:
ret[_key] = val
else:
ret[_key] = None
- if _key == 'pending_modified_values':
+ if _key == "pending_modified_values":
if val:
ret[_key] = val
else:
@@ -242,105 +258,115 @@ def describe_replication_group(name, region=None, key=None, keyid=None,
def get_config(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the configuration for a cache cluster.
CLI example::
salt myminion boto_elasticache.get_config myelasticache
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
try:
- cc = conn.describe_cache_clusters(name,
- show_cache_node_info=True)
+ cc = conn.describe_cache_clusters(name, show_cache_node_info=True)
except boto.exception.BotoServerError as e:
- msg = 'Failed to get config for cache cluster {0}.'.format(name)
+ msg = "Failed to get config for cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return {}
- cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']
- cc = cc['CacheClusters'][0]
+ cc = cc["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]
+ cc = cc["CacheClusters"][0]
ret = odict.OrderedDict()
- attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id',
- 'cache_security_groups', 'replication_group_id',
- 'auto_minor_version_upgrade', 'num_cache_nodes',
- 'preferred_availability_zone', 'security_groups',
- 'cache_subnet_group_name', 'engine_version', 'cache_node_type',
- 'notification_configuration', 'preferred_maintenance_window',
- 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes']
+ attrs = [
+ "engine",
+ "cache_parameter_group",
+ "cache_cluster_id",
+ "cache_security_groups",
+ "replication_group_id",
+ "auto_minor_version_upgrade",
+ "num_cache_nodes",
+ "preferred_availability_zone",
+ "security_groups",
+ "cache_subnet_group_name",
+ "engine_version",
+ "cache_node_type",
+ "notification_configuration",
+ "preferred_maintenance_window",
+ "configuration_endpoint",
+ "cache_cluster_status",
+ "cache_nodes",
+ ]
for key, val in six.iteritems(cc):
_key = boto.utils.pythonize_name(key)
if _key not in attrs:
continue
- if _key == 'cache_parameter_group':
+ if _key == "cache_parameter_group":
if val:
- ret[_key] = val['CacheParameterGroupName']
+ ret[_key] = val["CacheParameterGroupName"]
else:
ret[_key] = None
- elif _key == 'cache_nodes':
+ elif _key == "cache_nodes":
if val:
ret[_key] = [k for k in val]
else:
ret[_key] = []
- elif _key == 'cache_security_groups':
+ elif _key == "cache_security_groups":
if val:
- ret[_key] = [k['CacheSecurityGroupName'] for k in val]
+ ret[_key] = [k["CacheSecurityGroupName"] for k in val]
else:
ret[_key] = []
- elif _key == 'configuration_endpoint':
+ elif _key == "configuration_endpoint":
if val:
- ret['port'] = val['Port']
- ret['address'] = val['Address']
+ ret["port"] = val["Port"]
+ ret["address"] = val["Address"]
else:
- ret['port'] = None
- ret['address'] = None
- elif _key == 'notification_configuration':
+ ret["port"] = None
+ ret["address"] = None
+ elif _key == "notification_configuration":
if val:
- ret['notification_topic_arn'] = val['TopicArn']
+ ret["notification_topic_arn"] = val["TopicArn"]
else:
- ret['notification_topic_arn'] = None
+ ret["notification_topic_arn"] = None
else:
ret[_key] = val
return ret
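
get_config filters the raw describe_cache_clusters response through boto.utils.pythonize_name, which turns the API's CamelCase keys into the snake_case names listed in attrs. For readers without boto at hand, a rough equivalent — this regex recipe approximates boto's behaviour rather than reproducing its exact implementation:

    import re

    def pythonize_name(name):
        # "CacheClusterStatus" -> "cache_cluster_status"
        s1 = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
        return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

    assert pythonize_name("CacheClusterStatus") == "cache_cluster_status"
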
def get_node_host(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get hostname from cache node
CLI example::
salt myminion boto_elasticache.get_node_host myelasticache
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
try:
- cc = conn.describe_cache_clusters(name,
- show_cache_node_info=True)
+ cc = conn.describe_cache_clusters(name, show_cache_node_info=True)
except boto.exception.BotoServerError as e:
- msg = 'Failed to get config for cache cluster {0}.'.format(name)
+ msg = "Failed to get config for cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return {}
- cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']
- host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address']
+ cc = cc["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]
+ host = cc["CacheClusters"][0]["CacheNodes"][0]["Endpoint"]["Address"]
return host
def get_group_host(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get hostname from replication cache group
CLI example::
salt myminion boto_elasticache.get_group_host myelasticachegroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
@@ -348,74 +374,82 @@ def get_group_host(name, region=None, key=None, keyid=None, profile=None):
try:
cc = conn.describe_replication_groups(name)
except boto.exception.BotoServerError as e:
- msg = 'Failed to get config for cache cluster {0}.'.format(name)
+ msg = "Failed to get config for cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return {}
- cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']
- cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint']
- host = cc['Address']
+ cc = cc["DescribeReplicationGroupsResponse"]["DescribeReplicationGroupsResult"]
+ cc = cc["ReplicationGroups"][0]["NodeGroups"][0]["PrimaryEndpoint"]
+ host = cc["Address"]
return host
-def get_all_cache_subnet_groups(name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_all_cache_subnet_groups(
+ name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return a list of all cache subnet groups with details
CLI example::
- salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1
+ salt myminion boto_elasticache.get_all_cache_subnet_groups region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- marker = ''
+ marker = ""
groups = []
while marker is not None:
- ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name,
- marker=marker)
- trimmed = ret.get('DescribeCacheSubnetGroupsResponse',
- {}).get('DescribeCacheSubnetGroupsResult', {})
- groups += trimmed.get('CacheSubnetGroups', [])
- marker = trimmed.get('Marker', None)
+ ret = conn.describe_cache_subnet_groups(
+ cache_subnet_group_name=name, marker=marker
+ )
+ trimmed = ret.get("DescribeCacheSubnetGroupsResponse", {}).get(
+ "DescribeCacheSubnetGroupsResult", {}
+ )
+ groups += trimmed.get("CacheSubnetGroups", [])
+ marker = trimmed.get("Marker", None)
if not groups:
- log.debug('No ElastiCache subnet groups found.')
+ log.debug("No ElastiCache subnet groups found.")
return groups
except boto.exception.BotoServerError as e:
log.error(e)
return []
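
The marker loop above is the standard boto pagination idiom: an empty string requests the first page, and the loop stops once a response comes back without a Marker token. The same idea as a reusable generator — iter_pages is an illustrative helper, not something this module defines:

    def iter_pages(describe, **kwargs):
        # `describe` stands in for any boto call that accepts a `marker`
        # keyword and returns a dict that may carry a "Marker" token.
        marker = ""
        while marker is not None:
            page = describe(marker=marker, **kwargs)
            yield page
            marker = page.get("Marker")  # None on the last page
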
-def list_cache_subnet_groups(name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def list_cache_subnet_groups(
+ name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return a list of all cache subnet group names
CLI example::
- salt myminion boto_elasticache.list_subnet_groups region=us-east-1
+ salt myminion boto_elasticache.list_cache_subnet_groups region=us-east-1
- '''
- return [g['CacheSubnetGroupName'] for g in
- get_all_cache_subnet_groups(name, region, key, keyid, profile)]
+ """
+ return [
+ g["CacheSubnetGroupName"]
+ for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)
+ ]
-def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
- '''
+def subnet_group_exists(
+ name, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if an ElastiCache subnet group exists.
CLI example::
- salt myminion boto_elasticache.subnet_group_exists my-param-group \
+ salt myminion boto_elasticache.subnet_group_exists my-subnet-group \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
try:
ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name)
if not ec:
- msg = ('ElastiCache subnet group does not exist in region {0}'.format(region))
+ msg = "ElastiCache subnet group does not exist in region {0}".format(region)
log.debug(msg)
return False
return True
@@ -424,9 +458,18 @@ def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, prof
return False
-def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_subnet_group(
+ name,
+ description,
+ subnet_ids=None,
+ subnet_names=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an ElastiCache subnet group
CLI example to create an ElastiCache subnet group::
@@ -434,10 +477,11 @@ def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, t
salt myminion boto_elasticache.create_subnet_group my-subnet-group \
"group description" subnet_ids='[subnet-12345678, subnet-87654321]' \
region=us-east-1
- '''
+ """
if not _exactly_one((subnet_ids, subnet_names)):
- raise SaltInvocationError("Exactly one of either 'subnet_ids' or "
- "'subnet_names' must be provided.")
+ raise SaltInvocationError(
+ "Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
@@ -446,279 +490,316 @@ def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, t
if subnet_names:
subnet_ids = []
for n in subnet_names:
- r = __salt__['boto_vpc.get_resource_id']('subnet', n,
- region=region, key=key,
- keyid=keyid, profile=profile)
- if 'id' not in r:
- log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name)
+ r = __salt__["boto_vpc.get_resource_id"](
+ "subnet", n, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if "id" not in r:
+ log.error("Couldn't resolve subnet name %s to an ID.", subnet_name)
return False
- subnet_ids += [r['id']]
+ subnet_ids += [r["id"]]
try:
ec = conn.create_cache_subnet_group(name, description, subnet_ids)
if not ec:
- msg = 'Failed to create ElastiCache subnet group {0}'.format(name)
+ msg = "Failed to create ElastiCache subnet group {0}".format(name)
log.error(msg)
return False
- log.info('Created ElastiCache subnet group %s', name)
+ log.info("Created ElastiCache subnet group %s", name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to create ElastiCache subnet group {0}'.format(name)
+ msg = "Failed to create ElastiCache subnet group {0}".format(name)
log.error(msg)
return False
-def get_cache_subnet_group(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None):
+ """
Get information about a cache subnet group.
CLI example::
salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
csg = conn.describe_cache_subnet_groups(name)
- csg = csg['DescribeCacheSubnetGroupsResponse']
- csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0]
+ csg = csg["DescribeCacheSubnetGroupsResponse"]
+ csg = csg["DescribeCacheSubnetGroupsResult"]["CacheSubnetGroups"][0]
except boto.exception.BotoServerError as e:
- msg = 'Failed to get cache subnet group {0}.'.format(name)
+ msg = "Failed to get cache subnet group {0}.".format(name)
log.error(msg)
log.debug(e)
return False
except (IndexError, TypeError, KeyError):
- msg = 'Failed to get cache subnet group {0} (2).'.format(name)
+ msg = "Failed to get cache subnet group {0} (2).".format(name)
log.error(msg)
return False
ret = {}
for key, val in six.iteritems(csg):
- if key == 'CacheSubnetGroupName':
- ret['cache_subnet_group_name'] = val
- elif key == 'CacheSubnetGroupDescription':
- ret['cache_subnet_group_description'] = val
- elif key == 'VpcId':
- ret['vpc_id'] = val
- elif key == 'Subnets':
- ret['subnets'] = []
+ if key == "CacheSubnetGroupName":
+ ret["cache_subnet_group_name"] = val
+ elif key == "CacheSubnetGroupDescription":
+ ret["cache_subnet_group_description"] = val
+ elif key == "VpcId":
+ ret["vpc_id"] = val
+ elif key == "Subnets":
+ ret["subnets"] = []
for subnet in val:
_subnet = {}
- _subnet['subnet_id'] = subnet['SubnetIdentifier']
- _az = subnet['SubnetAvailabilityZone']['Name']
- _subnet['subnet_availability_zone'] = _az
- ret['subnets'].append(_subnet)
+ _subnet["subnet_id"] = subnet["SubnetIdentifier"]
+ _az = subnet["SubnetAvailabilityZone"]["Name"]
+ _subnet["subnet_availability_zone"] = _az
+ ret["subnets"].append(_subnet)
else:
ret[key] = val
return ret
def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an ElastiCache subnet group.
CLI example::
salt myminion boto_elasticache.delete_subnet_group my-subnet-group \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
try:
conn.delete_cache_subnet_group(name)
- msg = 'Deleted ElastiCache subnet group {0}.'.format(name)
+ msg = "Deleted ElastiCache subnet group {0}.".format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to delete ElastiCache subnet group {0}'.format(name)
+ msg = "Failed to delete ElastiCache subnet group {0}".format(name)
log.error(msg)
return False
-def create(name, num_cache_nodes=None, engine=None, cache_node_type=None,
- replication_group_id=None, engine_version=None,
- cache_parameter_group_name=None, cache_subnet_group_name=None,
- cache_security_group_names=None, security_group_ids=None,
- snapshot_arns=None, preferred_availability_zone=None,
- preferred_maintenance_window=None, port=None,
- notification_topic_arn=None, auto_minor_version_upgrade=None,
- wait=None, region=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ name,
+ num_cache_nodes=None,
+ engine=None,
+ cache_node_type=None,
+ replication_group_id=None,
+ engine_version=None,
+ cache_parameter_group_name=None,
+ cache_subnet_group_name=None,
+ cache_security_group_names=None,
+ security_group_ids=None,
+ snapshot_arns=None,
+ preferred_availability_zone=None,
+ preferred_maintenance_window=None,
+ port=None,
+ notification_topic_arn=None,
+ auto_minor_version_upgrade=None,
+ wait=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a cache cluster.
CLI example::
salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro
cache_security_group_names='["myelasticachesg"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.create_cache_cluster(
- name, num_cache_nodes, cache_node_type, engine,
- replication_group_id, engine_version, cache_parameter_group_name,
- cache_subnet_group_name, cache_security_group_names,
- security_group_ids, snapshot_arns, preferred_availability_zone,
- preferred_maintenance_window, port, notification_topic_arn,
- auto_minor_version_upgrade)
+ name,
+ num_cache_nodes,
+ cache_node_type,
+ engine,
+ replication_group_id,
+ engine_version,
+ cache_parameter_group_name,
+ cache_subnet_group_name,
+ cache_security_group_names,
+ security_group_ids,
+ snapshot_arns,
+ preferred_availability_zone,
+ preferred_maintenance_window,
+ port,
+ notification_topic_arn,
+ auto_minor_version_upgrade,
+ )
if not wait:
- log.info('Created cache cluster %s.', name)
+ log.info("Created cache cluster %s.", name)
return True
while True:
time.sleep(3)
config = get_config(name, region, key, keyid, profile)
if not config:
return True
- if config['cache_cluster_status'] == 'available':
+ if config["cache_cluster_status"] == "available":
return True
- log.info('Created cache cluster %s.', name)
+ log.info("Created cache cluster %s.", name)
except boto.exception.BotoServerError as e:
- msg = 'Failed to create cache cluster {0}.'.format(name)
+ msg = "Failed to create cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return False
def delete(name, wait=False, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a cache cluster.
CLI example::
salt myminion boto_elasticache.delete myelasticache
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_cache_cluster(name)
if not wait:
- log.info('Deleted cache cluster %s.', name)
+ log.info("Deleted cache cluster %s.", name)
return True
while True:
config = get_config(name, region, key, keyid, profile)
if not config:
return True
- if config['cache_cluster_status'] == 'deleting':
+ if config["cache_cluster_status"] == "deleting":
return True
time.sleep(2)
- log.info('Deleted cache cluster %s.', name)
+ log.info("Deleted cache cluster %s.", name)
return True
except boto.exception.BotoServerError as e:
- msg = 'Failed to delete cache cluster {0}.'.format(name)
+ msg = "Failed to delete cache cluster {0}.".format(name)
log.error(msg)
log.debug(e)
return False
-def create_cache_security_group(name, description, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_cache_security_group(
+ name, description, region=None, key=None, keyid=None, profile=None
+):
+ """
Create a cache security group.
CLI example::
salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
created = conn.create_cache_security_group(name, description)
if created:
- log.info('Created cache security group %s.', name)
+ log.info("Created cache security group %s.", name)
return True
else:
- msg = 'Failed to create cache security group {0}.'.format(name)
+ msg = "Failed to create cache security group {0}.".format(name)
log.error(msg)
return False
-def delete_cache_security_group(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete a cache security group.
CLI example::
- salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group'
+ salt myminion boto_elasticache.delete_cache_security_group myelasticachesg
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deleted = conn.delete_cache_security_group(name)
if deleted:
- log.info('Deleted cache security group %s.', name)
+ log.info("Deleted cache security group %s.", name)
return True
else:
- msg = 'Failed to delete cache security group {0}.'.format(name)
+ msg = "Failed to delete cache security group {0}.".format(name)
log.error(msg)
return False
-def authorize_cache_security_group_ingress(name, ec2_security_group_name,
- ec2_security_group_owner_id,
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def authorize_cache_security_group_ingress(
+ name,
+ ec2_security_group_name,
+ ec2_security_group_owner_id,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Authorize network ingress from an ec2 security group to a cache security
group.
CLI example::
salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
added = conn.authorize_cache_security_group_ingress(
- name, ec2_security_group_name, ec2_security_group_owner_id)
+ name, ec2_security_group_name, ec2_security_group_owner_id
+ )
if added:
- msg = 'Added {0} to cache security group {1}.'
+ msg = "Added {0} to cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.info(msg)
return True
else:
- msg = 'Failed to add {0} to cache security group {1}.'
+ msg = "Failed to add {0} to cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
log.debug(e)
- msg = 'Failed to add {0} to cache security group {1}.'
+ msg = "Failed to add {0} to cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.error(msg)
return False
-def revoke_cache_security_group_ingress(name, ec2_security_group_name,
- ec2_security_group_owner_id,
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def revoke_cache_security_group_ingress(
+ name,
+ ec2_security_group_name,
+ ec2_security_group_owner_id,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Revoke network ingress from an ec2 security group to a cache security
group.
CLI example::
salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
removed = conn.revoke_cache_security_group_ingress(
- name, ec2_security_group_name, ec2_security_group_owner_id)
+ name, ec2_security_group_name, ec2_security_group_owner_id
+ )
if removed:
- msg = 'Removed {0} from cache security group {1}.'
+ msg = "Removed {0} from cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.info(msg)
return True
else:
- msg = 'Failed to remove {0} from cache security group {1}.'
+ msg = "Failed to remove {0} from cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
log.debug(e)
- msg = 'Failed to remove {0} from cache security group {1}.'
+ msg = "Failed to remove {0} from cache security group {1}."
- msg = msg.format(name, ec2_security_group_name)
+ msg = msg.format(ec2_security_group_name, name)
log.error(msg)
return False
diff --git a/salt/modules/boto_elasticsearch_domain.py b/salt/modules/boto_elasticsearch_domain.py
index a2ef782231c..c07cf5685df 100644
--- a/salt/modules/boto_elasticsearch_domain.py
+++ b/salt/modules/boto_elasticsearch_domain.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Elasticsearch Service
.. versionadded:: 2016.11.0
@@ -70,34 +70,38 @@ Connection module for Amazon Elasticsearch Service
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-# Import Salt libs
-from salt.ext import six
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
from salt.exceptions import SaltInvocationError
+# Import Salt libs
+from salt.ext import six
+
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -105,28 +109,24 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
- # the boto_lambda execution module relies on the connect_to_region() method
+ # this execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
- return salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- boto3_ver='1.4.0'
- )
+ return salt.utils.versions.check_boto_reqs(boto_ver="2.8.0", boto3_ver="1.4.0")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'es')
+ __utils__["boto3.assign_funcs"](__name__, "es")
-def exists(DomainName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def exists(DomainName, region=None, key=None, keyid=None, profile=None):
+ """
Given a domain name, check to see if the given domain exists.
Returns True if the given domain exists and returns False if the given
@@ -138,21 +138,20 @@ def exists(DomainName,
salt myminion boto_elasticsearch_domain.exists mydomain
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
- return {'exists': True}
+ return {"exists": True}
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': __utils__['boto3.get_error'](e)}
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": __utils__["boto3.get_error"](e)}
-def status(DomainName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def status(DomainName, region=None, key=None, keyid=None, profile=None):
+ """
Given a domain name describe its status.
Returns a dictionary of interesting properties.
@@ -163,27 +162,36 @@ def status(DomainName,
salt myminion boto_elasticsearch_domain.status mydomain
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
- if domain and 'DomainStatus' in domain:
- domain = domain.get('DomainStatus', {})
- keys = ('Endpoint', 'Created', 'Deleted',
- 'DomainName', 'DomainId', 'EBSOptions', 'SnapshotOptions',
- 'AccessPolicies', 'Processing', 'AdvancedOptions', 'ARN',
- 'ElasticsearchVersion')
- return {'domain': dict([(k, domain.get(k)) for k in keys if k in domain])}
+ if domain and "DomainStatus" in domain:
+ domain = domain.get("DomainStatus", {})
+ keys = (
+ "Endpoint",
+ "Created",
+ "Deleted",
+ "DomainName",
+ "DomainId",
+ "EBSOptions",
+ "SnapshotOptions",
+ "AccessPolicies",
+ "Processing",
+ "AdvancedOptions",
+ "ARN",
+ "ElasticsearchVersion",
+ )
+ return {"domain": dict([(k, domain.get(k)) for k in keys if k in domain])}
else:
- return {'domain': None}
+ return {"domain": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe(DomainName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe(DomainName, region=None, key=None, keyid=None, profile=None):
+ """
Given a domain name describe its properties.
Returns a dictionary of interesting properties.
@@ -194,27 +202,45 @@ def describe(DomainName,
salt myminion boto_elasticsearch_domain.describe mydomain
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain_config(DomainName=DomainName)
- if domain and 'DomainConfig' in domain:
- domain = domain['DomainConfig']
- keys = ('ElasticsearchClusterConfig', 'EBSOptions', 'AccessPolicies',
- 'SnapshotOptions', 'AdvancedOptions')
- return {'domain': dict([(k, domain.get(k, {}).get('Options')) for k in keys if k in domain])}
+ if domain and "DomainConfig" in domain:
+ domain = domain["DomainConfig"]
+ keys = (
+ "ElasticsearchClusterConfig",
+ "EBSOptions",
+ "AccessPolicies",
+ "SnapshotOptions",
+ "AdvancedOptions",
+ )
+ return {
+ "domain": dict(
+ [(k, domain.get(k, {}).get("Options")) for k in keys if k in domain]
+ )
+ }
else:
- return {'domain': None}
+ return {"domain": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
- AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None,
- region=None, key=None, keyid=None, profile=None,
- ElasticsearchVersion=None):
- '''
+def create(
+ DomainName,
+ ElasticsearchClusterConfig=None,
+ EBSOptions=None,
+ AccessPolicies=None,
+ SnapshotOptions=None,
+ AdvancedOptions=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ ElasticsearchVersion=None,
+):
+ """
Given a valid config, create a domain.
Returns {created: true} if the domain was created and returns
@@ -234,38 +260,48 @@ def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
"Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]} \\
{"AutomatedSnapshotStartHour": 0} \\
{"rest.action.multi.allow_explicit_index": "true"}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for k in ('ElasticsearchClusterConfig', 'EBSOptions',
- 'AccessPolicies', 'SnapshotOptions', 'AdvancedOptions',
- 'ElasticsearchVersion'):
+ for k in (
+ "ElasticsearchClusterConfig",
+ "EBSOptions",
+ "AccessPolicies",
+ "SnapshotOptions",
+ "AdvancedOptions",
+ "ElasticsearchVersion",
+ ):
if locals()[k] is not None:
val = locals()[k]
if isinstance(val, six.string_types):
try:
val = salt.utils.json.loads(val)
except ValueError as e:
- return {'updated': False, 'error': 'Error parsing {0}: {1}'.format(k, e.message)}
+ return {
+ "updated": False,
+ "error": "Error parsing {0}: {1}".format(k, e.message),
+ }
kwargs[k] = val
- if 'AccessPolicies' in kwargs:
- kwargs['AccessPolicies'] = salt.utils.json.dumps(kwargs['AccessPolicies'])
- if 'ElasticsearchVersion' in kwargs:
- kwargs['ElasticsearchVersion'] = six.text_type(kwargs['ElasticsearchVersion'])
+ if "AccessPolicies" in kwargs:
+ kwargs["AccessPolicies"] = salt.utils.json.dumps(kwargs["AccessPolicies"])
+ if "ElasticsearchVersion" in kwargs:
+ kwargs["ElasticsearchVersion"] = six.text_type(
+ kwargs["ElasticsearchVersion"]
+ )
domain = conn.create_elasticsearch_domain(DomainName=DomainName, **kwargs)
- if domain and 'DomainStatus' in domain:
- return {'created': True}
+ if domain and "DomainStatus" in domain:
+ return {"created": True}
else:
- log.warning('Domain was not created')
- return {'created': False}
+ log.warning("Domain was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
def delete(DomainName, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Given a domain name, delete it.
Returns {deleted: true} if the domain was deleted and returns
@@ -277,20 +313,29 @@ def delete(DomainName, region=None, key=None, keyid=None, profile=None):
salt myminion boto_elasticsearch_domain.delete mydomain
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_elasticsearch_domain(DomainName=DomainName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def update(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
- AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update(
+ DomainName,
+ ElasticsearchClusterConfig=None,
+ EBSOptions=None,
+ AccessPolicies=None,
+ SnapshotOptions=None,
+ AdvancedOptions=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update the named domain to the configuration.
Returns {updated: true} if the domain was updated and returns
@@ -311,35 +356,46 @@ def update(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
{"AutomatedSnapshotStartHour": 0} \\
{"rest.action.multi.allow_explicit_index": "true"}
- '''
+ """
call_args = {}
- for k in ('ElasticsearchClusterConfig', 'EBSOptions',
- 'AccessPolicies', 'SnapshotOptions', 'AdvancedOptions'):
+ for k in (
+ "ElasticsearchClusterConfig",
+ "EBSOptions",
+ "AccessPolicies",
+ "SnapshotOptions",
+ "AdvancedOptions",
+ ):
if locals()[k] is not None:
val = locals()[k]
if isinstance(val, six.string_types):
try:
val = salt.utils.json.loads(val)
except ValueError as e:
- return {'updated': False, 'error': 'Error parsing {0}: {1}'.format(k, e.message)}
+ return {
+ "updated": False,
+ "error": "Error parsing {0}: {1}".format(k, e.message),
+ }
call_args[k] = val
- if 'AccessPolicies' in call_args:
- call_args['AccessPolicies'] = salt.utils.json.dumps(call_args['AccessPolicies'])
+ if "AccessPolicies" in call_args:
+ call_args["AccessPolicies"] = salt.utils.json.dumps(call_args["AccessPolicies"])
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- domain = conn.update_elasticsearch_domain_config(DomainName=DomainName, **call_args)
- if not domain or 'DomainConfig' not in domain:
- log.warning('Domain was not updated')
- return {'updated': False}
- return {'updated': True}
+ domain = conn.update_elasticsearch_domain_config(
+ DomainName=DomainName, **call_args
+ )
+ if not domain or "DomainConfig" not in domain:
+ log.warning("Domain was not updated")
+ return {"updated": False}
+ return {"updated": True}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def add_tags(DomainName=None, ARN=None,
- region=None, key=None, keyid=None, profile=None, **kwargs):
- '''
+def add_tags(
+ DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None, **kwargs
+):
+ """
Add tags to a domain
Returns {tagged: true} if the domain was tagged and returns
@@ -351,38 +407,45 @@ def add_tags(DomainName=None, ARN=None,
salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
- if six.text_type(k).startswith('__'):
+ if six.text_type(k).startswith("__"):
continue
- tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
+ tagslist.append({"Key": six.text_type(k), "Value": six.text_type(v)})
if ARN is None:
if DomainName is None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
- domaindata = status(DomainName=DomainName,
- region=region, key=key, keyid=keyid,
- profile=profile)
- if not domaindata or 'domain' not in domaindata:
- log.warning('Domain tags not updated')
- return {'tagged': False}
- ARN = domaindata.get('domain', {}).get('ARN')
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
+ domaindata = status(
+ DomainName=DomainName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if not domaindata or "domain" not in domaindata:
+ log.warning("Domain tags not updated")
+ return {"tagged": False}
+ ARN = domaindata.get("domain", {}).get("ARN")
elif DomainName is not None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
conn.add_tags(ARN=ARN, TagList=tagslist)
- return {'tagged': True}
+ return {"tagged": True}
except ClientError as e:
- return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"tagged": False, "error": __utils__["boto3.get_error"](e)}
-def remove_tags(TagKeys, DomainName=None, ARN=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def remove_tags(
+ TagKeys, DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None
+):
+ """
- Remove tags from a trail
- Returns {tagged: true} if the trail was tagged and returns
+ Remove tags from an Elasticsearch domain
+ Returns {tagged: true} if the domain was tagged and returns
@@ -394,34 +457,40 @@ def remove_tags(TagKeys, DomainName=None, ARN=None,
- salt myminion boto_cloudtrail.remove_tags my_trail tag_a=tag_value tag_b=tag_value
+ salt myminion boto_elasticsearch_domain.remove_tags '["tag_a", "tag_b"]' mydomain
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ARN is None:
if DomainName is None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
- domaindata = status(DomainName=DomainName,
- region=region, key=key, keyid=keyid,
- profile=profile)
- if not domaindata or 'domain' not in domaindata:
- log.warning('Domain tags not updated')
- return {'tagged': False}
- ARN = domaindata.get('domain', {}).get('ARN')
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
+ domaindata = status(
+ DomainName=DomainName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if not domaindata or "domain" not in domaindata:
+ log.warning("Domain tags not updated")
+ return {"tagged": False}
+ ARN = domaindata.get("domain", {}).get("ARN")
elif DomainName is not None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
- conn.remove_tags(ARN=domaindata.get('domain', {}).get('ARN'),
- TagKeys=TagKeys)
- return {'tagged': True}
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
+ conn.remove_tags(ARN=ARN, TagKeys=TagKeys)
+ return {"tagged": True}
except ClientError as e:
- return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"tagged": False, "error": __utils__["boto3.get_error"](e)}
-def list_tags(DomainName=None, ARN=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_tags(
+ DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None
+):
+ """
- List tags of a trail
+ List tags of an Elasticsearch domain
Returns:
@@ -435,30 +504,36 @@ def list_tags(DomainName=None, ARN=None,
- salt myminion boto_cloudtrail.list_tags my_trail
+ salt myminion boto_elasticsearch_domain.list_tags mydomain
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ARN is None:
if DomainName is None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
- domaindata = status(DomainName=DomainName,
- region=region, key=key, keyid=keyid,
- profile=profile)
- if not domaindata or 'domain' not in domaindata:
- log.warning('Domain tags not updated')
- return {'tagged': False}
- ARN = domaindata.get('domain', {}).get('ARN')
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
+ domaindata = status(
+ DomainName=DomainName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if not domaindata or "domain" not in domaindata:
+ log.warning("Domain tags not updated")
+ return {"tagged": False}
+ ARN = domaindata.get("domain", {}).get("ARN")
elif DomainName is not None:
- raise SaltInvocationError('One (but not both) of ARN or '
- 'domain must be specified.')
+ raise SaltInvocationError(
+ "One (but not both) of ARN or " "domain must be specified."
+ )
ret = conn.list_tags(ARN=ARN)
log.warning(ret)
- tlist = ret.get('TagList', [])
+ tlist = ret.get("TagList", [])
tagdict = {}
for tag in tlist:
- tagdict[tag.get('Key')] = tag.get('Value')
- return {'tags': tagdict}
+ tagdict[tag.get("Key")] = tag.get("Value")
+ return {"tags": tagdict}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py
index 66455a3c0b6..b6b08e78bd9 100644
--- a/salt/modules/boto_elb.py
+++ b/salt/modules/boto_elb.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon ELB
.. versionadded:: 2014.7.0
@@ -40,7 +40,7 @@ Connection module for Amazon ELB
region: us-east-1
:depends: boto >= 2.33.0
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
@@ -50,8 +50,6 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
-log = logging.getLogger(__name__)
-
# Import Salt libs
import salt.utils.json
import salt.utils.odict as odict
@@ -59,6 +57,10 @@ import salt.utils.versions
# Import third party libs
from salt.ext import six
+
+log = logging.getLogger(__name__)
+
+
try:
import boto
import boto.ec2 # pylint: enable=unused-import
@@ -67,28 +69,28 @@ try:
from boto.ec2.elb.attributes import ConnectionDrainingAttribute
from boto.ec2.elb.attributes import ConnectionSettingAttribute
from boto.ec2.elb.attributes import CrossZoneLoadBalancingAttribute
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
# connection settings were added in 2.33.0
has_boto_reqs = salt.utils.versions.check_boto_reqs(
- boto_ver='2.33.0',
- check_boto3=False
+ boto_ver="2.33.0", check_boto3=False
)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'elb', module='ec2.elb', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "elb", module="ec2.elb", pack=__salt__)
return has_boto_reqs
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an ELB exists.
CLI example:
@@ -96,7 +98,7 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.exists myelb region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -104,7 +106,7 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
if elb:
return True
else:
- log.debug('The load balancer does not exist in region %s', region)
+ log.debug("The load balancer does not exist in region %s", region)
return False
except boto.exception.BotoServerError as error:
log.warning(error)
@@ -112,7 +114,7 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
def get_all_elbs(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return all load balancers associated with an account
CLI example:
@@ -120,7 +122,7 @@ def get_all_elbs(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.get_all_elbs region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -131,7 +133,7 @@ def get_all_elbs(region=None, key=None, keyid=None, profile=None):
def list_elbs(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return names of all load balancers associated with an account
CLI example:
@@ -139,14 +141,16 @@ def list_elbs(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.list_elbs region=us-east-1
- '''
+ """
- return [e.name for e in get_all_elbs(region=region, key=key, keyid=keyid,
- profile=profile)]
+ return [
+ e.name
+ for e in get_all_elbs(region=region, key=key, keyid=keyid, profile=profile)
+ ]
def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get an ELB configuration.
CLI example:
@@ -154,7 +158,7 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
- salt myminion boto_elb.exists myelb region=us-east-1
+ salt myminion boto_elb.get_elb_config myelb region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
@@ -163,55 +167,55 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
lb = conn.get_all_load_balancers(load_balancer_names=[name])
lb = lb[0]
ret = {}
- ret['availability_zones'] = lb.availability_zones
+ ret["availability_zones"] = lb.availability_zones
listeners = []
for _listener in lb.listeners:
listener_dict = {}
- listener_dict['elb_port'] = _listener.load_balancer_port
- listener_dict['elb_protocol'] = _listener.protocol
- listener_dict['instance_port'] = _listener.instance_port
- listener_dict['instance_protocol'] = _listener.instance_protocol
- listener_dict['policies'] = _listener.policy_names
+ listener_dict["elb_port"] = _listener.load_balancer_port
+ listener_dict["elb_protocol"] = _listener.protocol
+ listener_dict["instance_port"] = _listener.instance_port
+ listener_dict["instance_protocol"] = _listener.instance_protocol
+ listener_dict["policies"] = _listener.policy_names
if _listener.ssl_certificate_id:
- listener_dict['certificate'] = _listener.ssl_certificate_id
+ listener_dict["certificate"] = _listener.ssl_certificate_id
listeners.append(listener_dict)
- ret['listeners'] = listeners
+ ret["listeners"] = listeners
backends = []
for _backend in lb.backends:
bs_dict = {}
- bs_dict['instance_port'] = _backend.instance_port
- bs_dict['policies'] = [p.policy_name for p in _backend.policies]
+ bs_dict["instance_port"] = _backend.instance_port
+ bs_dict["policies"] = [p.policy_name for p in _backend.policies]
backends.append(bs_dict)
- ret['backends'] = backends
- ret['subnets'] = lb.subnets
- ret['security_groups'] = lb.security_groups
- ret['scheme'] = lb.scheme
- ret['dns_name'] = lb.dns_name
- ret['tags'] = _get_all_tags(conn, name)
+ ret["backends"] = backends
+ ret["subnets"] = lb.subnets
+ ret["security_groups"] = lb.security_groups
+ ret["scheme"] = lb.scheme
+ ret["dns_name"] = lb.dns_name
+ ret["tags"] = _get_all_tags(conn, name)
lb_policy_lists = [
lb.policies.app_cookie_stickiness_policies,
lb.policies.lb_cookie_stickiness_policies,
- lb.policies.other_policies
- ]
+ lb.policies.other_policies,
+ ]
policies = []
for policy_list in lb_policy_lists:
policies += [p.policy_name for p in policy_list]
- ret['policies'] = policies
+ ret["policies"] = policies
return ret
except boto.exception.BotoServerError as error:
- if error.error_code == 'Throttling':
- log.debug('Throttled by AWS API, will retry in 5 seconds.')
+ if error.error_code == "Throttling":
+ log.debug("Throttled by AWS API, will retry in 5 seconds.")
time.sleep(5)
retries -= 1
continue
- log.error('Error fetching config for ELB %s: %s', name, error.message)
+ log.error("Error fetching config for ELB %s: %s", name, error.message)
log.error(error)
return {}
return {}
def listener_dict_to_tuple(listener):
- '''
+ """
Convert an ELB listener dict into a listener tuple used by certain parts of
the AWS ELB API.
@@ -220,24 +224,36 @@ def listener_dict_to_tuple(listener):
.. code-block:: bash
salt myminion boto_elb.listener_dict_to_tuple '{"elb_port":80,"instance_port":80,"elb_protocol":"HTTP"}'
- '''
+ """
# We define all listeners as complex listeners.
- if 'instance_protocol' not in listener:
- instance_protocol = listener['elb_protocol'].upper()
+ if "instance_protocol" not in listener:
+ instance_protocol = listener["elb_protocol"].upper()
else:
- instance_protocol = listener['instance_protocol'].upper()
- listener_tuple = [listener['elb_port'], listener['instance_port'],
- listener['elb_protocol'], instance_protocol]
- if 'certificate' in listener:
- listener_tuple.append(listener['certificate'])
+ instance_protocol = listener["instance_protocol"].upper()
+ listener_tuple = [
+ listener["elb_port"],
+ listener["instance_port"],
+ listener["elb_protocol"],
+ instance_protocol,
+ ]
+ if "certificate" in listener:
+ listener_tuple.append(listener["certificate"])
return tuple(listener_tuple)
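
A quick illustration of the conversion above, assuming boto is installed so the module imports cleanly; the ARN is a placeholder value:

    from salt.modules.boto_elb import listener_dict_to_tuple

    listener = {
        "elb_port": 443,
        "instance_port": 80,
        "elb_protocol": "HTTPS",
        "certificate": "arn:aws:iam::1111111:server-certificate/mycert",
    }
    # instance_protocol falls back to the upper-cased elb_protocol:
    assert listener_dict_to_tuple(listener) == (
        443,
        80,
        "HTTPS",
        "HTTPS",
        "arn:aws:iam::1111111:server-certificate/mycert",
    )
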
-def create(name, availability_zones, listeners, subnets=None,
- security_groups=None, scheme='internet-facing',
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def create(
+ name,
+ availability_zones,
+ listeners,
+ subnets=None,
+ security_groups=None,
+ scheme="internet-facing",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an ELB
CLI example to create an ELB:
@@ -245,7 +261,7 @@ def create(name, availability_zones, listeners, subnets=None,
.. code-block:: bash
salt myminion boto_elb.create myelb '["us-east-1a", "us-east-1e"]' '{"elb_port": 443, "elb_protocol": "HTTPS", ...}' region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if exists(name, region, key, keyid, profile):
return True
@@ -260,24 +276,33 @@ def create(name, availability_zones, listeners, subnets=None,
_complex_listeners.append(listener_dict_to_tuple(listener))
try:
- lb = conn.create_load_balancer(name=name, zones=availability_zones, subnets=subnets,
- security_groups=security_groups, scheme=scheme,
- complex_listeners=_complex_listeners)
+ lb = conn.create_load_balancer(
+ name=name,
+ zones=availability_zones,
+ subnets=subnets,
+ security_groups=security_groups,
+ scheme=scheme,
+ complex_listeners=_complex_listeners,
+ )
if lb:
- log.info('Created ELB %s', name)
+ log.info("Created ELB %s", name)
return True
else:
- log.error('Failed to create ELB %s', name)
+ log.error("Failed to create ELB %s", name)
return False
except boto.exception.BotoServerError as error:
- log.error('Failed to create ELB %s: %s: %s',
- name, error.error_code, error.message,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to create ELB %s: %s: %s",
+ name,
+ error.error_code,
+ error.message,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
def delete(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an ELB.
CLI example to delete an ELB:
@@ -285,24 +310,22 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.delete myelb region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return True
try:
conn.delete_load_balancer(name)
- log.info('Deleted ELB %s.', name)
+ log.info("Deleted ELB %s.", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to delete ELB %s', name,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error("Failed to delete ELB %s", name, exc_info_on_loglevel=logging.DEBUG)
return False
-def create_listeners(name, listeners, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_listeners(name, listeners, region=None, key=None, keyid=None, profile=None):
+ """
Create listeners on an ELB.
CLI example:
@@ -310,7 +333,7 @@ def create_listeners(name, listeners, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.create_listeners myelb '[["HTTPS", "HTTP", 443, 80, "arn:aws:iam::1111111:server-certificate/mycert"]]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(listeners, six.string_types):
@@ -321,17 +344,20 @@ def create_listeners(name, listeners, region=None, key=None, keyid=None,
_complex_listeners.append(listener_dict_to_tuple(listener))
try:
conn.create_load_balancer_listeners(name, [], _complex_listeners)
- log.info('Created ELB listeners on %s', name)
+ log.info("Created ELB listeners on %s", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to create ELB listeners on %s: %s', name, error,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to create ELB listeners on %s: %s",
+ name,
+ error,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def delete_listeners(name, ports, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_listeners(name, ports, region=None, key=None, keyid=None, profile=None):
+ """
Delete listeners on an ELB.
CLI example:
@@ -339,24 +365,29 @@ def delete_listeners(name, ports, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.delete_listeners myelb '[80,443]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(ports, six.string_types):
ports = salt.utils.json.loads(ports)
try:
conn.delete_load_balancer_listeners(name, ports)
- log.info('Deleted ELB listeners on %s', name)
+ log.info("Deleted ELB listeners on %s", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to delete ELB listeners on %s: %s', name, error,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to delete ELB listeners on %s: %s",
+ name,
+ error,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def apply_security_groups(name, security_groups, region=None, key=None,
- keyid=None, profile=None):
- '''
+def apply_security_groups(
+ name, security_groups, region=None, key=None, keyid=None, profile=None
+):
+ """
Apply security groups to ELB.
CLI example:
@@ -364,25 +395,25 @@ def apply_security_groups(name, security_groups, region=None, key=None,
.. code-block:: bash
salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, six.string_types):
security_groups = salt.utils.json.loads(security_groups)
try:
conn.apply_security_groups_to_lb(name, security_groups)
- log.info('Applied security_groups on ELB %s', name)
+ log.info("Applied security_groups on ELB %s", name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to appply security_groups on ELB %s: %s',
- name, e.message)
+ log.error("Failed to appply security_groups on ELB %s: %s", name, e.message)
return False
-def enable_availability_zones(name, availability_zones, region=None, key=None,
- keyid=None, profile=None):
- '''
+def enable_availability_zones(
+ name, availability_zones, region=None, key=None, keyid=None, profile=None
+):
+ """
Enable availability zones for ELB.
CLI example:
@@ -390,23 +421,24 @@ def enable_availability_zones(name, availability_zones, region=None, key=None,
.. code-block:: bash
salt myminion boto_elb.enable_availability_zones myelb '["us-east-1a"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones)
try:
conn.enable_availability_zones(name, availability_zones)
- log.info('Enabled availability_zones on ELB %s', name)
+ log.info("Enabled availability_zones on ELB %s", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to enable availability_zones on ELB %s: %s', name, error)
+ log.error("Failed to enable availability_zones on ELB %s: %s", name, error)
return False
-def disable_availability_zones(name, availability_zones, region=None, key=None,
- keyid=None, profile=None):
- '''
+def disable_availability_zones(
+ name, availability_zones, region=None, key=None, keyid=None, profile=None
+):
+ """
Disable availability zones for ELB.
CLI example:
@@ -414,24 +446,27 @@ def disable_availability_zones(name, availability_zones, region=None, key=None,
.. code-block:: bash
salt myminion boto_elb.disable_availability_zones myelb '["us-east-1a"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, six.string_types):
availability_zones = salt.utils.json.loads(availability_zones)
try:
conn.disable_availability_zones(name, availability_zones)
- log.info('Disabled availability_zones on ELB %s', name)
+ log.info("Disabled availability_zones on ELB %s", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to disable availability_zones on ELB %s: %s',
- name, error, exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to disable availability_zones on ELB %s: %s",
+ name,
+ error,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def attach_subnets(name, subnets, region=None, key=None, keyid=None,
- profile=None):
- '''
+def attach_subnets(name, subnets, region=None, key=None, keyid=None, profile=None):
+ """
Attach ELB to subnets.
CLI example:
@@ -439,24 +474,27 @@ def attach_subnets(name, subnets, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.attach_subnets myelb '["mysubnet"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(subnets, six.string_types):
subnets = salt.utils.json.loads(subnets)
try:
conn.attach_lb_to_subnets(name, subnets)
- log.info('Attached ELB %s on subnets.', name)
+ log.info("Attached ELB %s on subnets.", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to attach ELB %s on subnets: %s', name, error,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to attach ELB %s on subnets: %s",
+ name,
+ error,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def detach_subnets(name, subnets, region=None, key=None, keyid=None,
- profile=None):
- '''
+def detach_subnets(name, subnets, region=None, key=None, keyid=None, profile=None):
+ """
Detach ELB from subnets.
CLI example:
@@ -464,23 +502,27 @@ def detach_subnets(name, subnets, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.detach_subnets myelb '["mysubnet"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(subnets, six.string_types):
subnets = salt.utils.json.loads(subnets)
try:
conn.detach_lb_from_subnets(name, subnets)
- log.info('Detached ELB %s from subnets.', name)
+ log.info("Detached ELB %s from subnets.", name)
return True
except boto.exception.BotoServerError as error:
- log.error('Failed to detach ELB %s from subnets: %s', name, error,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to detach ELB %s from subnets: %s",
+ name,
+ error,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if attributes are set on an ELB.
CLI example:
@@ -488,7 +530,7 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.get_attributes myelb
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
@@ -496,37 +538,36 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
try:
lbattrs = conn.get_all_lb_attributes(name)
ret = odict.OrderedDict()
- ret['access_log'] = odict.OrderedDict()
- ret['cross_zone_load_balancing'] = odict.OrderedDict()
- ret['connection_draining'] = odict.OrderedDict()
- ret['connecting_settings'] = odict.OrderedDict()
+ ret["access_log"] = odict.OrderedDict()
+ ret["cross_zone_load_balancing"] = odict.OrderedDict()
+ ret["connection_draining"] = odict.OrderedDict()
+ ret["connecting_settings"] = odict.OrderedDict()
al = lbattrs.access_log
czlb = lbattrs.cross_zone_load_balancing
cd = lbattrs.connection_draining
cs = lbattrs.connecting_settings
- ret['access_log']['enabled'] = al.enabled
- ret['access_log']['s3_bucket_name'] = al.s3_bucket_name
- ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix
- ret['access_log']['emit_interval'] = al.emit_interval
- ret['cross_zone_load_balancing']['enabled'] = czlb.enabled
- ret['connection_draining']['enabled'] = cd.enabled
- ret['connection_draining']['timeout'] = cd.timeout
- ret['connecting_settings']['idle_timeout'] = cs.idle_timeout
+ ret["access_log"]["enabled"] = al.enabled
+ ret["access_log"]["s3_bucket_name"] = al.s3_bucket_name
+ ret["access_log"]["s3_bucket_prefix"] = al.s3_bucket_prefix
+ ret["access_log"]["emit_interval"] = al.emit_interval
+ ret["cross_zone_load_balancing"]["enabled"] = czlb.enabled
+ ret["connection_draining"]["enabled"] = cd.enabled
+ ret["connection_draining"]["timeout"] = cd.timeout
+ ret["connecting_settings"]["idle_timeout"] = cs.idle_timeout
return ret
except boto.exception.BotoServerError as e:
- if e.error_code == 'Throttling':
+ if e.error_code == "Throttling":
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
- log.error('ELB %s does not exist: %s', name, e.message)
+ log.error("ELB %s does not exist: %s", name, e.message)
return {}
return {}
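For reference, a successful ``get_attributes`` call returns nested mappings shaped like the following (values are illustrative, not from a real ELB):

.. code-block:: python

    {
        "access_log": {
            "enabled": True,
            "s3_bucket_name": "mybucket",
            "s3_bucket_prefix": "mylogs/",
            "emit_interval": 5,
        },
        "cross_zone_load_balancing": {"enabled": True},
        "connection_draining": {"enabled": True, "timeout": 300},
        "connecting_settings": {"idle_timeout": 60},
    }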
-def set_attributes(name, attributes, region=None, key=None, keyid=None,
- profile=None):
- '''
+def set_attributes(name, attributes, region=None, key=None, keyid=None, profile=None):
+ """
Set attributes on an ELB.
name (string)
@@ -565,66 +606,67 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.set_attributes myelb '{"access_log": {"enabled": "true", "s3_bucket_name": "mybucket", "s3_bucket_prefix": "mylogs/", "emit_interval": "5"}}' region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- al = attributes.get('access_log', {})
- czlb = attributes.get('cross_zone_load_balancing', {})
- cd = attributes.get('connection_draining', {})
- cs = attributes.get('connecting_settings', {})
+ al = attributes.get("access_log", {})
+ czlb = attributes.get("cross_zone_load_balancing", {})
+ cd = attributes.get("connection_draining", {})
+ cs = attributes.get("connecting_settings", {})
if not al and not czlb and not cd and not cs:
- log.error('No supported attributes for ELB.')
+ log.error("No supported attributes for ELB.")
return False
if al:
_al = AccessLogAttribute()
- _al.enabled = al.get('enabled', False)
+ _al.enabled = al.get("enabled", False)
if not _al.enabled:
- msg = 'Access log attribute configured, but enabled config missing'
+ msg = "Access log attribute configured, but enabled config missing"
log.error(msg)
return False
- _al.s3_bucket_name = al.get('s3_bucket_name', None)
- _al.s3_bucket_prefix = al.get('s3_bucket_prefix', None)
- _al.emit_interval = al.get('emit_interval', None)
- added_attr = conn.modify_lb_attribute(name, 'accessLog', _al)
+ _al.s3_bucket_name = al.get("s3_bucket_name", None)
+ _al.s3_bucket_prefix = al.get("s3_bucket_prefix", None)
+ _al.emit_interval = al.get("emit_interval", None)
+ added_attr = conn.modify_lb_attribute(name, "accessLog", _al)
if added_attr:
- log.info('Added access_log attribute to %s elb.', name)
+ log.info("Added access_log attribute to %s elb.", name)
else:
- log.error('Failed to add access_log attribute to %s elb.', name)
+ log.error("Failed to add access_log attribute to %s elb.", name)
return False
if czlb:
_czlb = CrossZoneLoadBalancingAttribute()
- _czlb.enabled = czlb['enabled']
- added_attr = conn.modify_lb_attribute(name, 'crossZoneLoadBalancing',
- _czlb.enabled)
+ _czlb.enabled = czlb["enabled"]
+ added_attr = conn.modify_lb_attribute(
+ name, "crossZoneLoadBalancing", _czlb.enabled
+ )
if added_attr:
- log.info('Added cross_zone_load_balancing attribute to %s elb.', name)
+ log.info("Added cross_zone_load_balancing attribute to %s elb.", name)
else:
- log.error('Failed to add cross_zone_load_balancing attribute.')
+ log.error("Failed to add cross_zone_load_balancing attribute.")
return False
if cd:
_cd = ConnectionDrainingAttribute()
- _cd.enabled = cd['enabled']
- _cd.timeout = cd.get('timeout', 300)
- added_attr = conn.modify_lb_attribute(name, 'connectionDraining', _cd)
+ _cd.enabled = cd["enabled"]
+ _cd.timeout = cd.get("timeout", 300)
+ added_attr = conn.modify_lb_attribute(name, "connectionDraining", _cd)
if added_attr:
- log.info('Added connection_draining attribute to %s elb.', name)
+ log.info("Added connection_draining attribute to %s elb.", name)
else:
- log.error('Failed to add connection_draining attribute.')
+ log.error("Failed to add connection_draining attribute.")
return False
if cs:
_cs = ConnectionSettingAttribute()
- _cs.idle_timeout = cs.get('idle_timeout', 60)
- added_attr = conn.modify_lb_attribute(name, 'connectingSettings', _cs)
+ _cs.idle_timeout = cs.get("idle_timeout", 60)
+ added_attr = conn.modify_lb_attribute(name, "connectingSettings", _cs)
if added_attr:
- log.info('Added connecting_settings attribute to %s elb.', name)
+ log.info("Added connecting_settings attribute to %s elb.", name)
else:
- log.error('Failed to add connecting_settings attribute.')
+ log.error("Failed to add connecting_settings attribute.")
return False
return True
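A hypothetical call from another execution module or state, enabling connection draining with a 60-second timeout; the dictionary keys mirror the attribute names parsed above:

.. code-block:: python

    __salt__["boto_elb.set_attributes"](
        "myelb",
        {"connection_draining": {"enabled": True, "timeout": 60}},
        region="us-east-1",
    )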
def get_health_check(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the health check configured for this ELB.
CLI example:
@@ -632,7 +674,7 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.get_health_check myelb
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
@@ -642,26 +684,26 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
lb = lb[0]
ret = odict.OrderedDict()
hc = lb.health_check
- ret['interval'] = hc.interval
- ret['target'] = hc.target
- ret['healthy_threshold'] = hc.healthy_threshold
- ret['timeout'] = hc.timeout
- ret['unhealthy_threshold'] = hc.unhealthy_threshold
+ ret["interval"] = hc.interval
+ ret["target"] = hc.target
+ ret["healthy_threshold"] = hc.healthy_threshold
+ ret["timeout"] = hc.timeout
+ ret["unhealthy_threshold"] = hc.unhealthy_threshold
return ret
except boto.exception.BotoServerError as e:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, will retry in 5 seconds.')
+ if retries and e.code == "Throttling":
+ log.debug("Throttled by AWS API, will retry in 5 seconds.")
time.sleep(5)
retries -= 1
continue
- log.error('ELB %s not found.', name,
- exc_info_on_logleve=logging.DEBUG)
+ log.error("ELB %s not found.", name, exc_info_on_logleve=logging.DEBUG)
return {}
-def set_health_check(name, health_check, region=None, key=None, keyid=None,
- profile=None):
- '''
+def set_health_check(
+ name, health_check, region=None, key=None, keyid=None, profile=None
+):
+ """
Set the health check configuration on an ELB.
CLI example to set a health check on an ELB:
@@ -669,7 +711,7 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.set_health_check myelb '{"target": "HTTP:80/"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
@@ -677,21 +719,22 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
while True:
try:
conn.configure_health_check(name, hc)
- log.info('Configured health check on ELB %s', name)
+ log.info("Configured health check on ELB %s", name)
return True
except boto.exception.BotoServerError as error:
- if retries and e.code == 'Throttling':
- log.debug('Throttled by AWS API, will retry in 5 seconds.')
+ if retries and error.code == "Throttling":
+ log.debug("Throttled by AWS API, will retry in 5 seconds.")
time.sleep(5)
retries -= 1
continue
- log.exception('Failed to configure health check on ELB %s', name)
+ log.exception("Failed to configure health check on ELB %s", name)
return False
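Several functions in this module repeat the same throttling loop: retry up to 30 times, sleeping 5 seconds between attempts. Reduced to a standalone sketch (``_retry_on_throttle`` is illustrative, not part of the module):

.. code-block:: python

    import time

    import boto.exception

    def _retry_on_throttle(call, retries=30):
        # ``call`` is a zero-argument callable wrapping the boto request
        while True:
            try:
                return call()
            except boto.exception.BotoServerError as error:
                if retries and error.code == "Throttling":
                    time.sleep(5)
                    retries -= 1
                    continue
                raise

    # e.g. _retry_on_throttle(lambda: conn.configure_health_check(name, hc))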
-def register_instances(name, instances, region=None, key=None, keyid=None,
- profile=None):
- '''
+def register_instances(
+ name, instances, region=None, key=None, keyid=None, profile=None
+):
+ """
Register instances with an ELB. ``instances`` is either a string
instance id or a list of string instance ids.
@@ -706,7 +749,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
salt myminion boto_elb.register_instances myelb instance_id
salt myminion boto_elb.register_instances myelb "[instance_id,instance_id]"
- '''
+ """
# convert instances to list type, enabling consistent use of instances
# variable throughout the register_instances method
if isinstance(instances, six.string_types) or isinstance(instances, six.text_type):
@@ -718,23 +761,24 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
except boto.exception.BotoServerError as error:
log.warning(error)
return False
- registered_instance_ids = [instance.id for instance in
- registered_instances]
+ registered_instance_ids = [instance.id for instance in registered_instances]
# register_failues is a set that will contain any instances that were not
# able to be registered with the given ELB
register_failures = set(instances).difference(set(registered_instance_ids))
if register_failures:
- log.warning('Instance(s): %s not registered with ELB %s.',
- list(register_failures), name)
+ log.warning(
+ "Instance(s): %s not registered with ELB %s.", list(register_failures), name
+ )
register_result = False
else:
register_result = True
return register_result
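The failure check is plain set arithmetic; in isolation (illustrative instance ids):

.. code-block:: python

    instances = ["i-0123", "i-4567"]
    registered_instance_ids = ["i-0123"]   # ids boto reports as attached
    register_failures = set(instances).difference(set(registered_instance_ids))
    # -> {"i-4567"}: requested but not registered, so the call returns False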
-def deregister_instances(name, instances, region=None, key=None, keyid=None,
- profile=None):
- '''
+def deregister_instances(
+ name, instances, region=None, key=None, keyid=None, profile=None
+):
+ """
Deregister instances from an ELB. ``instances`` is either a string
instance id or a list of string instance ids.
@@ -750,7 +794,7 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
salt myminion boto_elb.deregister_instances myelb instance_id
salt myminion boto_elb.deregister_instances myelb "[instance_id, instance_id]"
- '''
+ """
# convert instances to list type, enabling consistent use of instances
# variable throughout the deregister_instances method
if isinstance(instances, six.string_types) or isinstance(instances, six.text_type):
@@ -764,24 +808,26 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
# boto returns error.error_code == 'InvalidInstance'
# deregister_instances returns "None" because the instances are
# effectively deregistered from ELB
- if error.error_code == 'InvalidInstance':
+ if error.error_code == "InvalidInstance":
log.warning(
- 'One or more of instance(s) %s are not part of ELB %s. '
- 'deregister_instances not performed.', instances, name
+ "One or more of instance(s) %s are not part of ELB %s. "
+ "deregister_instances not performed.",
+ instances,
+ name,
)
return None
else:
log.warning(error)
return False
- registered_instance_ids = [instance.id for instance in
- registered_instances]
+ registered_instance_ids = [instance.id for instance in registered_instances]
# deregister_failures is a set that will contain any instances that were
# unable to be deregistered from the given ELB
deregister_failures = set(instances).intersection(set(registered_instance_ids))
if deregister_failures:
log.warning(
- 'Instance(s): %s not deregistered from ELB %s.',
- list(deregister_failures), name
+ "Instance(s): %s not deregistered from ELB %s.",
+ list(deregister_failures),
+ name,
)
deregister_result = False
else:
@@ -789,9 +835,10 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
return deregister_result
-def set_instances(name, instances, test=False, region=None, key=None, keyid=None,
- profile=None):
- '''
+def set_instances(
+ name, instances, test=False, region=None, key=None, keyid=None, profile=None
+):
+ """
Set the instances assigned to an ELB to exactly the list given
CLI example:
@@ -799,16 +846,24 @@ def set_instances(name, instances, test=False, region=None, key=None, keyid=None
.. code-block:: bash
salt myminion boto_elb.set_instances myelb region=us-east-1 instances="[instance_id,instance_id]"
- '''
+ """
ret = True
- current = set([i['instance_id'] for i in get_instance_health(name, region, key, keyid, profile)])
+ current = set(
+ [
+ i["instance_id"]
+ for i in get_instance_health(name, region, key, keyid, profile)
+ ]
+ )
desired = set(instances)
add = desired - current
remove = current - desired
if test:
return bool(add or remove)
if remove:
- if deregister_instances(name, list(remove), region, key, keyid, profile) is False:
+ if (
+ deregister_instances(name, list(remove), region, key, keyid, profile)
+ is False
+ ):
ret = False
if add:
if register_instances(name, list(add), region, key, keyid, profile) is False:
@@ -816,8 +871,10 @@ def set_instances(name, instances, test=False, region=None, key=None, keyid=None
return ret
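The reconciliation above is two set differences; a worked example with illustrative ids:

.. code-block:: python

    current = {"i-aaa", "i-bbb"}   # from get_instance_health
    desired = {"i-bbb", "i-ccc"}   # the exact membership requested
    add = desired - current        # {"i-ccc"}  -> register_instances
    remove = current - desired     # {"i-aaa"}  -> deregister_instances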
-def get_instance_health(name, region=None, key=None, keyid=None, profile=None, instances=None):
- '''
+def get_instance_health(
+ name, region=None, key=None, keyid=None, profile=None, instances=None
+):
+ """
Get a list of instances and their health state
CLI example:
@@ -826,27 +883,38 @@ def get_instance_health(name, region=None, key=None, keyid=None, profile=None, i
salt myminion boto_elb.get_instance_health myelb
salt myminion boto_elb.get_instance_health myelb region=us-east-1 instances="[instance_id,instance_id]"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
instance_states = conn.describe_instance_health(name, instances)
ret = []
for _instance in instance_states:
- ret.append({'instance_id': _instance.instance_id,
- 'description': _instance.description,
- 'state': _instance.state,
- 'reason_code': _instance.reason_code
- })
+ ret.append(
+ {
+ "instance_id": _instance.instance_id,
+ "description": _instance.description,
+ "state": _instance.state,
+ "reason_code": _instance.reason_code,
+ }
+ )
return ret
except boto.exception.BotoServerError as error:
log.debug(error)
return []
-def create_policy(name, policy_name, policy_type, policy, region=None,
- key=None, keyid=None, profile=None):
- '''
+def create_policy(
+ name,
+ policy_name,
+ policy_type,
+ policy,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an ELB policy.
.. versionadded:: 2016.3.0
@@ -856,7 +924,7 @@ def create_policy(name, policy_name, policy_type, policy, region=None,
.. code-block:: bash
salt myminion boto_elb.create_policy myelb mypolicy LBCookieStickinessPolicyType '{"CookieExpirationPeriod": 3600}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
@@ -864,21 +932,24 @@ def create_policy(name, policy_name, policy_type, policy, region=None,
try:
success = conn.create_lb_policy(name, policy_name, policy_type, policy)
if success:
- log.info('Created policy %s on ELB %s', policy_name, name)
+ log.info("Created policy %s on ELB %s", policy_name, name)
return True
else:
- log.error('Failed to create policy %s on ELB %s', policy_name, name)
+ log.error("Failed to create policy %s on ELB %s", policy_name, name)
return False
except boto.exception.BotoServerError as e:
- log.error('Failed to create policy %s on ELB %s: %s',
- policy_name, name, e.message,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to create policy %s on ELB %s: %s",
+ policy_name,
+ name,
+ e.message,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def delete_policy(name, policy_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_policy(name, policy_name, region=None, key=None, keyid=None, profile=None):
+ """
Delete an ELB policy.
.. versionadded:: 2016.3.0
@@ -888,25 +959,30 @@ def delete_policy(name, policy_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_elb.delete_policy myelb mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
return True
try:
conn.delete_lb_policy(name, policy_name)
- log.info('Deleted policy %s on ELB %s', policy_name, name)
+ log.info("Deleted policy %s on ELB %s", policy_name, name)
return True
except boto.exception.BotoServerError as e:
- log.error('Failed to delete policy %s on ELB %s: %s',
- policy_name, name, e.message,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to delete policy %s on ELB %s: %s",
+ policy_name,
+ name,
+ e.message,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
-def set_listener_policy(name, port, policies=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def set_listener_policy(
+ name, port, policies=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Set the policies of an ELB listener.
.. versionadded:: 2016.3.0
@@ -916,7 +992,7 @@ def set_listener_policy(name, port, policies=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_elb.set_listener_policy myelb 443 "[policy1,policy2]"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
@@ -925,24 +1001,30 @@ def set_listener_policy(name, port, policies=None, region=None, key=None,
policies = []
try:
conn.set_lb_policies_of_listener(name, port, policies)
- log.info('Set policies %s on ELB %s listener %s', policies, name, port)
+ log.info("Set policies %s on ELB %s listener %s", policies, name, port)
except boto.exception.BotoServerError as e:
- log.info('Failed to set policy %s on ELB %s listener %s: %s',
- policies, name, port, e.message,
- exc_info_on_loglevel=logging.DEBUG)
+ log.info(
+ "Failed to set policy %s on ELB %s listener %s: %s",
+ policies,
+ name,
+ port,
+ e.message,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
return True
-def set_backend_policy(name, port, policies=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def set_backend_policy(
+ name, port, policies=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Set the policies of an ELB backend server.
CLI example:
salt myminion boto_elb.set_backend_policy myelb 443 "[policy1,policy2]"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not exists(name, region, key, keyid, profile):
@@ -951,18 +1033,22 @@ def set_backend_policy(name, port, policies=None, region=None, key=None,
policies = []
try:
conn.set_lb_policies_of_backend_server(name, port, policies)
- log.info('Set policies %s on ELB %s backend server %s',
- policies, name, port)
+ log.info("Set policies %s on ELB %s backend server %s", policies, name, port)
except boto.exception.BotoServerError as e:
- log.info('Failed to set policy %s on ELB %s backend server %s: %s',
- policies, name, port, e.message,
- exc_info_on_loglevel=logging.DEBUG)
+ log.info(
+ "Failed to set policy %s on ELB %s backend server %s: %s",
+ policies,
+ name,
+ port,
+ e.message,
+ exc_info_on_loglevel=logging.DEBUG,
+ )
return False
return True
def set_tags(name, tags, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Add tags to an ELB
.. versionadded:: 2016.3.0
@@ -978,7 +1064,7 @@ def set_tags(name, tags, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.set_tags my-elb-name "{'Tag1': 'Value', 'Tag2': 'Another Value'}"
- '''
+ """
if exists(name, region, key, keyid, profile):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -989,7 +1075,7 @@ def set_tags(name, tags, region=None, key=None, keyid=None, profile=None):
def delete_tags(name, tags, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete tags from an ELB
name
@@ -1003,7 +1089,7 @@ def delete_tags(name, tags, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_elb.delete_tags my-elb-name ['TagToRemove1', 'TagToRemove2']
- '''
+ """
if exists(name, region, key, keyid, profile):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = _remove_tags(conn, name, tags)
@@ -1013,21 +1099,21 @@ def delete_tags(name, tags, region=None, key=None, keyid=None, profile=None):
def _build_tag_param_list(params, tags):
- '''
+ """
helper function to build a tag parameter list to send
- '''
+ """
keys = sorted(tags.keys())
i = 1
for key in keys:
value = tags[key]
- params['Tags.member.{0}.Key'.format(i)] = key
+ params["Tags.member.{0}.Key".format(i)] = key
if value is not None:
- params['Tags.member.{0}.Value'.format(i)] = value
+ params["Tags.member.{0}.Value".format(i)] = value
i += 1
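Given a tags mapping, the helper emits the 1-based ``Tags.member.N`` parameters AWS expects, with keys sorted alphabetically; for example:

.. code-block:: python

    params = {}
    _build_tag_param_list(params, {"Owner": "ops", "Env": None})
    # params == {
    #     "Tags.member.1.Key": "Env",      # value None -> no Value entry
    #     "Tags.member.2.Key": "Owner",
    #     "Tags.member.2.Value": "ops",
    # }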
def _get_all_tags(conn, load_balancer_names=None):
- '''
+ """
Retrieve all the metadata tags associated with your ELB(s).
:type load_balancer_names: list
@@ -1035,17 +1121,18 @@ def _get_all_tags(conn, load_balancer_names=None):
:rtype: list
:return: A list of :class:`boto.ec2.elb.tag.Tag` objects
- '''
+ """
params = {}
if load_balancer_names:
- conn.build_list_params(params, load_balancer_names,
- 'LoadBalancerNames.member.%d')
+ conn.build_list_params(
+ params, load_balancer_names, "LoadBalancerNames.member.%d"
+ )
tags = conn.get_object(
- 'DescribeTags',
+ "DescribeTags",
params,
- __utils__['boto_elb_tag.get_tag_descriptions'](),
- verb='POST'
+ __utils__["boto_elb_tag.get_tag_descriptions"](),
+ verb="POST",
)
if tags[load_balancer_names]:
return tags[load_balancer_names]
@@ -1054,7 +1141,7 @@ def _get_all_tags(conn, load_balancer_names=None):
def _add_tags(conn, load_balancer_names, tags):
- '''
+ """
Create new metadata tags for the specified resource ids.
:type load_balancer_names: list
@@ -1065,16 +1152,15 @@ def _add_tags(conn, load_balancer_names, tags):
If you want to create only a tag name, the
value for that tag should be the empty string
(e.g. '').
- '''
+ """
params = {}
- conn.build_list_params(params, load_balancer_names,
- 'LoadBalancerNames.member.%d')
+ conn.build_list_params(params, load_balancer_names, "LoadBalancerNames.member.%d")
_build_tag_param_list(params, tags)
- return conn.get_status('AddTags', params, verb='POST')
+ return conn.get_status("AddTags", params, verb="POST")
def _remove_tags(conn, load_balancer_names, tags):
- '''
+ """
Delete metadata tags for the specified resource ids.
:type load_balancer_names: list
@@ -1083,10 +1169,8 @@ def _remove_tags(conn, load_balancer_names, tags):
:type tags: list
:param tags: A list containing just tag names for the tags to be
deleted.
- '''
+ """
params = {}
- conn.build_list_params(params, load_balancer_names,
- 'LoadBalancerNames.member.%d')
- conn.build_list_params(params, tags,
- 'Tags.member.%d.Key')
- return conn.get_status('RemoveTags', params, verb='POST')
+ conn.build_list_params(params, load_balancer_names, "LoadBalancerNames.member.%d")
+ conn.build_list_params(params, tags, "Tags.member.%d.Key")
+ return conn.get_status("RemoveTags", params, verb="POST")
diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py
index 8d316c4fb9a..e8bac5e666c 100644
--- a/salt/modules/boto_elbv2.py
+++ b/salt/modules/boto_elbv2.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon ALB
.. versionadded:: 2017.7.0
@@ -36,7 +36,7 @@ Connection module for Amazon ALB
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
@@ -45,53 +45,59 @@ from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
-log = logging.getLogger(__name__)
+import salt.utils.versions
# Import Salt libs
from salt.ext import six
-import salt.utils.versions
+
+log = logging.getLogger(__name__)
+
# Import third-party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
+
# pylint: enable=unused-import
# TODO Version check using salt.utils.versions
from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto3 libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'elbv2')
+ __utils__["boto3.assign_funcs"](__name__, "elbv2")
return has_boto_reqs
-def create_target_group(name,
- protocol,
- port,
- vpc_id,
- region=None,
- key=None,
- keyid=None,
- profile=None,
- health_check_protocol='HTTP',
- health_check_port='traffic-port',
- health_check_path='/',
- health_check_interval_seconds=30,
- health_check_timeout_seconds=5,
- healthy_threshold_count=5,
- unhealthy_threshold_count=2):
- '''
+def create_target_group(
+ name,
+ protocol,
+ port,
+ vpc_id,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ health_check_protocol="HTTP",
+ health_check_port="traffic-port",
+ health_check_path="/",
+ health_check_interval_seconds=30,
+ health_check_timeout_seconds=5,
+ healthy_threshold_count=5,
+ unhealthy_threshold_count=2,
+):
+ """
Create target group if not present.
name
@@ -133,42 +139,46 @@ def create_target_group(name,
.. code-block:: bash
salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if target_group_exists(name, region, key, keyid, profile):
return True
try:
- alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
- VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
- HealthCheckPort=health_check_port,
- HealthCheckPath=health_check_path,
- HealthCheckIntervalSeconds=health_check_interval_seconds,
- HealthCheckTimeoutSeconds=health_check_timeout_seconds,
- HealthyThresholdCount=healthy_threshold_count,
- UnhealthyThresholdCount=unhealthy_threshold_count)
+ alb = conn.create_target_group(
+ Name=name,
+ Protocol=protocol,
+ Port=port,
+ VpcId=vpc_id,
+ HealthCheckProtocol=health_check_protocol,
+ HealthCheckPort=health_check_port,
+ HealthCheckPath=health_check_path,
+ HealthCheckIntervalSeconds=health_check_interval_seconds,
+ HealthCheckTimeoutSeconds=health_check_timeout_seconds,
+ HealthyThresholdCount=healthy_threshold_count,
+ UnhealthyThresholdCount=unhealthy_threshold_count,
+ )
if alb:
- log.info('Created ALB %s: %s', name, alb['TargetGroups'][0]['TargetGroupArn'])
+ log.info(
+ "Created ALB %s: %s", name, alb["TargetGroups"][0]["TargetGroupArn"]
+ )
return True
else:
- log.error('Failed to create ALB %s', name)
+ log.error("Failed to create ALB %s", name)
return False
except ClientError as error:
log.error(
- 'Failed to create ALB %s: %s: %s',
- name, error.response['Error']['Code'],
- error.response['Error']['Message'],
- exc_info_on_loglevel=logging.DEBUG
+ "Failed to create ALB %s: %s: %s",
+ name,
+ error.response["Error"]["Code"],
+ error.response["Error"]["Message"],
+ exc_info_on_loglevel=logging.DEBUG,
)
+ return False
-def delete_target_group(name,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete target group.
name
@@ -182,36 +192,33 @@ def delete_target_group(name,
.. code-block:: bash
salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not target_group_exists(name, region, key, keyid, profile):
return True
try:
- if name.startswith('arn:aws:elasticloadbalancing'):
+ if name.startswith("arn:aws:elasticloadbalancing"):
conn.delete_target_group(TargetGroupArn=name)
- log.info('Deleted target group %s', name)
+ log.info("Deleted target group %s", name)
else:
tg_info = conn.describe_target_groups(Names=[name])
- if len(tg_info['TargetGroups']) != 1:
+ if len(tg_info["TargetGroups"]) != 1:
return False
- arn = tg_info['TargetGroups'][0]['TargetGroupArn']
+ arn = tg_info["TargetGroups"][0]["TargetGroupArn"]
conn.delete_target_group(TargetGroupArn=arn)
- log.info('Deleted target group %s ARN %s', name, arn)
+ log.info("Deleted target group %s ARN %s", name, arn)
return True
except ClientError as error:
- log.error('Failed to delete target group %s', name,
- exc_info_on_loglevel=logging.DEBUG)
+ log.error(
+ "Failed to delete target group %s", name, exc_info_on_loglevel=logging.DEBUG
+ )
return False
-def target_group_exists(name,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def target_group_exists(name, region=None, key=None, keyid=None, profile=None):
+ """
Check to see if a target group exists.
CLI example:
@@ -219,31 +226,28 @@ def target_group_exists(name,
.. code-block:: bash
salt myminion boto_elbv2.target_group_exists arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- if name.startswith('arn:aws:elasticloadbalancing'):
+ if name.startswith("arn:aws:elasticloadbalancing"):
alb = conn.describe_target_groups(TargetGroupArns=[name])
else:
alb = conn.describe_target_groups(Names=[name])
if alb:
return True
else:
- log.warning('The target group does not exist in region %s', region)
+ log.warning("The target group does not exist in region %s", region)
return False
except ClientError as error:
- log.warning('target_group_exists check for %s returned: %s', name, error)
+ log.warning("target_group_exists check for %s returned: %s", name, error)
return False
-def describe_target_health(name,
- targets=None,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def describe_target_health(
+ name, targets=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get the current health check status for targets in a target group.
CLI example:
@@ -251,7 +255,7 @@ def describe_target_health(name,
.. code-block:: bash
salt myminion boto_elbv2.describe_target_health arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 targets=["i-isdf23ifjf"]
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -259,12 +263,14 @@ def describe_target_health(name,
targetsdict = []
for target in targets:
targetsdict.append({"Id": target})
- instances = conn.describe_target_health(TargetGroupArn=name, Targets=targetsdict)
+ instances = conn.describe_target_health(
+ TargetGroupArn=name, Targets=targetsdict
+ )
else:
instances = conn.describe_target_health(TargetGroupArn=name)
ret = {}
- for instance in instances['TargetHealthDescriptions']:
- ret.update({instance['Target']['Id']: instance['TargetHealth']['State']})
+ for instance in instances["TargetHealthDescriptions"]:
+ ret.update({instance["Target"]["Id"]: instance["TargetHealth"]["State"]})
return ret
except ClientError as error:
@@ -272,13 +278,8 @@ def describe_target_health(name,
return {}
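The optional ``targets`` argument is reshaped into the list of ``{"Id": ...}`` mappings boto3 expects; in isolation (illustrative ids):

.. code-block:: python

    targets = ["i-isdf23ifjf", "i-0123456789abcdef0"]
    targetsdict = [{"Id": target} for target in targets]
    # -> [{"Id": "i-isdf23ifjf"}, {"Id": "i-0123456789abcdef0"}]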
-def register_targets(name,
- targets,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def register_targets(name, targets, region=None, key=None, keyid=None, profile=None):
+ """
Register targets to a target group of an ALB. ``targets`` is either an
instance id string or a list of instance ids.
@@ -293,7 +294,7 @@ def register_targets(name,
salt myminion boto_elbv2.register_targets myelb instance_id
salt myminion boto_elbv2.register_targets myelb "[instance_id,instance_id]"
- '''
+ """
targetsdict = []
if isinstance(targets, six.string_types) or isinstance(targets, six.text_type):
targetsdict.append({"Id": targets})
@@ -303,7 +304,9 @@ def register_targets(name,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- registered_targets = conn.register_targets(TargetGroupArn=name, Targets=targetsdict)
+ registered_targets = conn.register_targets(
+ TargetGroupArn=name, Targets=targetsdict
+ )
if registered_targets:
return True
return False
@@ -312,13 +315,8 @@ def register_targets(name,
return False
-def deregister_targets(name,
- targets,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def deregister_targets(name, targets, region=None, key=None, keyid=None, profile=None):
+ """
Deregister targets from a target group of an ALB. ``targets`` is either an
instance id string or a list of instance ids.
@@ -333,7 +331,7 @@ def deregister_targets(name,
salt myminion boto_elbv2.deregister_targets myelb instance_id
salt myminion boto_elbv2.deregister_targets myelb "[instance_id,instance_id]"
- '''
+ """
targetsdict = []
if isinstance(targets, six.string_types) or isinstance(targets, six.text_type):
targetsdict.append({"Id": targets})
@@ -343,7 +341,9 @@ def deregister_targets(name,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- registered_targets = conn.deregister_targets(TargetGroupArn=name, Targets=targetsdict)
+ registered_targets = conn.deregister_targets(
+ TargetGroupArn=name, Targets=targetsdict
+ )
if registered_targets:
return True
return False
diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py
index 375db1f75bd..81f43c30dbc 100644
--- a/salt/modules/boto_iam.py
+++ b/salt/modules/boto_iam.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon IAM
.. versionadded:: 2014.7.0
@@ -33,32 +33,36 @@ Connection module for Amazon IAM
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
-# Import salt libs
-from salt.ext import six
import salt.utils.compat
import salt.utils.json
import salt.utils.odict as odict
import salt.utils.versions
+# Import salt libs
+from salt.ext import six
+
# Import third party libs
# pylint: disable=unused-import
-from salt.ext.six.moves.urllib.parse import unquote as _unquote # pylint: disable=no-name-in-module
+from salt.ext.six.moves.urllib.parse import unquote as _unquote
+
try:
import boto
import boto.iam
import boto3
import botocore
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -68,23 +72,20 @@ log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
- return salt.utils.versions.check_boto_reqs(
- check_boto3=False
- )
+ """
+ return salt.utils.versions.check_boto_reqs(check_boto3=False)
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'iam', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "iam", pack=__salt__)
-def instance_profile_exists(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def instance_profile_exists(name, region=None, key=None, keyid=None, profile=None):
+ """
Check to see if an instance profile exists.
CLI Example:
@@ -92,7 +93,7 @@ def instance_profile_exists(name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.instance_profile_exists myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -104,9 +105,8 @@ def instance_profile_exists(name, region=None, key=None, keyid=None,
return False
-def create_instance_profile(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_instance_profile(name, region=None, key=None, keyid=None, profile=None):
+ """
Create an instance profile.
CLI Example:
@@ -114,7 +114,7 @@ def create_instance_profile(name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.create_instance_profile myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if instance_profile_exists(name, region, key, keyid, profile):
@@ -123,17 +123,16 @@ def create_instance_profile(name, region=None, key=None, keyid=None,
# This call returns an instance profile if successful and an exception
# if not. It's annoying.
conn.create_instance_profile(name)
- log.info('Created %s instance profile.', name)
+ log.info("Created %s instance profile.", name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create %s instance profile.', name)
+ log.error("Failed to create %s instance profile.", name)
return False
return True
-def delete_instance_profile(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_instance_profile(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete an instance profile.
CLI Example:
@@ -141,23 +140,23 @@ def delete_instance_profile(name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.delete_instance_profile myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not instance_profile_exists(name, region, key, keyid, profile):
return True
try:
conn.delete_instance_profile(name)
- log.info('Deleted %s instance profile.', name)
+ log.info("Deleted %s instance profile.", name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete %s instance profile.', name)
+ log.error("Failed to delete %s instance profile.", name)
return False
return True
def role_exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an IAM role exists.
CLI Example:
@@ -165,7 +164,7 @@ def role_exists(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.role_exists myirole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_role(name)
@@ -175,7 +174,7 @@ def role_exists(name, region=None, key=None, keyid=None, profile=None):
def describe_role(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get information for a role.
CLI Example:
@@ -183,34 +182,33 @@ def describe_role(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.describe_role myirole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_role(name)
if not info:
return False
role = info.get_role_response.get_role_result.role
- role['assume_role_policy_document'] = salt.utils.json.loads(_unquote(
- role.assume_role_policy_document
- ))
+ role["assume_role_policy_document"] = salt.utils.json.loads(
+ _unquote(role.assume_role_policy_document)
+ )
# If Sid wasn't defined by the user, boto will still return a Sid in
# each policy. To properly check idempotently, let's remove the Sid
# from the return if it's not actually set.
- for policy_key, policy in role['assume_role_policy_document'].items():
- if policy_key == 'Statement':
+ for policy_key, policy in role["assume_role_policy_document"].items():
+ if policy_key == "Statement":
for val in policy:
- if 'Sid' in val and not val['Sid']:
- del val['Sid']
+ if "Sid" in val and not val["Sid"]:
+ del val["Sid"]
return role
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get %s information.', name)
+ log.error("Failed to get %s information.", name)
return False
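The Sid-stripping step above keeps comparisons idempotent: boto returns an empty ``Sid`` even when the user never set one. The trim in isolation, on an illustrative document:

.. code-block:: python

    doc = {"Statement": [{"Sid": "", "Effect": "Allow", "Action": "sts:AssumeRole"}]}
    for policy_key, policy in doc.items():
        if policy_key == "Statement":
            for val in policy:
                if "Sid" in val and not val["Sid"]:
                    del val["Sid"]
    # doc -> {"Statement": [{"Effect": "Allow", "Action": "sts:AssumeRole"}]}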
-def create_user(user_name, path=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_user(user_name, path=None, region=None, key=None, keyid=None, profile=None):
+ """
Create a user.
.. versionadded:: 2015.8.0
@@ -220,25 +218,32 @@ def create_user(user_name, path=None, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.create_user myuser
- '''
+ """
if not path:
- path = '/'
+ path = "/"
if get_user(user_name, region, key, keyid, profile):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.create_user(user_name, path)
- log.info('Created IAM user : %s.', user_name)
+ log.info("Created IAM user : %s.", user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create IAM user %s.', user_name)
+ log.error("Failed to create IAM user %s.", user_name)
return False
-def get_all_access_keys(user_name, marker=None, max_items=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_all_access_keys(
+ user_name,
+ marker=None,
+ max_items=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get all access keys from a user.
.. versionadded:: 2015.8.0
@@ -248,18 +253,18 @@ def get_all_access_keys(user_name, marker=None, max_items=None,
.. code-block:: bash
salt myminion boto_iam.get_all_access_keys myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.get_all_access_keys(user_name, marker, max_items)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get access keys for IAM user %s.', user_name)
+ log.error("Failed to get access keys for IAM user %s.", user_name)
return six.text_type(e)
def create_access_key(user_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Create access key id for a user.
.. versionadded:: 2015.8.0
@@ -269,19 +274,20 @@ def create_access_key(user_name, region=None, key=None, keyid=None, profile=None
.. code-block:: bash
salt myminion boto_iam.create_access_key myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.create_access_key(user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create access key.')
+ log.error("Failed to create access key.")
return six.text_type(e)
-def delete_access_key(access_key_id, user_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_access_key(
+ access_key_id, user_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete access key id from a user.
.. versionadded:: 2015.8.0
@@ -291,19 +297,18 @@ def delete_access_key(access_key_id, user_name=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.delete_access_key myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_access_key(access_key_id, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete access key id %s.', access_key_id)
+ log.error("Failed to delete access key id %s.", access_key_id)
return six.text_type(e)
-def delete_user(user_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_user(user_name, region=None, key=None, keyid=None, profile=None):
+ """
Delete a user.
.. versionadded:: 2015.8.0
@@ -313,22 +318,22 @@ def delete_user(user_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.delete_user myuser
- '''
+ """
if not get_user(user_name, region, key, keyid, profile):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_user(user_name)
- log.info('Deleted IAM user : %s .', user_name)
+ log.info("Deleted IAM user : %s .", user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete IAM user %s', user_name)
+ log.error("Failed to delete IAM user %s", user_name)
return six.text_type(e)
def get_user(user_name=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get user information.
.. versionadded:: 2015.8.0
@@ -338,7 +343,7 @@ def get_user(user_name=None, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.get_user myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_user(user_name)
@@ -347,13 +352,14 @@ def get_user(user_name=None, region=None, key=None, keyid=None, profile=None):
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get IAM user %s info.', user_name)
+ log.error("Failed to get IAM user %s info.", user_name)
return False
-def create_group(group_name, path=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_group(
+ group_name, path=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Create a group.
.. versionadded:: 2015.8.0
@@ -363,25 +369,24 @@ def create_group(group_name, path=None, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.create_group group
- '''
+ """
if not path:
- path = '/'
- if get_group(group_name, region=region, key=key, keyid=keyid,
- profile=profile):
+ path = "/"
+ if get_group(group_name, region=region, key=key, keyid=keyid, profile=profile):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.create_group(group_name, path)
- log.info('Created IAM group : %s.', group_name)
+ log.info("Created IAM group : %s.", group_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create IAM group %s.', group_name)
+ log.error("Failed to create IAM group %s.", group_name)
return False
def get_group(group_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get group information.
.. versionadded:: 2015.8.0
@@ -391,21 +396,21 @@ def get_group(group_name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.get_group mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_group(group_name, max_items=1)
if not info:
return False
- return info['get_group_response']['get_group_result']['group']
+ return info["get_group_response"]["get_group_result"]["group"]
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get IAM group %s info.', group_name)
+ log.error("Failed to get IAM group %s info.", group_name)
return False
def get_group_members(group_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the members of a group.
.. versionadded:: 2016.3.0
@@ -415,7 +420,7 @@ def get_group_members(group_name, region=None, key=None, keyid=None, profile=Non
.. code-block:: bash
salt myminion boto_iam.get_group_members mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
marker = None
@@ -425,23 +430,26 @@ def get_group_members(group_name, region=None, key=None, keyid=None, profile=Non
info = conn.get_group(group_name, marker=marker, max_items=1000)
if not info:
return False
- truncated = bool(info['get_group_response']['get_group_result']['is_truncated'])
- if truncated and 'marker' in info['get_group_response']['get_group_result']:
- marker = info['get_group_response']['get_group_result']['marker']
+ truncated = bool(
+ info["get_group_response"]["get_group_result"]["is_truncated"]
+ )
+ if truncated and "marker" in info["get_group_response"]["get_group_result"]:
+ marker = info["get_group_response"]["get_group_result"]["marker"]
else:
marker = None
truncated = False
- users += info['get_group_response']['get_group_result']['users']
+ users += info["get_group_response"]["get_group_result"]["users"]
return users
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get members for IAM group %s.', group_name)
+ log.error("Failed to get members for IAM group %s.", group_name)
return False
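The pagination above follows boto's marker/``is_truncated`` protocol. A reduced sketch with a hypothetical ``fetch_page`` standing in for ``conn.get_group``:

.. code-block:: python

    def iter_members(fetch_page):
        users, marker, truncated = [], None, True
        while truncated:
            result = fetch_page(marker)["get_group_response"]["get_group_result"]
            if bool(result["is_truncated"]) and "marker" in result:
                marker = result["marker"]      # more pages remain
            else:
                marker, truncated = None, False
            users += result["users"]
        return users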
-def add_user_to_group(user_name, group_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def add_user_to_group(
+ user_name, group_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Add user to group.
.. versionadded:: 2015.8.0
@@ -451,13 +459,14 @@ def add_user_to_group(user_name, group_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.add_user_to_group myuser mygroup
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('Username : %s does not exist.', user_name)
+ log.error("Username : %s does not exist.", user_name)
return False
- if user_exists_in_group(user_name, group_name, region=region, key=key,
- keyid=keyid, profile=profile):
+ if user_exists_in_group(
+ user_name, group_name, region=region, key=key, keyid=keyid, profile=profile
+ ):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -467,13 +476,14 @@ def add_user_to_group(user_name, group_name, region=None, key=None, keyid=None,
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to add IAM user %s to group %s.', user_name, group_name)
+ log.error("Failed to add IAM user %s to group %s.", user_name, group_name)
return False
-def user_exists_in_group(user_name, group_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def user_exists_in_group(
+ user_name, group_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Check if user exists in group.
.. versionadded:: 2015.8.0
@@ -483,23 +493,25 @@ def user_exists_in_group(user_name, group_name, region=None, key=None, keyid=Non
.. code-block:: bash
salt myminion boto_iam.user_exists_in_group myuser mygroup
- '''
+ """
# TODO this should probably use boto.iam.get_groups_for_user
users = get_group_members(
- group_name=group_name, region=region, key=key, keyid=keyid,
- profile=profile
+ group_name=group_name, region=region, key=key, keyid=keyid, profile=profile
)
if users:
for _user in users:
- if user_name == _user['user_name']:
- log.debug('IAM user %s is already in IAM group %s.', user_name, group_name)
+ if user_name == _user["user_name"]:
+ log.debug(
+ "IAM user %s is already in IAM group %s.", user_name, group_name
+ )
return True
return False
-def remove_user_from_group(group_name, user_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def remove_user_from_group(
+ group_name, user_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Remove user from group.
.. versionadded:: 2015.8.0
@@ -509,13 +521,14 @@ def remove_user_from_group(group_name, user_name, region=None, key=None, keyid=N
.. code-block:: bash
salt myminion boto_iam.remove_user_from_group mygroup myuser
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist.', user_name)
+ log.error("IAM user %s does not exist.", user_name)
return False
- if not user_exists_in_group(user_name, group_name, region=region, key=key,
- keyid=keyid, profile=profile):
+ if not user_exists_in_group(
+ user_name, group_name, region=region, key=key, keyid=keyid, profile=profile
+ ):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -525,13 +538,20 @@ def remove_user_from_group(group_name, user_name, region=None, key=None, keyid=N
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to remove IAM user %s from group %s', user_name, group_name)
+ log.error("Failed to remove IAM user %s from group %s", user_name, group_name)
return False
-def put_group_policy(group_name, policy_name, policy_json, region=None, key=None,
- keyid=None, profile=None):
- '''
+def put_group_policy(
+ group_name,
+ policy_name,
+ policy_json,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Adds or updates the specified policy document for the specified group.
.. versionadded:: 2015.8.0
@@ -541,31 +561,30 @@ def put_group_policy(group_name, policy_name, policy_json, region=None, key=None
.. code-block:: bash
salt myminion boto_iam.put_group_policy mygroup policyname policyrules
- '''
- group = get_group(group_name, region=region, key=key, keyid=keyid,
- profile=profile)
+ """
+ group = get_group(group_name, region=region, key=key, keyid=keyid, profile=profile)
if not group:
- log.error('Group %s does not exist', group_name)
+ log.error("Group %s does not exist", group_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if not isinstance(policy_json, six.string_types):
policy_json = salt.utils.json.dumps(policy_json)
- created = conn.put_group_policy(group_name, policy_name,
- policy_json)
+ created = conn.put_group_policy(group_name, policy_name, policy_json)
if created:
- log.info('Created policy for IAM group %s.', group_name)
+ log.info("Created policy for IAM group %s.", group_name)
return True
- log.error('Could not create policy for IAM group %s', group_name)
+ log.error("Could not create policy for IAM group %s", group_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create policy for IAM group %s', group_name)
+ log.error("Failed to create policy for IAM group %s", group_name)
return False
-def delete_group_policy(group_name, policy_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_group_policy(
+ group_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a group policy.
CLI Example::
@@ -573,28 +592,31 @@ def delete_group_policy(group_name, policy_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.delete_group_policy mygroup mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
- _policy = get_group_policy(
- group_name, policy_name, region, key, keyid, profile
- )
+ _policy = get_group_policy(group_name, policy_name, region, key, keyid, profile)
if not _policy:
return True
try:
conn.delete_group_policy(group_name, policy_name)
- log.info('Successfully deleted policy %s for IAM group %s.', policy_name, group_name)
+ log.info(
+ "Successfully deleted policy %s for IAM group %s.", policy_name, group_name
+ )
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete policy %s for IAM group %s.', policy_name, group_name)
+ log.error(
+ "Failed to delete policy %s for IAM group %s.", policy_name, group_name
+ )
return False
-def get_group_policy(group_name, policy_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_group_policy(
+ group_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Retrieves the specified policy document for the specified group.
.. versionadded:: 2015.8.0
@@ -604,11 +626,11 @@ def get_group_policy(group_name, policy_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.get_group_policy mygroup policyname
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_group_policy(group_name, policy_name)
- log.debug('info for group policy is : %s', info)
+ log.debug("info for group policy is : %s", info)
if not info:
return False
info = info.get_group_policy_response.get_group_policy_result.policy_document
@@ -617,13 +639,12 @@ def get_group_policy(group_name, policy_name, region=None, key=None,
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get IAM group %s info.', group_name)
+ log.error("Failed to get IAM group %s info.", group_name)
return False
-def get_all_groups(path_prefix='/', region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_groups(path_prefix="/", region=None, key=None, keyid=None, profile=None):
+ """
Get and return all IAM group details, starting at the optional path.
.. versionadded:: 2016.3.0
@@ -631,27 +652,26 @@ def get_all_groups(path_prefix='/', region=None, key=None, keyid=None,
CLI Example:
salt-call boto_iam.get_all_groups
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
_groups = conn.get_all_groups(path_prefix=path_prefix)
groups = _groups.list_groups_response.list_groups_result.groups
- marker = getattr(
- _groups.list_groups_response.list_groups_result, 'marker', None
- )
+ marker = getattr(_groups.list_groups_response.list_groups_result, "marker", None)
while marker:
_groups = conn.get_all_groups(path_prefix=path_prefix, marker=marker)
groups = groups + _groups.list_groups_response.list_groups_result.groups
marker = getattr(
- _groups.list_groups_response.list_groups_result, 'marker', None
+ _groups.list_groups_response.list_groups_result, "marker", None
)
return groups
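# --- Editorial sketch, not part of the module: the marker-based pagination
# idiom used above, and again in get_all_roles()/get_all_users() below. boto
# returns one page per call, and the result object only carries a "marker"
# attribute while more pages remain, so getattr(result, "marker", None)
# doubles as the loop sentinel. Assuming fetch_page(marker) performs one API
# call and returns (items_for_page, next_marker_or_None):
def _paginate_sketch(fetch_page):
    items, marker = [], None
    while True:
        page, marker = fetch_page(marker)
        items += page  # accumulate this page's items
        if not marker:  # an absent marker means the last page was fetched
            return items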
-def get_all_instance_profiles(path_prefix='/', region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_all_instance_profiles(
+ path_prefix="/", region=None, key=None, keyid=None, profile=None
+):
+ """
Get and return all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
@@ -659,23 +679,23 @@ def get_all_instance_profiles(path_prefix='/', region=None, key=None,
CLI Example:
.. code-block:: bash
salt-call boto_iam.get_all_instance_profiles
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
marker = False
profiles = []
while marker is not None:
marker = marker if marker else None
- p = conn.list_instance_profiles(path_prefix=path_prefix,
- marker=marker)
+ p = conn.list_instance_profiles(path_prefix=path_prefix, marker=marker)
res = p.list_instance_profiles_response.list_instance_profiles_result
profiles += res.instance_profiles
- marker = getattr(res, 'marker', None)
+ marker = getattr(res, "marker", None)
return profiles
-def list_instance_profiles(path_prefix='/', region=None, key=None,
- keyid=None, profile=None):
- '''
+def list_instance_profiles(
+ path_prefix="/", region=None, key=None, keyid=None, profile=None
+):
+ """
List all IAM instance profiles, starting at the optional path.
.. versionadded:: 2016.11.0
@@ -683,14 +703,13 @@ def list_instance_profiles(path_prefix='/', region=None, key=None,
CLI Example:
.. code-block:: bash
salt-call boto_iam.list_instance_profiles
- '''
+ """
p = get_all_instance_profiles(path_prefix, region, key, keyid, profile)
- return [i['instance_profile_name'] for i in p]
+ return [i["instance_profile_name"] for i in p]
-def get_all_group_policies(group_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_group_policies(group_name, region=None, key=None, keyid=None, profile=None):
+ """
Get a list of policy names from a group.
CLI Example:
@@ -698,7 +717,7 @@ def get_all_group_policies(group_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.get_all_group_policies mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
@@ -711,9 +730,8 @@ def get_all_group_policies(group_name, region=None, key=None, keyid=None,
return []
-def delete_group(group_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_group(group_name, region=None, key=None, keyid=None, profile=None):
+ """
Delete an IAM group.
CLI Example:
@@ -721,28 +739,27 @@ def delete_group(group_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.delete_group mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
- _group = get_group(
- group_name, region, key, keyid, profile
- )
+ _group = get_group(group_name, region, key, keyid, profile)
if not _group:
return True
try:
conn.delete_group(group_name)
- log.info('Successfully deleted IAM group %s.', group_name)
+ log.info("Successfully deleted IAM group %s.", group_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete IAM group %s.', group_name)
+ log.error("Failed to delete IAM group %s.", group_name)
return False
-def create_login_profile(user_name, password, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_login_profile(
+ user_name, password, region=None, key=None, keyid=None, profile=None
+):
+ """
Creates a login profile for the specified user, giving the user the
ability to access AWS services and the AWS Management Console.
@@ -753,28 +770,27 @@ def create_login_profile(user_name, password, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.create_login_profile user_name password
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist', user_name)
+ log.error("IAM user %s does not exist", user_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.create_login_profile(user_name, password)
- log.info('Created profile for IAM user %s.', user_name)
+ log.info("Created profile for IAM user %s.", user_name)
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- if 'Conflict' in e:
- log.info('Profile already exists for IAM user %s.', user_name)
- return 'Conflict'
- log.error('Failed to update profile for IAM user %s.', user_name)
+ if "Conflict" in e:
+ log.info("Profile already exists for IAM user %s.", user_name)
+ return "Conflict"
+ log.error("Failed to update profile for IAM user %s.", user_name)
return False
-def delete_login_profile(user_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_login_profile(user_name, region=None, key=None, keyid=None, profile=None):
+ """
Deletes a login profile for the specified user.
.. versionadded:: 2016.3.0
@@ -784,28 +800,27 @@ def delete_login_profile(user_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.delete_login_profile user_name
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist', user_name)
+ log.error("IAM user %s does not exist", user_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.delete_login_profile(user_name)
- log.info('Deleted login profile for IAM user %s.', user_name)
+ log.info("Deleted login profile for IAM user %s.", user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- if 'Not Found' in e:
- log.info('Login profile already deleted for IAM user %s.', user_name)
+ if "Not Found" in e:
+ log.info("Login profile already deleted for IAM user %s.", user_name)
return True
- log.error('Failed to delete login profile for IAM user %s.', user_name)
+ log.error("Failed to delete login profile for IAM user %s.", user_name)
return False
-def get_all_mfa_devices(user_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_mfa_devices(user_name, region=None, key=None, keyid=None, profile=None):
+ """
Get all MFA devices associated with an IAM user.
.. versionadded:: 2016.3.0
@@ -815,28 +830,31 @@ def get_all_mfa_devices(user_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.get_all_mfa_devices user_name
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist', user_name)
+ log.error("IAM user %s does not exist", user_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
result = conn.get_all_mfa_devices(user_name)
- devices = result['list_mfa_devices_response']['list_mfa_devices_result']['mfa_devices']
+ devices = result["list_mfa_devices_response"]["list_mfa_devices_result"][
+ "mfa_devices"
+ ]
return devices
except boto.exception.BotoServerError as e:
log.debug(e)
- if 'Not Found' in e:
- log.info('Could not find IAM user %s.', user_name)
+ if "Not Found" in e:
+ log.info("Could not find IAM user %s.", user_name)
return []
- log.error('Failed to get all MFA devices for IAM user %s.', user_name)
+ log.error("Failed to get all MFA devices for IAM user %s.", user_name)
return False
-def deactivate_mfa_device(user_name, serial, region=None, key=None, keyid=None,
- profile=None):
- '''
+def deactivate_mfa_device(
+ user_name, serial, region=None, key=None, keyid=None, profile=None
+):
+ """
Deactivates the specified MFA device and removes it from association with
the user.
@@ -847,27 +865,31 @@ def deactivate_mfa_device(user_name, serial, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.deactivate_mfa_device user_name serial_num
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist', user_name)
+ log.error("IAM user %s does not exist", user_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.deactivate_mfa_device(user_name, serial)
- log.info('Deactivated MFA device %s for IAM user %s.', serial, user_name)
+ log.info("Deactivated MFA device %s for IAM user %s.", serial, user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- if 'Not Found' in e:
- log.info('MFA device %s not associated with IAM user %s.', serial, user_name)
+ if "Not Found" in e:
+ log.info(
+ "MFA device %s not associated with IAM user %s.", serial, user_name
+ )
return True
- log.error('Failed to deactivate MFA device %s for IAM user %s.', serial, user_name)
+ log.error(
+ "Failed to deactivate MFA device %s for IAM user %s.", serial, user_name
+ )
return False
def delete_virtual_mfa_device(serial, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Deletes the specified virtual MFA device.
CLI Example:
@@ -875,31 +897,37 @@ def delete_virtual_mfa_device(serial, region=None, key=None, keyid=None, profile
.. code-block:: bash
salt myminion boto_iam.delete_virtual_mfa_device serial_num
- '''
- conn = __utils__['boto3.get_connection_func']('iam')()
+ """
+ conn = __utils__["boto3.get_connection_func"]("iam")()
try:
conn.delete_virtual_mfa_device(SerialNumber=serial)
- log.info('Deleted virtual MFA device %s.', serial)
+ log.info("Deleted virtual MFA device %s.", serial)
return True
except botocore.exceptions.ClientError as e:
log.debug(e)
- if 'NoSuchEntity' in six.text_type(e):
- log.info('Virtual MFA device %s not found.', serial)
+ if "NoSuchEntity" in six.text_type(e):
+ log.info("Virtual MFA device %s not found.", serial)
return True
- log.error('Failed to delete virtual MFA device %s.', serial)
+ log.error("Failed to delete virtual MFA device %s.", serial)
return False
-def update_account_password_policy(allow_users_to_change_password=None,
- hard_expiry=None, max_password_age=None,
- minimum_password_length=None,
- password_reuse_prevention=None,
- require_lowercase_characters=None,
- require_numbers=None, require_symbols=None,
- require_uppercase_characters=None,
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def update_account_password_policy(
+ allow_users_to_change_password=None,
+ hard_expiry=None,
+ max_password_age=None,
+ minimum_password_length=None,
+ password_reuse_prevention=None,
+ require_lowercase_characters=None,
+ require_numbers=None,
+ require_symbols=None,
+ require_uppercase_characters=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update the password policy for the AWS account.
.. versionadded:: 2015.8.0
@@ -909,27 +937,31 @@ def update_account_password_policy(allow_users_to_change_password=None,
.. code-block:: bash
salt myminion boto_iam.update_account_password_policy True
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- conn.update_account_password_policy(allow_users_to_change_password,
- hard_expiry, max_password_age,
- minimum_password_length,
- password_reuse_prevention,
- require_lowercase_characters,
- require_numbers, require_symbols,
- require_uppercase_characters)
- log.info('The password policy has been updated.')
+ conn.update_account_password_policy(
+ allow_users_to_change_password,
+ hard_expiry,
+ max_password_age,
+ minimum_password_length,
+ password_reuse_prevention,
+ require_lowercase_characters,
+ require_numbers,
+ require_symbols,
+ require_uppercase_characters,
+ )
+ log.info("The password policy has been updated.")
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to update the password policy'
+ msg = "Failed to update the password policy"
log.error(msg)
return False
def get_account_policy(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get account policy for the AWS account.
.. versionadded:: 2015.8.0
@@ -939,21 +971,30 @@ def get_account_policy(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.get_account_policy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_account_password_policy()
- return info.get_account_password_policy_response.get_account_password_policy_result.password_policy
+ return (
+ info.get_account_password_policy_response.get_account_password_policy_result.password_policy
+ )
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to update the password policy.'
+ msg = "Failed to update the password policy."
log.error(msg)
return False
-def create_role(name, policy_document=None, path=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_role(
+ name,
+ policy_document=None,
+ path=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an instance role.
CLI Example:
@@ -961,7 +1002,7 @@ def create_role(name, policy_document=None, path=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.create_role myrole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if role_exists(name, region, key, keyid, profile):
@@ -969,18 +1010,17 @@ def create_role(name, policy_document=None, path=None, region=None, key=None,
if not policy_document:
policy_document = None
try:
- conn.create_role(name, assume_role_policy_document=policy_document,
- path=path)
- log.info('Created IAM role %s.', name)
+ conn.create_role(name, assume_role_policy_document=policy_document, path=path)
+ log.info("Created IAM role %s.", name)
return True
except boto.exception.BotoServerError as e:
log.error(e)
- log.error('Failed to create IAM role %s.', name)
+ log.error("Failed to create IAM role %s.", name)
return False
def delete_role(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an IAM role.
CLI Example:
@@ -988,23 +1028,23 @@ def delete_role(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.delete_role myirole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not role_exists(name, region, key, keyid, profile):
return True
try:
conn.delete_role(name)
- log.info('Deleted %s IAM role.', name)
+ log.info("Deleted %s IAM role.", name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete %s IAM role.', name)
+ log.error("Failed to delete %s IAM role.", name)
return False
def profile_associated(role_name, profile_name, region, key, keyid, profile):
- '''
+ """
Check to see if an instance profile is associated with an IAM role.
CLI Example:
@@ -1012,7 +1052,7 @@ def profile_associated(role_name, profile_name, region, key, keyid, profile):
.. code-block:: bash
salt myminion boto_iam.profile_associated myirole myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
# The IAM module of boto doesn't return objects. Instead you need to grab
@@ -1031,9 +1071,10 @@ def profile_associated(role_name, profile_name, region, key, keyid, profile):
return False
-def associate_profile_to_role(profile_name, role_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def associate_profile_to_role(
+ profile_name, role_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Associate an instance profile with an IAM role.
CLI Example:
@@ -1041,32 +1082,41 @@ def associate_profile_to_role(profile_name, role_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.associate_profile_to_role myirole myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not role_exists(role_name, region, key, keyid, profile):
- log.error('IAM role %s does not exist.', role_name)
+ log.error("IAM role %s does not exist.", role_name)
return False
if not instance_profile_exists(profile_name, region, key, keyid, profile):
- log.error('Instance profile %s does not exist.', profile_name)
+ log.error("Instance profile %s does not exist.", profile_name)
return False
- associated = profile_associated(role_name, profile_name, region, key, keyid, profile)
+ associated = profile_associated(
+ role_name, profile_name, region, key, keyid, profile
+ )
if associated:
return True
else:
try:
conn.add_role_to_instance_profile(profile_name, role_name)
- log.info('Added %s instance profile to IAM role %s.', profile_name, role_name)
+ log.info(
+ "Added %s instance profile to IAM role %s.", profile_name, role_name
+ )
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to add %s instance profile to IAM role %s', profile_name, role_name)
+ log.error(
+ "Failed to add %s instance profile to IAM role %s",
+ profile_name,
+ role_name,
+ )
return False
-def disassociate_profile_from_role(profile_name, role_name, region=None,
- key=None, keyid=None, profile=None):
- '''
+def disassociate_profile_from_role(
+ profile_name, role_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Disassociate an instance profile from an IAM role.
CLI Example:
@@ -1074,32 +1124,39 @@ def disassociate_profile_from_role(profile_name, role_name, region=None,
.. code-block:: bash
salt myminion boto_iam.disassociate_profile_from_role myirole myiprofile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not role_exists(role_name, region, key, keyid, profile):
- log.error('IAM role %s does not exist.', role_name)
+ log.error("IAM role %s does not exist.", role_name)
return False
if not instance_profile_exists(profile_name, region, key, keyid, profile):
- log.error('Instance profile %s does not exist.', profile_name)
+ log.error("Instance profile %s does not exist.", profile_name)
return False
- associated = profile_associated(role_name, profile_name, region, key, keyid, profile)
+ associated = profile_associated(
+ role_name, profile_name, region, key, keyid, profile
+ )
if not associated:
return True
else:
try:
conn.remove_role_from_instance_profile(profile_name, role_name)
- log.info('Removed %s instance profile from IAM role %s.', profile_name, role_name)
+ log.info(
+ "Removed %s instance profile from IAM role %s.", profile_name, role_name
+ )
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to remove %s instance profile from IAM role %s.', profile_name, role_name)
+ log.error(
+ "Failed to remove %s instance profile from IAM role %s.",
+ profile_name,
+ role_name,
+ )
return False
-def list_role_policies(role_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def list_role_policies(role_name, region=None, key=None, keyid=None, profile=None):
+ """
Get a list of policy names from a role.
CLI Example:
@@ -1107,7 +1164,7 @@ def list_role_policies(role_name, region=None, key=None, keyid=None,
.. code-block:: bash
salt myminion boto_iam.list_role_policies myirole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1119,9 +1176,10 @@ def list_role_policies(role_name, region=None, key=None, keyid=None,
return []
-def get_role_policy(role_name, policy_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_role_policy(
+ role_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Get a role policy.
CLI Example:
@@ -1129,7 +1187,7 @@ def get_role_policy(role_name, policy_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.get_role_policy myirole mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
@@ -1144,9 +1202,10 @@ def get_role_policy(role_name, policy_name, region=None, key=None,
return {}
-def create_role_policy(role_name, policy_name, policy, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_role_policy(
+ role_name, policy_name, policy, region=None, key=None, keyid=None, profile=None
+):
+ """
Create or modify a role policy.
CLI Example:
@@ -1154,36 +1213,38 @@ def create_role_policy(role_name, policy_name, policy, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.create_role_policy myirole mypolicy '{"Version": "2012-10-17", "Statement": [{"Action": ["sqs:*"], "Effect": "Allow", "Resource": ["arn:aws:sqs:*:*:*"], "Sid": "MyPolicySqs1"}]}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
_policy = get_role_policy(role_name, policy_name, region, key, keyid, profile)
- mode = 'create'
+ mode = "create"
if _policy:
if _policy == policy:
return True
- mode = 'modify'
+ mode = "modify"
if isinstance(policy, six.string_types):
policy = salt.utils.json.loads(policy, object_pairs_hook=odict.OrderedDict)
try:
_policy = salt.utils.json.dumps(policy)
conn.put_role_policy(role_name, policy_name, _policy)
- if mode == 'create':
- msg = 'Successfully added policy %s to IAM role %s.'
+ if mode == "create":
+ msg = "Successfully added policy %s to IAM role %s."
else:
- msg = 'Successfully modified policy %s for IAM role %s.'
+ msg = "Successfully modified policy %s for IAM role %s."
log.info(msg, policy_name, role_name)
return True
except boto.exception.BotoServerError as e:
log.error(e)
- log.error('Failed to %s policy %s for IAM role %s.',
- mode, policy_name, role_name)
+ log.error(
+ "Failed to %s policy %s for IAM role %s.", mode, policy_name, role_name
+ )
return False
-def delete_role_policy(role_name, policy_name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_role_policy(
+ role_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a role policy.
CLI Example:
@@ -1191,7 +1252,7 @@ def delete_role_policy(role_name, policy_name, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.delete_role_policy myirole mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
_policy = get_role_policy(role_name, policy_name, region, key, keyid, profile)
@@ -1199,19 +1260,20 @@ def delete_role_policy(role_name, policy_name, region=None, key=None,
return True
try:
conn.delete_role_policy(role_name, policy_name)
- log.info('Successfully deleted policy %s for IAM role %s.',
- policy_name, role_name)
+ log.info(
+ "Successfully deleted policy %s for IAM role %s.", policy_name, role_name
+ )
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete policy %s for IAM role %s.',
- policy_name, role_name)
+ log.error("Failed to delete policy %s for IAM role %s.", policy_name, role_name)
return False
-def update_assume_role_policy(role_name, policy_document, region=None,
- key=None, keyid=None, profile=None):
- '''
+def update_assume_role_policy(
+ role_name, policy_document, region=None, key=None, keyid=None, profile=None
+):
+ """
Update an assume role policy for a role.
.. versionadded:: 2015.8.0
@@ -1221,25 +1283,26 @@ def update_assume_role_policy(role_name, policy_document, region=None,
.. code-block:: bash
salt myminion boto_iam.update_assume_role_policy myrole '{"Statement":"..."}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(policy_document, six.string_types):
- policy_document = salt.utils.json.loads(policy_document,
- object_pairs_hook=odict.OrderedDict)
+ policy_document = salt.utils.json.loads(
+ policy_document, object_pairs_hook=odict.OrderedDict
+ )
try:
_policy_document = salt.utils.json.dumps(policy_document)
conn.update_assume_role_policy(role_name, _policy_document)
- log.info('Successfully updated assume role policy for IAM role %s.', role_name)
+ log.info("Successfully updated assume role policy for IAM role %s.", role_name)
return True
except boto.exception.BotoServerError as e:
log.error(e)
- log.error('Failed to update assume role policy for IAM role %s.', role_name)
+ log.error("Failed to update assume role policy for IAM role %s.", role_name)
return False
def build_policy(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Build a default assume role policy.
.. versionadded:: 2015.8.0
@@ -1249,11 +1312,11 @@ def build_policy(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.build_policy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if hasattr(conn, 'build_policy'):
+ if hasattr(conn, "build_policy"):
policy = salt.utils.json.loads(conn.build_policy())
- elif hasattr(conn, '_build_policy'):
+ elif hasattr(conn, "_build_policy"):
policy = salt.utils.json.loads(conn._build_policy())
else:
return {}
@@ -1262,20 +1325,21 @@ def build_policy(region=None, key=None, keyid=None, profile=None):
# into strings, so let's do the same here.
for key, policy_val in policy.items():
for statement in policy_val:
- if (isinstance(statement['Action'], list)
- and len(statement['Action']) == 1):
- statement['Action'] = statement['Action'][0]
- if (isinstance(statement['Principal']['Service'], list)
- and len(statement['Principal']['Service']) == 1):
- statement['Principal']['Service'] = statement['Principal']['Service'][0]
+ if isinstance(statement["Action"], list) and len(statement["Action"]) == 1:
+ statement["Action"] = statement["Action"][0]
+ if (
+ isinstance(statement["Principal"]["Service"], list)
+ and len(statement["Principal"]["Service"]) == 1
+ ):
+ statement["Principal"]["Service"] = statement["Principal"]["Service"][0]
# build_policy doesn't add a version field, which AWS is going to set to a
# default value, when we get it back, so let's set it.
- policy['Version'] = '2008-10-17'
+ policy["Version"] = "2008-10-17"
return policy
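# Editorial note: the flattening above mirrors how AWS echoes policies back.
# For example, a statement built as {"Action": ["sts:AssumeRole"]} is
# normalized to {"Action": "sts:AssumeRole"}, so later comparisons against the
# policy AWS returns do not report a spurious difference.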
def get_account_id(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get the AWS account id associated with the credentials used.
CLI Example:
@@ -1283,39 +1347,36 @@ def get_account_id(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.get_account_id
- '''
- cache_key = 'boto_iam.account_id'
+ """
+ cache_key = "boto_iam.account_id"
if cache_key not in __context__:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_user()
# The get_user call returns a user ARN:
# arn:aws:iam::027050522557:user/salt-test
- arn = ret['get_user_response']['get_user_result']['user']['arn']
- account_id = arn.split(':')[4]
+ arn = ret["get_user_response"]["get_user_result"]["user"]["arn"]
+ account_id = arn.split(":")[4]
except boto.exception.BotoServerError:
# If call failed, then let's try to get the ARN from the metadata
- timeout = boto.config.getfloat(
- 'Boto', 'metadata_service_timeout', 1.0
- )
- attempts = boto.config.getint(
- 'Boto', 'metadata_service_num_attempts', 1
- )
+ timeout = boto.config.getfloat("Boto", "metadata_service_timeout", 1.0)
+ attempts = boto.config.getint("Boto", "metadata_service_num_attempts", 1)
identity = boto.utils.get_instance_identity(
timeout=timeout, num_retries=attempts
)
try:
- account_id = identity['document']['accountId']
+ account_id = identity["document"]["accountId"]
except KeyError:
- log.error('Failed to get account id from instance_identity in'
- ' boto_iam.get_account_id.')
+ log.error(
+ "Failed to get account id from instance_identity in"
+ " boto_iam.get_account_id."
+ )
__context__[cache_key] = account_id
return __context__[cache_key]
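# Editorial note: the account id is field 5 of the colon-delimited ARN, since
# the region field between "iam" and the account id is empty for IAM ARNs:
#     "arn:aws:iam::027050522557:user/salt-test".split(":")[4] == "027050522557"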
-def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_roles(path_prefix=None, region=None, key=None, keyid=None, profile=None):
+ """
Get and return all IAM role details, starting at the optional path.
.. versionadded:: 2016.3.0
@@ -1323,27 +1384,22 @@ def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
CLI Example:
.. code-block:: bash
salt-call boto_iam.get_all_roles
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
_roles = conn.list_roles(path_prefix=path_prefix)
roles = _roles.list_roles_response.list_roles_result.roles
- marker = getattr(
- _roles.list_roles_response.list_roles_result, 'marker', None
- )
+ marker = getattr(_roles.list_roles_response.list_roles_result, "marker", None)
while marker:
_roles = conn.list_roles(path_prefix=path_prefix, marker=marker)
roles = roles + _roles.list_roles_response.list_roles_result.roles
- marker = getattr(
- _roles.list_roles_response.list_roles_result, 'marker', None
- )
+ marker = getattr(_roles.list_roles_response.list_roles_result, "marker", None)
return roles
-def get_all_users(path_prefix='/', region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_all_users(path_prefix="/", region=None, key=None, keyid=None, profile=None):
+ """
Get and return all IAM user details, starting at the optional path.
.. versionadded:: 2016.3.0
@@ -1351,26 +1407,30 @@ def get_all_users(path_prefix='/', region=None, key=None, keyid=None,
CLI Example:
.. code-block:: bash
salt-call boto_iam.get_all_users
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
_users = conn.get_all_users(path_prefix=path_prefix)
users = _users.list_users_response.list_users_result.users
- marker = getattr(
- _users.list_users_response.list_users_result, 'marker', None
- )
+ marker = getattr(_users.list_users_response.list_users_result, "marker", None)
while marker:
_users = conn.get_all_users(path_prefix=path_prefix, marker=marker)
users = users + _users.list_users_response.list_users_result.users
- marker = getattr(
- _users.list_users_response.list_users_result, 'marker', None
- )
+ marker = getattr(_users.list_users_response.list_users_result, "marker", None)
return users
-def get_all_user_policies(user_name, marker=None, max_items=None, region=None, key=None, keyid=None, profile=None):
- '''
+def get_all_user_policies(
+ user_name,
+ marker=None,
+ max_items=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get all user policies.
.. versionadded:: 2015.8.0
@@ -1380,7 +1440,7 @@ def get_all_user_policies(user_name, marker=None, max_items=None, region=None, k
.. code-block:: bash
salt myminion boto_iam.get_all_user_policies myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_all_user_policies(user_name, marker, max_items)
@@ -1390,12 +1450,14 @@ def get_all_user_policies(user_name, marker=None, max_items=None, region=None, k
return _list.policy_names
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get policies for user %s.', user_name)
+ log.error("Failed to get policies for user %s.", user_name)
return False
-def get_user_policy(user_name, policy_name, region=None, key=None, keyid=None, profile=None):
- '''
+def get_user_policy(
+ user_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Retrieves the specified policy document for the specified user.
.. versionadded:: 2015.8.0
@@ -1405,11 +1467,11 @@ def get_user_policy(user_name, policy_name, region=None, key=None, keyid=None, p
.. code-block:: bash
salt myminion boto_iam.get_user_policy myuser mypolicyname
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_user_policy(user_name, policy_name)
- log.debug('Info for IAM user %s policy %s: %s.', user_name, policy_name, info)
+ log.debug("Info for IAM user %s policy %s: %s.", user_name, policy_name, info)
if not info:
return False
info = info.get_user_policy_response.get_user_policy_result.policy_document
@@ -1418,12 +1480,14 @@ def get_user_policy(user_name, policy_name, region=None, key=None, keyid=None, p
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get policy %s for IAM user %s.', policy_name, user_name)
+ log.error("Failed to get policy %s for IAM user %s.", policy_name, user_name)
return False
-def put_user_policy(user_name, policy_name, policy_json, region=None, key=None, keyid=None, profile=None):
- '''
+def put_user_policy(
+ user_name, policy_name, policy_json, region=None, key=None, keyid=None, profile=None
+):
+ """
Adds or updates the specified policy document for the specified user.
.. versionadded:: 2015.8.0
@@ -1433,29 +1497,30 @@ def put_user_policy(user_name, policy_name, policy_json, region=None, key=None,
.. code-block:: bash
salt myminion boto_iam.put_user_policy myuser policyname policyrules
- '''
+ """
user = get_user(user_name, region, key, keyid, profile)
if not user:
- log.error('IAM user %s does not exist', user_name)
+ log.error("IAM user %s does not exist", user_name)
return False
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if not isinstance(policy_json, six.string_types):
policy_json = salt.utils.json.dumps(policy_json)
- created = conn.put_user_policy(user_name, policy_name,
- policy_json)
+ created = conn.put_user_policy(user_name, policy_name, policy_json)
if created:
- log.info('Created policy %s for IAM user %s.', policy_name, user_name)
+ log.info("Created policy %s for IAM user %s.", policy_name, user_name)
return True
- log.error('Could not create policy %s for IAM user %s.', policy_name, user_name)
+ log.error("Could not create policy %s for IAM user %s.", policy_name, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create policy %s for IAM user %s.', policy_name, user_name)
+ log.error("Failed to create policy %s for IAM user %s.", policy_name, user_name)
return False
-def delete_user_policy(user_name, policy_name, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_user_policy(
+ user_name, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a user policy.
CLI Example:
@@ -1463,28 +1528,37 @@ def delete_user_policy(user_name, policy_name, region=None, key=None, keyid=None
.. code-block:: bash
salt myminion boto_iam.delete_user_policy myuser mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
- _policy = get_user_policy(
- user_name, policy_name, region, key, keyid, profile
- )
+ _policy = get_user_policy(user_name, policy_name, region, key, keyid, profile)
if not _policy:
return True
try:
conn.delete_user_policy(user_name, policy_name)
- log.info('Successfully deleted policy %s for IAM user %s.', policy_name, user_name)
+ log.info(
+ "Successfully deleted policy %s for IAM user %s.", policy_name, user_name
+ )
return True
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete policy %s for IAM user %s.', policy_name, user_name)
+ log.error("Failed to delete policy %s for IAM user %s.", policy_name, user_name)
return False
-def upload_server_cert(cert_name, cert_body, private_key, cert_chain=None, path=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def upload_server_cert(
+ cert_name,
+ cert_body,
+ private_key,
+ cert_chain=None,
+ path=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Upload a certificate to Amazon.
.. versionadded:: 2015.8.0
@@ -1505,7 +1579,7 @@ def upload_server_cert(cert_name, cert_body, private_key, cert_chain=None, path=
:param keyid: The keyid to be used in order to connect
:param profile: The profile that contains a dict of region, key, keyid
:return: True / False
- '''
+ """
exists = get_server_certificate(cert_name, region, key, keyid, profile)
if exists:
@@ -1513,16 +1587,16 @@ def upload_server_cert(cert_name, cert_body, private_key, cert_chain=None, path=
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.upload_server_cert(cert_name, cert_body, private_key, cert_chain)
- log.info('Created certificate %s.', cert_name)
+ log.info("Created certificate %s.", cert_name)
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to failed to create certificate %s.', cert_name)
+ log.error("Failed to failed to create certificate %s.", cert_name)
return False
def get_server_certificate(cert_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns certificate information from Amazon
.. versionadded:: 2015.8.0
@@ -1532,7 +1606,7 @@ def get_server_certificate(cert_name, region=None, key=None, keyid=None, profile
.. code-block:: bash
salt myminion boto_iam.get_server_certificate mycert_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_server_certificate(cert_name)
@@ -1541,12 +1615,12 @@ def get_server_certificate(cert_name, region=None, key=None, keyid=None, profile
return info
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to get certificate %s information.', cert_name)
+ log.error("Failed to get certificate %s information.", cert_name)
return False
def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Deletes a certificate from Amazon.
.. versionadded:: 2015.8.0
@@ -1556,19 +1630,18 @@ def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=Non
.. code-block:: bash
salt myminion boto_iam.delete_server_cert mycert_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_server_cert(cert_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to delete certificate %s.', cert_name)
+ log.error("Failed to delete certificate %s.", cert_name)
return False
-def export_users(path_prefix='/', region=None, key=None, keyid=None,
- profile=None):
- '''
+def export_users(path_prefix="/", region=None, key=None, keyid=None, profile=None):
+ """
Get all IAM user details. Produces results that can be used to create an
sls file.
@@ -1577,7 +1650,7 @@ def export_users(path_prefix='/', region=None, key=None, keyid=None,
CLI Example:
.. code-block:: bash
salt-call boto_iam.export_users --out=txt | sed "s/local: //" > iam_users.sls
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
@@ -1586,34 +1659,35 @@ def export_users(path_prefix='/', region=None, key=None, keyid=None,
for user in users:
name = user.user_name
_policies = conn.get_all_user_policies(name, max_items=100)
- _policies = _policies.list_user_policies_response.list_user_policies_result.policy_names
+ _policies = (
+ _policies.list_user_policies_response.list_user_policies_result.policy_names
+ )
policies = {}
for policy_name in _policies:
_policy = conn.get_user_policy(name, policy_name)
- _policy = salt.utils.json.loads(_unquote(
+ _policy = salt.utils.json.loads(
+ _unquote(
_policy.get_user_policy_response.get_user_policy_result.policy_document
- ))
+ )
+ )
policies[policy_name] = _policy
user_sls = []
user_sls.append({"name": name})
user_sls.append({"policies": policies})
user_sls.append({"path": user.path})
results["manage user " + name] = {"boto_iam.user_present": user_sls}
- return __utils__['yaml.safe_dump'](
- results,
- default_flow_style=False,
- indent=2)
+ return __utils__["yaml.safe_dump"](results, default_flow_style=False, indent=2)
-def export_roles(path_prefix='/', region=None, key=None, keyid=None, profile=None):
- '''
+def export_roles(path_prefix="/", region=None, key=None, keyid=None, profile=None):
+ """
Get all IAM role details. Produces results that can be used to create an
sls file.
CLI Example:
.. code-block:: bash
salt-call boto_iam.export_roles --out=txt | sed "s/local: //" > iam_roles.sls
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
@@ -1622,39 +1696,43 @@ def export_roles(path_prefix='/', region=None, key=None, keyid=None, profile=Non
for role in roles:
name = role.role_name
_policies = conn.list_role_policies(name, max_items=100)
- _policies = _policies.list_role_policies_response.list_role_policies_result.policy_names
+ _policies = (
+ _policies.list_role_policies_response.list_role_policies_result.policy_names
+ )
policies = {}
for policy_name in _policies:
_policy = conn.get_role_policy(name, policy_name)
- _policy = salt.utils.json.loads(_unquote(
- _policy.get_role_policy_response.get_role_policy_result.policy_document
- ))
+ _policy = salt.utils.json.loads(
+ _unquote(
+ _policy.get_role_policy_response.get_role_policy_result.policy_document
+ )
+ )
policies[policy_name] = _policy
role_sls = []
role_sls.append({"name": name})
role_sls.append({"policies": policies})
- role_sls.append({'policy_document': salt.utils.json.loads(_unquote(role.assume_role_policy_document))})
+ role_sls.append(
+ {
+ "policy_document": salt.utils.json.loads(
+ _unquote(role.assume_role_policy_document)
+ )
+ }
+ )
role_sls.append({"path": role.path})
results["manage role " + name] = {"boto_iam_role.present": role_sls}
- return __utils__['yaml.safe_dump'](
- results,
- default_flow_style=False,
- indent=2)
+ return __utils__["yaml.safe_dump"](results, default_flow_style=False, indent=2)
def _get_policy_arn(name, region=None, key=None, keyid=None, profile=None):
- if name.startswith('arn:aws:iam:'):
+ if name.startswith("arn:aws:iam:"):
return name
- account_id = get_account_id(
- region=region, key=key, keyid=keyid, profile=profile
- )
- return 'arn:aws:iam::{0}:policy/{1}'.format(account_id, name)
+ account_id = get_account_id(region=region, key=key, keyid=keyid, profile=profile)
+ return "arn:aws:iam::{0}:policy/{1}".format(account_id, name)
-def policy_exists(policy_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def policy_exists(policy_name, region=None, key=None, keyid=None, profile=None):
+ """
Check to see if a policy exists.
CLI Example:
@@ -1662,20 +1740,22 @@ def policy_exists(policy_name,
.. code-block:: bash
salt myminion boto_iam.policy_exists mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- conn.get_policy(_get_policy_arn(policy_name,
- region=region, key=key, keyid=keyid, profile=profile))
+ conn.get_policy(
+ _get_policy_arn(
+ policy_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
return True
except boto.exception.BotoServerError:
return False
-def get_policy(policy_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_policy(policy_name, region=None, key=None, keyid=None, profile=None):
+ """
Get the details of a policy.
CLI Example:
@@ -1683,20 +1763,31 @@ def get_policy(policy_name,
.. code-block:: bash
salt myminion boto_iam.get_policy mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- ret = conn.get_policy(_get_policy_arn(policy_name,
- region=region, key=key, keyid=keyid, profile=profile))
- return ret.get('get_policy_response', {}).get('get_policy_result', {})
+ ret = conn.get_policy(
+ _get_policy_arn(
+ policy_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ )
+ return ret.get("get_policy_response", {}).get("get_policy_result", {})
except boto.exception.BotoServerError:
return None
-def create_policy(policy_name, policy_document, path=None, description=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_policy(
+ policy_name,
+ policy_document,
+ path=None,
+ description=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a policy.
CLI Example:
@@ -1704,30 +1795,29 @@ def create_policy(policy_name, policy_document, path=None, description=None,
.. code-block:: bash
salt myminion boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}]}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policy_document, six.string_types):
policy_document = salt.utils.json.dumps(policy_document)
params = {}
- for arg in 'path', 'description':
+ for arg in "path", "description":
if locals()[arg] is not None:
params[arg] = locals()[arg]
if policy_exists(policy_name, region, key, keyid, profile):
return True
try:
conn.create_policy(policy_name, policy_document, **params)
- log.info('Created IAM policy %s.', policy_name)
+ log.info("Created IAM policy %s.", policy_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create IAM policy %s.', policy_name)
+ log.error("Failed to create IAM policy %s.", policy_name)
return False
return True
-def delete_policy(policy_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_policy(policy_name, region=None, key=None, keyid=None, profile=None):
+ """
Delete a policy.
CLI Example:
@@ -1735,7 +1825,7 @@ def delete_policy(policy_name,
.. code-block:: bash
salt myminion boto_iam.delete_policy mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
@@ -1743,17 +1833,17 @@ def delete_policy(policy_name,
return True
try:
conn.delete_policy(policy_arn)
- log.info('Deleted %s policy.', policy_name)
+ log.info("Deleted %s policy.", policy_name)
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to delete %s policy: %s.', policy_name, aws.get('message'))
+ log.error("Failed to delete %s policy: %s.", policy_name, aws.get("message"))
return False
return True
def list_policies(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List policies.
CLI Example:
@@ -1761,24 +1851,29 @@ def list_policies(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.list_policies
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
policies = []
- for ret in __utils__['boto.paged_call'](conn.list_policies):
- policies.append(ret.get('list_policies_response', {}).get('list_policies_result', {}).get('policies'))
+ for ret in __utils__["boto.paged_call"](conn.list_policies):
+ policies.append(
+ ret.get("list_policies_response", {})
+ .get("list_policies_result", {})
+ .get("policies")
+ )
return policies
except boto.exception.BotoServerError as e:
log.debug(e)
- msg = 'Failed to list policy versions.'
+ msg = "Failed to list policy versions."
log.error(msg)
return []
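# Editorial note: each iteration appends one page's "policies" payload, so the
# return value is a list of per-page lists; callers wanting a flat list can
# flatten it themselves, e.g. [p for page in list_policies() for p in page].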
-def policy_version_exists(policy_name, version_id,
- region=None, key=None, keyid=None, profile=None):
- '''
+def policy_version_exists(
+ policy_name, version_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if a policy version exists.
CLI Example:
@@ -1786,7 +1881,7 @@ def policy_version_exists(policy_name, version_id,
.. code-block:: bash
salt myminion boto_iam.policy_version_exists mypolicy v1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
@@ -1797,9 +1892,10 @@ def policy_version_exists(policy_name, version_id,
return False
-def get_policy_version(policy_name, version_id,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_policy_version(
+ policy_name, version_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Get the details of a policy version.
CLI Example:
@@ -1807,22 +1903,37 @@ def get_policy_version(policy_name, version_id,
.. code-block:: bash
salt myminion boto_iam.get_policy_version mypolicy v1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- ret = conn.get_policy_version(_get_policy_arn(policy_name,
- region=region, key=key, keyid=keyid, profile=profile), version_id)
- retval = ret.get('get_policy_version_response', {}).get('get_policy_version_result', {}).get('policy_version', {})
- retval['document'] = _unquote(retval.get('document'))
- return {'policy_version': retval}
+ ret = conn.get_policy_version(
+ _get_policy_arn(
+ policy_name, region=region, key=key, keyid=keyid, profile=profile
+ ),
+ version_id,
+ )
+ retval = (
+ ret.get("get_policy_version_response", {})
+ .get("get_policy_version_result", {})
+ .get("policy_version", {})
+ )
+ retval["document"] = _unquote(retval.get("document"))
+ return {"policy_version": retval}
except boto.exception.BotoServerError:
return None
-def create_policy_version(policy_name, policy_document, set_as_default=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_policy_version(
+ policy_name,
+ policy_document,
+ set_as_default=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a policy version.
CLI Example:
@@ -1830,30 +1941,36 @@ def create_policy_version(policy_name, policy_document, set_as_default=None,
.. code-block:: bash
salt myminion boto_iam.create_policy_version mypolicy '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]}]}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policy_document, six.string_types):
policy_document = salt.utils.json.dumps(policy_document)
params = {}
- for arg in ('set_as_default',):
+ for arg in ("set_as_default",):
if locals()[arg] is not None:
params[arg] = locals()[arg]
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
ret = conn.create_policy_version(policy_arn, policy_document, **params)
- vid = ret.get('create_policy_version_response', {}).get('create_policy_version_result', {}).get('policy_version', {}).get('version_id')
- log.info('Created IAM policy %s version %s.', policy_name, vid)
- return {'created': True, 'version_id': vid}
+ vid = (
+ ret.get("create_policy_version_response", {})
+ .get("create_policy_version_result", {})
+ .get("policy_version", {})
+ .get("version_id")
+ )
+ log.info("Created IAM policy %s version %s.", policy_name, vid)
+ return {"created": True, "version_id": vid}
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to create IAM policy %s version %s.', policy_name, vid)
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ log.error("Failed to create IAM policy %s version %s.", policy_name, vid)
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def delete_policy_version(policy_name, version_id,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_policy_version(
+ policy_name, version_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete a policy version.
CLI Example:
@@ -1861,7 +1978,7 @@ def delete_policy_version(policy_name, version_id,
.. code-block:: bash
salt myminion boto_iam.delete_policy_version mypolicy v1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
@@ -1869,19 +1986,22 @@ def delete_policy_version(policy_name, version_id,
return True
try:
conn.delete_policy_version(policy_arn, version_id)
- log.info('Deleted IAM policy %s version %s.', policy_name, version_id)
+ log.info("Deleted IAM policy %s version %s.", policy_name, version_id)
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to delete IAM policy %s version %s: %s',
- policy_name, version_id, aws.get('message'))
+ log.error(
+ "Failed to delete IAM policy %s version %s: %s",
+ policy_name,
+ version_id,
+ aws.get("message"),
+ )
return False
return True
-def list_policy_versions(policy_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_policy_versions(policy_name, region=None, key=None, keyid=None, profile=None):
+ """
List versions of a policy.
CLI Example:
@@ -1889,22 +2009,27 @@ def list_policy_versions(policy_name,
.. code-block:: bash
salt myminion boto_iam.list_policy_versions mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
ret = conn.list_policy_versions(policy_arn)
- return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions')
+ return (
+ ret.get("list_policy_versions_response", {})
+ .get("list_policy_versions_result", {})
+ .get("versions")
+ )
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to list versions for IAM policy %s.', policy_name)
+ log.error("Failed to list versions for IAM policy %s.", policy_name)
return []
-def set_default_policy_version(policy_name, version_id,
- region=None, key=None, keyid=None, profile=None):
- '''
+def set_default_policy_version(
+ policy_name, version_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Set the default version of a policy.
CLI Example:
@@ -1912,25 +2037,30 @@ def set_default_policy_version(policy_name, version_id,
.. code-block:: bash
salt myminion boto_iam.set_default_policy_version mypolicy v1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.set_default_policy_version(policy_arn, version_id)
- log.info('Set %s policy to version %s.', policy_name, version_id)
+ log.info("Set %s policy to version %s.", policy_name, version_id)
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to set %s policy to version %s: %s',
- policy_name, version_id, aws.get('message'))
+ log.error(
+ "Failed to set %s policy to version %s: %s",
+ policy_name,
+ version_id,
+ aws.get("message"),
+ )
return False
return True
-def attach_user_policy(policy_name, user_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_user_policy(
+ policy_name, user_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach a managed policy to a user.
CLI Example:
@@ -1938,23 +2068,24 @@ def attach_user_policy(policy_name, user_name,
.. code-block:: bash
salt myminion boto_iam.attach_user_policy mypolicy myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.attach_user_policy(policy_arn, user_name)
- log.info('Attached policy %s to IAM user %s.', policy_name, user_name)
+ log.info("Attached policy %s to IAM user %s.", policy_name, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to attach %s policy to IAM user %s.', policy_name, user_name)
+ log.error("Failed to attach %s policy to IAM user %s.", policy_name, user_name)
return False
return True
-def detach_user_policy(policy_name, user_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def detach_user_policy(
+ policy_name, user_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Detach a managed policy from a user.
CLI Example:
@@ -1962,23 +2093,26 @@ def detach_user_policy(policy_name, user_name,
.. code-block:: bash
salt myminion boto_iam.detach_user_policy mypolicy myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.detach_user_policy(policy_arn, user_name)
- log.info('Detached %s policy from IAM user %s.', policy_name, user_name)
+ log.info("Detached %s policy from IAM user %s.", policy_name, user_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to detach %s policy from IAM user %s.', policy_name, user_name)
+ log.error(
+ "Failed to detach %s policy from IAM user %s.", policy_name, user_name
+ )
return False
return True
-def attach_group_policy(policy_name, group_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_group_policy(
+ policy_name, group_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach a managed policy to a group.
CLI Example:
@@ -1986,23 +2120,26 @@ def attach_group_policy(policy_name, group_name,
.. code-block:: bash
salt myminion boto_iam.attach_group_policy mypolicy mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.attach_group_policy(policy_arn, group_name)
- log.info('Attached policy %s to IAM group %s.', policy_name, group_name)
+ log.info("Attached policy %s to IAM group %s.", policy_name, group_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to attach policy %s to IAM group %s.', policy_name, group_name)
+ log.error(
+ "Failed to attach policy %s to IAM group %s.", policy_name, group_name
+ )
return False
return True
-def detach_group_policy(policy_name, group_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def detach_group_policy(
+ policy_name, group_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Detach a managed policy from a group.
CLI Example:
@@ -2010,23 +2147,26 @@ def detach_group_policy(policy_name, group_name,
.. code-block:: bash
salt myminion boto_iam.detach_group_policy mypolicy mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.detach_group_policy(policy_arn, group_name)
- log.info('Detached policy %s from IAM group %s.', policy_name, group_name)
+ log.info("Detached policy %s from IAM group %s.", policy_name, group_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to detach policy %s from IAM group %s.', policy_name, group_name)
+ log.error(
+ "Failed to detach policy %s from IAM group %s.", policy_name, group_name
+ )
return False
return True
-def attach_role_policy(policy_name, role_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_role_policy(
+ policy_name, role_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach a managed policy to a role.
CLI Example:
@@ -2034,23 +2174,24 @@ def attach_role_policy(policy_name, role_name,
.. code-block:: bash
salt myminion boto_iam.attach_role_policy mypolicy myrole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.attach_role_policy(policy_arn, role_name)
- log.info('Attached policy %s to IAM role %s.', policy_name, role_name)
+ log.info("Attached policy %s to IAM role %s.", policy_name, role_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to attach policy %s to IAM role %s.', policy_name, role_name)
+ log.error("Failed to attach policy %s to IAM role %s.", policy_name, role_name)
return False
return True
-def detach_role_policy(policy_name, role_name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def detach_role_policy(
+ policy_name, role_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Detach a managed policy from a role.
CLI Example:
@@ -2058,23 +2199,32 @@ def detach_role_policy(policy_name, role_name,
.. code-block:: bash
salt myminion boto_iam.detach_role_policy mypolicy myrole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
conn.detach_role_policy(policy_arn, role_name)
- log.info('Detached policy %s from IAM role %s.', policy_name, role_name)
+ log.info("Detached policy %s from IAM role %s.", policy_name, role_name)
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to detach policy %s from IAM role %s.', policy_name, role_name)
+ log.error(
+ "Failed to detach policy %s from IAM role %s.", policy_name, role_name
+ )
return False
return True
-def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_entities_for_policy(
+ policy_name,
+ path_prefix=None,
+ entity_filter=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List entities that a policy is attached to.
CLI Example:
@@ -2082,12 +2232,12 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None,
.. code-block:: bash
salt myminion boto_iam.list_entities_for_policy mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
params = {}
- for arg in ('path_prefix', 'entity_filter'):
+ for arg in ("path_prefix", "entity_filter"):
if locals()[arg] is not None:
params[arg] = locals()[arg]
@@ -2095,28 +2245,43 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None,
while retries:
try:
allret = {
- 'policy_groups': [],
- 'policy_users': [],
- 'policy_roles': [],
+ "policy_groups": [],
+ "policy_users": [],
+ "policy_roles": [],
}
- for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params):
+ for ret in __utils__["boto.paged_call"](
+ conn.list_entities_for_policy, policy_arn=policy_arn, **params
+ ):
for k, v in six.iteritems(allret):
- v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k))
+ v.extend(
+ ret.get("list_entities_for_policy_response", {})
+ .get("list_entities_for_policy_result", {})
+ .get(k, [])
+ )
return allret
except boto.exception.BotoServerError as e:
- if e.error_code == 'Throttling':
+ if e.error_code == "Throttling":
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
- log.error('Failed to list entities for IAM policy %s: %s', policy_name, e.message)
+ log.error(
+ "Failed to list entities for IAM policy %s: %s", policy_name, e.message
+ )
return {}
return {}
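The loop above is this module's standard throttling defense: up to 30 attempts, sleeping a flat five seconds whenever AWS answers with a Throttling error code. Stripped of the pagination detail, the pattern looks like this (_retry_on_throttle is a hypothetical helper shown only to isolate the idea; it is not part of the module):

import time

def _retry_on_throttle(call, retries=30, delay=5, **kwargs):
    # Re-invoke `call` while AWS reports Throttling; give up after `retries`.
    while retries:
        try:
            return call(**kwargs)
        except boto.exception.BotoServerError as exc:  # boto imported above
            if exc.error_code != "Throttling":
                raise
            time.sleep(delay)
            retries -= 1
    return None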
-def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_attached_user_policies(
+ user_name,
+ path_prefix=None,
+ entity_filter=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List managed policies attached to the given user.
CLI Example:
@@ -2124,30 +2289,45 @@ def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None,
.. code-block:: bash
salt myminion boto_iam.list_attached_user_policies myuser
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- params = {'UserName': user_name}
+ params = {"UserName": user_name}
if path_prefix is not None:
- params['PathPrefix'] = path_prefix
+ params["PathPrefix"] = path_prefix
policies = []
try:
# Using conn.get_response is a bit of a hack, but it avoids having to
# rewrite this whole module based on boto3
- for ret in __utils__['boto.paged_call'](conn.get_response, 'ListAttachedUserPolicies', params, list_marker='AttachedPolicies'):
- policies.extend(ret.get('list_attached_user_policies_response', {}).get('list_attached_user_policies_result', {}
- ).get('attached_policies', []))
+ for ret in __utils__["boto.paged_call"](
+ conn.get_response,
+ "ListAttachedUserPolicies",
+ params,
+ list_marker="AttachedPolicies",
+ ):
+ policies.extend(
+ ret.get("list_attached_user_policies_response", {})
+ .get("list_attached_user_policies_result", {})
+ .get("attached_policies", [])
+ )
return policies
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to list attached policies for IAM user %s.', user_name)
+ log.error("Failed to list attached policies for IAM user %s.", user_name)
return []
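The conn.get_response workaround flagged in the comment exists because these three list_attached_*_policies functions predate boto3 in this module. For comparison only, boto3 ships a ready-made paginator for this operation; a rewrite of the user variant might look like the following sketch (credential and region plumbing omitted), with the group and role variants below differing only in the operation name and the GroupName/RoleName parameter:

import boto3

def list_attached_user_policies_boto3(user_name, path_prefix=None):
    # Assumed alternative, not what this module does today.
    iam = boto3.client("iam")
    kwargs = {"UserName": user_name}
    if path_prefix is not None:
        kwargs["PathPrefix"] = path_prefix
    policies = []
    paginator = iam.get_paginator("list_attached_user_policies")
    for page in paginator.paginate(**kwargs):
        policies.extend(page["AttachedPolicies"])
    return policies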
-def list_attached_group_policies(group_name, path_prefix=None, entity_filter=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_attached_group_policies(
+ group_name,
+ path_prefix=None,
+ entity_filter=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List managed policies attached to the given group.
CLI Example:
@@ -2155,30 +2335,45 @@ def list_attached_group_policies(group_name, path_prefix=None, entity_filter=Non
.. code-block:: bash
salt myminion boto_iam.list_attached_group_policies mygroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- params = {'GroupName': group_name}
+ params = {"GroupName": group_name}
if path_prefix is not None:
- params['PathPrefix'] = path_prefix
+ params["PathPrefix"] = path_prefix
policies = []
try:
# Using conn.get_response is a bit of a hack, but it avoids having to
# rewrite this whole module based on boto3
- for ret in __utils__['boto.paged_call'](conn.get_response, 'ListAttachedGroupPolicies', params, list_marker='AttachedPolicies'):
- policies.extend(ret.get('list_attached_group_policies_response', {}).get('list_attached_group_policies_result', {}
- ).get('attached_policies', []))
+ for ret in __utils__["boto.paged_call"](
+ conn.get_response,
+ "ListAttachedGroupPolicies",
+ params,
+ list_marker="AttachedPolicies",
+ ):
+ policies.extend(
+ ret.get("list_attached_group_policies_response", {})
+ .get("list_attached_group_policies_result", {})
+ .get("attached_policies", [])
+ )
return policies
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to list attached policies for IAM group %s.', group_name)
+ log.error("Failed to list attached policies for IAM group %s.", group_name)
return []
-def list_attached_role_policies(role_name, path_prefix=None, entity_filter=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_attached_role_policies(
+ role_name,
+ path_prefix=None,
+ entity_filter=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List managed policies attached to the given role.
CLI Example:
@@ -2186,29 +2381,39 @@ def list_attached_role_policies(role_name, path_prefix=None, entity_filter=None,
.. code-block:: bash
salt myminion boto_iam.list_attached_role_policies myrole
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- params = {'RoleName': role_name}
+ params = {"RoleName": role_name}
if path_prefix is not None:
- params['PathPrefix'] = path_prefix
+ params["PathPrefix"] = path_prefix
policies = []
try:
# Using conn.get_response is a bit of a hack, but it avoids having to
# rewrite this whole module based on boto3
- for ret in __utils__['boto.paged_call'](conn.get_response, 'ListAttachedRolePolicies', params, list_marker='AttachedPolicies'):
- policies.extend(ret.get('list_attached_role_policies_response', {}).get('list_attached_role_policies_result', {}
- ).get('attached_policies', []))
+ for ret in __utils__["boto.paged_call"](
+ conn.get_response,
+ "ListAttachedRolePolicies",
+ params,
+ list_marker="AttachedPolicies",
+ ):
+ policies.extend(
+ ret.get("list_attached_role_policies_response", {})
+ .get("list_attached_role_policies_result", {})
+ .get("attached_policies", [])
+ )
return policies
except boto.exception.BotoServerError as e:
log.debug(e)
- log.error('Failed to list attached policies for IAM role %s.', role_name)
+ log.error("Failed to list attached policies for IAM role %s.", role_name)
return []
-def create_saml_provider(name, saml_metadata_document, region=None, key=None, keyid=None, profile=None):
- '''
+def create_saml_provider(
+ name, saml_metadata_document, region=None, key=None, keyid=None, profile=None
+):
+ """
Create SAML provider
CLI Example:
@@ -2216,21 +2421,21 @@ def create_saml_provider(name, saml_metadata_document, region=None, key=None, ke
.. code-block:: bash
salt myminion boto_iam.create_saml_provider my_saml_provider_name saml_metadata_document
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.create_saml_provider(saml_metadata_document, name)
- log.info('Successfully created %s SAML provider.', name)
+ log.info("Successfully created %s SAML provider.", name)
return True
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to create SAML provider %s.', name)
+ log.error("Failed to create SAML provider %s.", name)
return False
def get_saml_provider_arn(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get SAML provider ARN
CLI Example:
@@ -2238,23 +2443,27 @@ def get_saml_provider_arn(name, region=None, key=None, keyid=None, profile=None)
.. code-block:: bash
salt myminion boto_iam.get_saml_provider_arn my_saml_provider_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
response = conn.list_saml_providers()
- for saml_provider in response.list_saml_providers_response.list_saml_providers_result.saml_provider_list:
- if saml_provider['arn'].endswith(':saml-provider/' + name):
- return saml_provider['arn']
+ for (
+ saml_provider
+ ) in (
+ response.list_saml_providers_response.list_saml_providers_result.saml_provider_list
+ ):
+ if saml_provider["arn"].endswith(":saml-provider/" + name):
+ return saml_provider["arn"]
return False
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to get ARN of SAML provider %s.', name)
+ log.error("Failed to get ARN of SAML provider %s.", name)
return False
def delete_saml_provider(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete SAML provider
CLI Example:
@@ -2262,25 +2471,27 @@ def delete_saml_provider(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.delete_saml_provider my_saml_provider_name
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- saml_provider_arn = get_saml_provider_arn(name, region=region, key=key, keyid=keyid, profile=profile)
+ saml_provider_arn = get_saml_provider_arn(
+ name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not saml_provider_arn:
- log.info('SAML provider %s not found.', name)
+ log.info("SAML provider %s not found.", name)
return True
conn.delete_saml_provider(saml_provider_arn)
- log.info('Successfully deleted SAML provider %s.', name)
+ log.info("Successfully deleted SAML provider %s.", name)
return True
except boto.exception.BotoServerError as e:
- aws = __utils__['boto.get_error'](e)
+ aws = __utils__["boto.get_error"](e)
log.debug(aws)
- log.error('Failed to delete SAML provider %s.', name)
+ log.error("Failed to delete SAML provider %s.", name)
return False
def list_saml_providers(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List SAML providers.
CLI Example:
@@ -2288,22 +2499,24 @@ def list_saml_providers(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.list_saml_providers
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
providers = []
info = conn.list_saml_providers()
- for arn in info['list_saml_providers_response']['list_saml_providers_result']['saml_provider_list']:
- providers.append(arn['arn'].rsplit('/', 1)[1])
+ for arn in info["list_saml_providers_response"]["list_saml_providers_result"][
+ "saml_provider_list"
+ ]:
+ providers.append(arn["arn"].rsplit("/", 1)[1])
return providers
except boto.exception.BotoServerError as e:
- log.debug(__utils__['boto.get_error'](e))
- log.error('Failed to get list of SAML providers.')
+ log.debug(__utils__["boto.get_error"](e))
+ log.error("Failed to get list of SAML providers.")
return False
def get_saml_provider(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get SAML provider document.
CLI Example:
@@ -2311,19 +2524,23 @@ def get_saml_provider(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_iam.get_saml_provider arn
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
provider = conn.get_saml_provider(name)
- return provider['get_saml_provider_response']['get_saml_provider_result']['saml_metadata_document']
+ return provider["get_saml_provider_response"]["get_saml_provider_result"][
+ "saml_metadata_document"
+ ]
except boto.exception.BotoServerError as e:
- log.debug(__utils__['boto.get_error'](e))
- log.error('Failed to get SAML provider document %s.', name)
+ log.debug(__utils__["boto.get_error"](e))
+ log.error("Failed to get SAML provider document %s.", name)
return False
-def update_saml_provider(name, saml_metadata_document, region=None, key=None, keyid=None, profile=None):
- '''
+def update_saml_provider(
+ name, saml_metadata_document, region=None, key=None, keyid=None, profile=None
+):
+ """
Update SAML provider.
CLI Example:
@@ -2331,17 +2548,19 @@ def update_saml_provider(name, saml_metadata_document, region=None, key=None, ke
.. code-block:: bash
salt myminion boto_iam.update_saml_provider my_saml_provider_name saml_metadata_document
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- saml_provider_arn = get_saml_provider_arn(name, region=region, key=key, keyid=keyid, profile=profile)
+ saml_provider_arn = get_saml_provider_arn(
+ name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not saml_provider_arn:
- log.info('SAML provider %s not found.', name)
+ log.info("SAML provider %s not found.", name)
return False
if conn.update_saml_provider(name, saml_metadata_document):
return True
return False
except boto.exception.BotoServerError as e:
- log.debug(__utils__['boto.get_error'](e))
- log.error('Failed to update SAML provider %s.', name)
+ log.debug(__utils__["boto.get_error"](e))
+ log.error("Failed to update SAML provider %s.", name)
return False
diff --git a/salt/modules/boto_iot.py b/salt/modules/boto_iot.py
index 977eb434714..3cc8a979afa 100644
--- a/salt/modules/boto_iot.py
+++ b/salt/modules/boto_iot.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon IoT
.. versionadded:: 2016.3.0
@@ -45,33 +45,38 @@ The dependencies listed above can be installed via package or pip.
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-import logging
+
import datetime
+import logging
# Import Salt libs
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
-log = logging.getLogger(__name__)
-
# Import third party libs
from salt.ext.six import string_types
+
+log = logging.getLogger(__name__)
+
+
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -79,28 +84,24 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_iot execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.2.1',
- botocore_ver='1.4.41'
- )
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.2.1", botocore_ver="1.4.41")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'iot')
+ __utils__["boto3.assign_funcs"](__name__, "iot")
-def thing_type_exists(thingTypeName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def thing_type_exists(thingTypeName, region=None, key=None, keyid=None, profile=None):
+ """
Given a thing type name, check to see if the given thing type exists.
Returns True if the given thing type exists and returns False if the
@@ -114,25 +115,24 @@ def thing_type_exists(thingTypeName,
salt myminion boto_iot.thing_type_exists mythingtype
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.describe_thing_type(thingTypeName=thingTypeName)
- if res.get('thingTypeName'):
- return {'exists': True}
+ if res.get("thingTypeName"):
+ return {"exists": True}
else:
- return {'exists': False}
+ return {"exists": False}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": err}
-def describe_thing_type(thingTypeName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_thing_type(thingTypeName, region=None, key=None, keyid=None, profile=None):
+ """
Given a thing type name, describe its properties.
Returns a dictionary of interesting properties.
@@ -145,32 +145,38 @@ def describe_thing_type(thingTypeName,
salt myminion boto_iot.describe_thing_type mythingtype
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
res = conn.describe_thing_type(thingTypeName=thingTypeName)
if res:
- res.pop('ResponseMetadata', None)
- thingTypeMetadata = res.get('thingTypeMetadata')
+ res.pop("ResponseMetadata", None)
+ thingTypeMetadata = res.get("thingTypeMetadata")
if thingTypeMetadata:
- for dtype in ('creationDate', 'deprecationDate'):
+ for dtype in ("creationDate", "deprecationDate"):
dval = thingTypeMetadata.get(dtype)
if dval and isinstance(dval, datetime.date):
- thingTypeMetadata[dtype] = '{0}'.format(dval)
- return {'thing_type': res}
+ thingTypeMetadata[dtype] = "{0}".format(dval)
+ return {"thing_type": res}
else:
- return {'thing_type': None}
+ return {"thing_type": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'thing_type': None}
- return {'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"thing_type": None}
+ return {"error": err}
-def create_thing_type(thingTypeName, thingTypeDescription,
- searchableAttributesList, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_thing_type(
+ thingTypeName,
+ thingTypeDescription,
+ searchableAttributesList,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create a thing type.
Returns {created: true} if the thing type was created and returns
@@ -185,33 +191,35 @@ def create_thing_type(thingTypeName, thingTypeDescription,
salt myminion boto_iot.create_thing_type mythingtype \\
thingtype_description_string '["searchable_attr_1", "searchable_attr_2"]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
thingTypeProperties = dict(
thingTypeDescription=thingTypeDescription,
- searchableAttributes=searchableAttributesList
+ searchableAttributes=searchableAttributesList,
)
thingtype = conn.create_thing_type(
- thingTypeName=thingTypeName,
- thingTypeProperties=thingTypeProperties
+ thingTypeName=thingTypeName, thingTypeProperties=thingTypeProperties
)
if thingtype:
- log.info('The newly created thing type ARN is %s', thingtype['thingTypeArn'])
+ log.info(
+ "The newly created thing type ARN is %s", thingtype["thingTypeArn"]
+ )
- return {'created': True, 'thingTypeArn': thingtype['thingTypeArn']}
+ return {"created": True, "thingTypeArn": thingtype["thingTypeArn"]}
else:
- log.warning('thing type was not created')
- return {'created': False}
+ log.warning("thing type was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def deprecate_thing_type(thingTypeName, undoDeprecate=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def deprecate_thing_type(
+ thingTypeName, undoDeprecate=False, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a thing type name, deprecate it when undoDeprecate is False
and undeprecate it when undoDeprecate is True.
@@ -226,23 +234,21 @@ def deprecate_thing_type(thingTypeName, undoDeprecate=False,
salt myminion boto_iot.deprecate_thing_type mythingtype
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.deprecate_thing_type(
- thingTypeName=thingTypeName,
- undoDeprecate=undoDeprecate
+ thingTypeName=thingTypeName, undoDeprecate=undoDeprecate
)
deprecated = not undoDeprecate
- return {'deprecated': deprecated}
+ return {"deprecated": deprecated}
except ClientError as e:
- return {'deprecated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deprecated": False, "error": __utils__["boto3.get_error"](e)}
-def delete_thing_type(thingTypeName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_thing_type(thingTypeName, region=None, key=None, keyid=None, profile=None):
+ """
Given a thing type name, delete it.
Returns {deleted: true} if the thing type was deleted and returns
@@ -256,22 +262,21 @@ def delete_thing_type(thingTypeName,
salt myminion boto_iot.delete_thing_type mythingtype
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_thing_type(thingTypeName=thingTypeName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'deleted': True}
- return {'deleted': False, 'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"deleted": True}
+ return {"deleted": False, "error": err}
-def policy_exists(policyName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def policy_exists(policyName, region=None, key=None, keyid=None, profile=None):
+ """
Given a policy name, check to see if the given policy exists.
Returns True if the given policy exists and returns False if the given
@@ -283,22 +288,23 @@ def policy_exists(policyName,
salt myminion boto_iot.policy_exists mypolicy
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.get_policy(policyName=policyName)
- return {'exists': True}
+ return {"exists": True}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": err}
-def create_policy(policyName, policyDocument,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_policy(
+ policyName, policyDocument, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a valid config, create a policy.
Returns {created: true} if the policy was created and returns
@@ -314,28 +320,30 @@ def create_policy(policyName, policyDocument,
"Action":["iot:Publish"],\\
"Resource":["arn:::::topic/foo/bar"]}]}'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policyDocument, string_types):
policyDocument = salt.utils.json.dumps(policyDocument)
- policy = conn.create_policy(policyName=policyName,
- policyDocument=policyDocument)
+ policy = conn.create_policy(
+ policyName=policyName, policyDocument=policyDocument
+ )
if policy:
- log.info('The newly created policy version is %s', policy['policyVersionId'])
+ log.info(
+ "The newly created policy version is %s", policy["policyVersionId"]
+ )
- return {'created': True, 'versionId': policy['policyVersionId']}
+ return {"created": True, "versionId": policy["policyVersionId"]}
else:
- log.warning('Policy was not created')
- return {'created': False}
+ log.warning("Policy was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_policy(policyName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_policy(policyName, region=None, key=None, keyid=None, profile=None):
+ """
Given a policy name, delete it.
Returns {deleted: true} if the policy was deleted and returns
@@ -347,19 +355,18 @@ def delete_policy(policyName,
salt myminion boto_iot.delete_policy mypolicy
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_policy(policyName=policyName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe_policy(policyName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_policy(policyName, region=None, key=None, keyid=None, profile=None):
+ """
Given a policy name, describe its properties.
Returns a dictionary of interesting properties.
@@ -370,27 +377,27 @@ def describe_policy(policyName,
salt myminion boto_iot.describe_policy mypolicy
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy = conn.get_policy(policyName=policyName)
if policy:
- keys = ('policyName', 'policyArn', 'policyDocument',
- 'defaultVersionId')
- return {'policy': dict([(k, policy.get(k)) for k in keys])}
+ keys = ("policyName", "policyArn", "policyDocument", "defaultVersionId")
+ return {"policy": dict([(k, policy.get(k)) for k in keys])}
else:
- return {'policy': None}
+ return {"policy": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'policy': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"policy": None}
+ return {"error": __utils__["boto3.get_error"](e)}
-def policy_version_exists(policyName, policyVersionId,
- region=None, key=None, keyid=None, profile=None):
- '''
+def policy_version_exists(
+ policyName, policyVersionId, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a policy name and version ID, check to see if the given policy version exists.
Returns True if the given policy version exists and returns False if the given
@@ -402,23 +409,31 @@ def policy_version_exists(policyName, policyVersionId,
salt myminion boto_iot.policy_version_exists mypolicy versionid
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- policy = conn.get_policy_version(policyName=policyName,
- policyversionId=policyVersionId)
- return {'exists': bool(policy)}
+ policy = conn.get_policy_version(
+ policyName=policyName, policyVersionId=policyVersionId
+ )
+ return {"exists": bool(policy)}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'exists': False}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"exists": False}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_policy_version(policyName, policyDocument, setAsDefault=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_policy_version(
+ policyName,
+ policyDocument,
+ setAsDefault=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create a new version of a policy.
Returns {created: true} if the policy version was created and returns
@@ -431,29 +446,34 @@ def create_policy_version(policyName, policyDocument, setAsDefault=False,
salt myminion boto_iot.create_policy_version my_policy \\
'{"Statement":[{"Effect":"Allow","Action":["iot:Publish"],"Resource":["arn:::::topic/foo/bar"]}]}'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policyDocument, string_types):
policyDocument = salt.utils.json.dumps(policyDocument)
- policy = conn.create_policy_version(policyName=policyName,
- policyDocument=policyDocument,
- setAsDefault=setAsDefault)
+ policy = conn.create_policy_version(
+ policyName=policyName,
+ policyDocument=policyDocument,
+ setAsDefault=setAsDefault,
+ )
if policy:
- log.info('The newly created policy version is %s', policy['policyVersionId'])
+ log.info(
+ "The newly created policy version is %s", policy["policyVersionId"]
+ )
- return {'created': True, 'name': policy['policyVersionId']}
+ return {"created": True, "name": policy["policyVersionId"]}
else:
- log.warning('Policy version was not created')
- return {'created': False}
+ log.warning("Policy version was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_policy_version(policyName, policyVersionId,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_policy_version(
+ policyName, policyVersionId, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a policy name and version, delete it.
Returns {deleted: true} if the policy version was deleted and returns
@@ -465,20 +485,22 @@ def delete_policy_version(policyName, policyVersionId,
salt myminion boto_iot.delete_policy_version mypolicy version
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.delete_policy_version(policyName=policyName,
- policyVersionId=policyVersionId)
- return {'deleted': True}
+ conn.delete_policy_version(
+ policyName=policyName, policyVersionId=policyVersionId
+ )
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe_policy_version(policyName, policyVersionId,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_policy_version(
+ policyName, policyVersionId, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a policy name and version, describe its properties.
Returns a dictionary of interesting properties.
@@ -489,27 +511,33 @@ def describe_policy_version(policyName, policyVersionId,
salt myminion boto_iot.describe_policy_version mypolicy version
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- policy = conn.get_policy_version(policyName=policyName,
- policyVersionId=policyVersionId)
+ policy = conn.get_policy_version(
+ policyName=policyName, policyVersionId=policyVersionId
+ )
if policy:
- keys = ('policyName', 'policyArn', 'policyDocument',
- 'policyVersionId', 'isDefaultVersion')
- return {'policy': dict([(k, policy.get(k)) for k in keys])}
+ keys = (
+ "policyName",
+ "policyArn",
+ "policyDocument",
+ "policyVersionId",
+ "isDefaultVersion",
+ )
+ return {"policy": dict([(k, policy.get(k)) for k in keys])}
else:
- return {'policy': None}
+ return {"policy": None}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'policy': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"policy": None}
+ return {"error": __utils__["boto3.get_error"](e)}
def list_policies(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List all policies
Returns list of policies
@@ -528,24 +556,23 @@ def list_policies(region=None, key=None, keyid=None, profile=None):
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policies = []
- for ret in __utils__['boto3.paged_call'](conn.list_policies,
- marker_flag='nextMarker',
- marker_arg='marker'):
- policies.extend(ret['policies'])
+ for ret in __utils__["boto3.paged_call"](
+ conn.list_policies, marker_flag="nextMarker", marker_arg="marker"
+ ):
+ policies.extend(ret["policies"])
if not policies:
- log.warning('No policies found')
- return {'policies': policies}
+ log.warning("No policies found")
+ return {"policies": policies}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def list_policy_versions(policyName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_policy_versions(policyName, region=None, key=None, keyid=None, profile=None):
+ """
List the versions available for the given policy.
CLI Example:
@@ -562,25 +589,28 @@ def list_policy_versions(policyName,
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
- for ret in __utils__['boto3.paged_call'](conn.list_policy_versions,
- marker_flag='nextMarker',
- marker_arg='marker',
- policyName=policyName):
- vers.extend(ret['policyVersions'])
+ for ret in __utils__["boto3.paged_call"](
+ conn.list_policy_versions,
+ marker_flag="nextMarker",
+ marker_arg="marker",
+ policyName=policyName,
+ ):
+ vers.extend(ret["policyVersions"])
if not vers:
- log.warning('No versions found')
- return {'policyVersions': vers}
+ log.warning("No versions found")
+ return {"policyVersions": vers}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def set_default_policy_version(policyName, policyVersionId,
- region=None, key=None, keyid=None, profile=None):
- '''
+def set_default_policy_version(
+ policyName, policyVersionId, region=None, key=None, keyid=None, profile=None
+):
+ """
Sets the specified version of the specified policy as the policy's default
(operative) version. This action affects all certificates that the policy is
attached to.
@@ -595,19 +625,19 @@ def set_default_policy_version(policyName, policyVersionId,
salt myminion boto_iot.set_default_policy_version mypolicy versionid
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.set_default_policy_version(policyName=policyName,
- policyVersionId=str(policyVersionId)) # future lint: disable=blacklisted-function
- return {'changed': True}
+ conn.set_default_policy_version(
+ policyName=policyName, policyVersionId=str(policyVersionId)  # future lint: disable=blacklisted-function
+ )
+ return {"changed": True}
except ClientError as e:
- return {'changed': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"changed": False, "error": __utils__["boto3.get_error"](e)}
-def list_principal_policies(principal,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_principal_policies(principal, region=None, key=None, keyid=None, profile=None):
+ """
List the policies attached to the given principal.
CLI Example:
@@ -624,25 +654,28 @@ def list_principal_policies(principal,
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
- for ret in __utils__['boto3.paged_call'](conn.list_principal_policies,
- principal=principal,
- marker_flag='nextMarker',
- marker_arg='marker'):
- vers.extend(ret['policies'])
+ for ret in __utils__["boto3.paged_call"](
+ conn.list_principal_policies,
+ principal=principal,
+ marker_flag="nextMarker",
+ marker_arg="marker",
+ ):
+ vers.extend(ret["policies"])
if not vers:
- log.warning('No policies found')
- return {'policies': vers}
+ log.warning("No policies found")
+ return {"policies": vers}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def attach_principal_policy(policyName, principal,
- region=None, key=None, keyid=None, profile=None):
- '''
+def attach_principal_policy(
+ policyName, principal, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach the specified policy to the specified principal (certificate or other
credential).
@@ -656,19 +689,19 @@ def attach_principal_policy(policyName, principal,
salt myminion boto_iot.attach_principal_policy mypolicy mycognitoID
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.attach_principal_policy(policyName=policyName,
- principal=principal)
- return {'attached': True}
+ conn.attach_principal_policy(policyName=policyName, principal=principal)
+ return {"attached": True}
except ClientError as e:
- return {'attached': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"attached": False, "error": __utils__["boto3.get_error"](e)}
-def detach_principal_policy(policyName, principal,
- region=None, key=None, keyid=None, profile=None):
- '''
+def detach_principal_policy(
+ policyName, principal, region=None, key=None, keyid=None, profile=None
+):
+ """
Detach the specified policy from the specified principal (certificate or other
credential).
@@ -681,19 +714,17 @@ def detach_principal_policy(policyName, principal,
salt myminion boto_iot.detach_principal_policy mypolicy mycognitoID
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.detach_principal_policy(policyName=policyName,
- principal=principal)
- return {'detached': True}
+ conn.detach_principal_policy(policyName=policyName, principal=principal)
+ return {"detached": True}
except ClientError as e:
- return {'detached': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"detached": False, "error": __utils__["boto3.get_error"](e)}
-def topic_rule_exists(ruleName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def topic_rule_exists(ruleName, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
@@ -705,28 +736,36 @@ def topic_rule_exists(ruleName,
salt myminion boto_iot.topic_rule_exists myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.get_topic_rule(ruleName=ruleName)
- return {'exists': True}
+ return {"exists": True}
except ClientError as e:
# Nonexistent rules show up as unauthorized exceptions. It's unclear how
# to distinguish this from a real authorization exception. In practical
# use, it's more useful to assume lack of existence than to assume a
# genuine authorization problem; authorization problems should not be
# the common case.
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'UnauthorizedException':
- return {'exists': False}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "UnauthorizedException":
+ return {"exists": False}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_topic_rule(ruleName, sql, actions, description,
- ruleDisabled=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_topic_rule(
+ ruleName,
+ sql,
+ actions,
+ description,
+ ruleDisabled=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create a topic rule.
Returns {created: true} if the rule was created and returns
@@ -740,26 +779,36 @@ def create_topic_rule(ruleName, sql, actions, description,
'[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
"targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.create_topic_rule(ruleName=ruleName,
- topicRulePayload={
- 'sql': sql,
- 'description': description,
- 'actions': actions,
- 'ruleDisabled': ruleDisabled
- })
- return {'created': True}
+ conn.create_topic_rule(
+ ruleName=ruleName,
+ topicRulePayload={
+ "sql": sql,
+ "description": description,
+ "actions": actions,
+ "ruleDisabled": ruleDisabled,
+ },
+ )
+ return {"created": True}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def replace_topic_rule(ruleName, sql, actions, description,
- ruleDisabled=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def replace_topic_rule(
+ ruleName,
+ sql,
+ actions,
+ description,
+ ruleDisabled=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, replace a topic rule with the new values.
Returns {replaced: true} if the rule was replaced and returns
@@ -773,25 +822,26 @@ def replace_topic_rule(ruleName, sql, actions, description,
'[{"lambda":{"functionArn":"arn:::::something"}},{"sns":{\\
"targetArn":"arn:::::something","roleArn":"arn:::::something"}}]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.replace_topic_rule(ruleName=ruleName,
- topicRulePayload={
- 'sql': sql,
- 'description': description,
- 'actions': actions,
- 'ruleDisabled': ruleDisabled
- })
- return {'replaced': True}
+ conn.replace_topic_rule(
+ ruleName=ruleName,
+ topicRulePayload={
+ "sql": sql,
+ "description": description,
+ "actions": actions,
+ "ruleDisabled": ruleDisabled,
+ },
+ )
+ return {"replaced": True}
except ClientError as e:
- return {'replaced': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"replaced": False, "error": __utils__["boto3.get_error"](e)}
-def delete_topic_rule(ruleName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_topic_rule(ruleName, region=None, key=None, keyid=None, profile=None):
+ """
Given a rule name, delete it.
Returns {deleted: true} if the rule was deleted and returns
@@ -803,19 +853,18 @@ def delete_topic_rule(ruleName,
salt myminion boto_iot.delete_topic_rule myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_topic_rule(ruleName=ruleName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe_topic_rule(ruleName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_topic_rule(ruleName, region=None, key=None, keyid=None, profile=None):
+ """
Given a topic rule name, describe its properties.
Returns a dictionary of interesting properties.
@@ -826,25 +875,25 @@ def describe_topic_rule(ruleName,
salt myminion boto_iot.describe_topic_rule myrule
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
rule = conn.get_topic_rule(ruleName=ruleName)
- if rule and 'rule' in rule:
- rule = rule['rule']
- keys = ('ruleName', 'sql', 'description',
- 'actions', 'ruleDisabled')
- return {'rule': dict([(k, rule.get(k)) for k in keys])}
+ if rule and "rule" in rule:
+ rule = rule["rule"]
+ keys = ("ruleName", "sql", "description", "actions", "ruleDisabled")
+ return {"rule": dict([(k, rule.get(k)) for k in keys])}
else:
- return {'rule': None}
+ return {"rule": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def list_topic_rules(topic=None, ruleDisabled=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_topic_rules(
+ topic=None, ruleDisabled=None, region=None, key=None, keyid=None, profile=None
+):
+ """
List all rules (for a given topic, if specified)
Returns list of rules
@@ -863,22 +912,24 @@ def list_topic_rules(topic=None, ruleDisabled=None,
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
if topic is not None:
- kwargs['topic'] = topic
+ kwargs["topic"] = topic
if ruleDisabled is not None:
- kwargs['ruleDisabled'] = ruleDisabled
+ kwargs["ruleDisabled"] = ruleDisabled
rules = []
- for ret in __utils__['boto3.paged_call'](conn.list_topic_rules,
- marker_flag='nextToken',
- marker_arg='nextToken',
- **kwargs):
- rules.extend(ret['rules'])
+ for ret in __utils__["boto3.paged_call"](
+ conn.list_topic_rules,
+ marker_flag="nextToken",
+ marker_arg="nextToken",
+ **kwargs
+ ):
+ rules.extend(ret["rules"])
if not rules:
- log.warning('No rules found')
- return {'rules': rules}
+ log.warning("No rules found")
+ return {"rules": rules}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_kinesis.py b/salt/modules/boto_kinesis.py
index a78973d9fcb..44426ebd1ba 100644
--- a/salt/modules/boto_kinesis.py
+++ b/salt/modules/boto_kinesis.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
@@ -41,27 +41,30 @@ Connection module for Amazon Kinesis
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-import time
import random
import sys
+import time
+
+import salt.utils.versions
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
-import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -69,60 +72,66 @@ except ImportError:
log = logging.getLogger(__name__)
-__virtualname__ = 'boto_kinesis'
+__virtualname__ = "boto_kinesis"
def __virtual__():
- '''
+ """
Only load if boto3 libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'kinesis')
+ __utils__["boto3.assign_funcs"](__name__, "kinesis")
return __virtualname__
return has_boto_reqs
def _get_basic_stream(stream_name, conn):
- '''
+ """
Stream info from AWS, via describe_stream
Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
CLI example::
salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
- '''
+ """
return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
- stream = _get_basic_stream(stream_name, conn)['result']
+ stream = _get_basic_stream(stream_name, conn)["result"]
full_stream = stream
# iterate through if there are > 100 shards (max that AWS will return from describe_stream)
while stream["StreamDescription"]["HasMoreShards"]:
- stream = _execute_with_retries(conn,
- "describe_stream",
- StreamName=stream_name,
- ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
- stream = stream['result']
- full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
+ stream = _execute_with_retries(
+ conn,
+ "describe_stream",
+ StreamName=stream_name,
+ ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"],
+ )
+ stream = stream["result"]
+ full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"][
+ "Shards"
+ ]
- r['result'] = full_stream
+ r["result"] = full_stream
return r
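The manual HasMoreShards loop predates boto3's built-in pagination. For comparison only, the same shard walk via a boto3 paginator would look like this sketch (an assumed alternative, not what the module does):

import boto3

def _all_shards(stream_name):
    kinesis = boto3.client("kinesis")
    shards = []
    paginator = kinesis.get_paginator("describe_stream")
    for page in paginator.paginate(StreamName=stream_name):
        shards.extend(page["StreamDescription"]["Shards"])
    return shards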
-def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
- '''
+def get_stream_when_active(
+ stream_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
@@ -130,7 +139,7 @@ def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profi
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
@@ -142,148 +151,159 @@ def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profi
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
- if 'error' in stream_response:
+ if "error" in stream_response:
return stream_response
- stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
+ stream_status = stream_response["result"]["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
- if stream_response['result']["StreamDescription"]["HasMoreShards"]:
+ if stream_response["result"]["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
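_jittered_backoff is defined elsewhere in this module and does not appear in this hunk; a standard full-jitter implementation of that signature looks like the following sketch:

import random

def _jittered_backoff(attempt, upper):
    # Sleep a random fraction of an exponentially growing window, capped
    # at `upper` seconds, so concurrent retriers do not stampede together.
    return min(random.random() * (2 ** attempt), upper)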
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)
- if 'error' in stream:
- r['result'] = False
- r['error'] = stream['error']
+ if "error" in stream:
+ r["result"] = False
+ r["error"] = stream["error"]
else:
- r['result'] = True
+ r["result"] = True
return r
-def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
- '''
+def create_stream(
+ stream_name, num_shards, region=None, key=None, keyid=None, profile=None
+):
+ """
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "create_stream",
- ShardCount=num_shards,
- StreamName=stream_name)
- if 'error' not in r:
- r['result'] = True
+ r = _execute_with_retries(
+ conn, "create_stream", ShardCount=num_shards, StreamName=stream_name
+ )
+ if "error" not in r:
+ r["result"] = True
return r
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "delete_stream",
- StreamName=stream_name)
- if 'error' not in r:
- r['result'] = True
+ r = _execute_with_retries(conn, "delete_stream", StreamName=stream_name)
+ if "error" not in r:
+ r["result"] = True
return r
-def increase_stream_retention_period(stream_name, retention_hours,
- region=None, key=None, keyid=None, profile=None):
- '''
+def increase_stream_retention_period(
+ stream_name, retention_hours, region=None, key=None, keyid=None, profile=None
+):
+ """
Increase stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "increase_stream_retention_period",
- StreamName=stream_name,
- RetentionPeriodHours=retention_hours)
- if 'error' not in r:
- r['result'] = True
+ r = _execute_with_retries(
+ conn,
+ "increase_stream_retention_period",
+ StreamName=stream_name,
+ RetentionPeriodHours=retention_hours,
+ )
+ if "error" not in r:
+ r["result"] = True
return r
-def decrease_stream_retention_period(stream_name, retention_hours,
- region=None, key=None, keyid=None, profile=None):
- '''
+def decrease_stream_retention_period(
+ stream_name, retention_hours, region=None, key=None, keyid=None, profile=None
+):
+ """
Decrease stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "decrease_stream_retention_period",
- StreamName=stream_name,
- RetentionPeriodHours=retention_hours)
- if 'error' not in r:
- r['result'] = True
+ r = _execute_with_retries(
+ conn,
+ "decrease_stream_retention_period",
+ StreamName=stream_name,
+ RetentionPeriodHours=retention_hours,
+ )
+ if "error" not in r:
+ r["result"] = True
return r
-def enable_enhanced_monitoring(stream_name, metrics,
- region=None, key=None, keyid=None, profile=None):
- '''
+def enable_enhanced_monitoring(
+ stream_name, metrics, region=None, key=None, keyid=None, profile=None
+):
+ """
Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "enable_enhanced_monitoring",
- StreamName=stream_name,
- ShardLevelMetrics=metrics)
+ r = _execute_with_retries(
+ conn,
+ "enable_enhanced_monitoring",
+ StreamName=stream_name,
+ ShardLevelMetrics=metrics,
+ )
- if 'error' not in r:
- r['result'] = True
+ if "error" not in r:
+ r["result"] = True
return r
-def disable_enhanced_monitoring(stream_name, metrics,
- region=None, key=None, keyid=None, profile=None):
- '''
+def disable_enhanced_monitoring(
+ stream_name, metrics, region=None, key=None, keyid=None, profile=None
+):
+ """
Disable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- r = _execute_with_retries(conn,
- "disable_enhanced_monitoring",
- StreamName=stream_name,
- ShardLevelMetrics=metrics)
+ r = _execute_with_retries(
+ conn,
+ "disable_enhanced_monitoring",
+ StreamName=stream_name,
+ ShardLevelMetrics=metrics,
+ )
- if 'error' not in r:
- r['result'] = True
+ if "error" not in r:
+ r["result"] = True
return r
@@ -308,15 +328,18 @@ def get_info_for_reshard(stream_details):
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
- shard["HashKeyRange"]["StartingHashKey"])
+ shard["HashKeyRange"]["StartingHashKey"]
+ )
shard["HashKeyRange"]["EndingHashKey"] = long_int(
- shard["HashKeyRange"]["EndingHashKey"])
+ shard["HashKeyRange"]["EndingHashKey"]
+ )
if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
- stream_details["OpenShards"].sort(key=lambda shard: long_int(
- shard["HashKeyRange"]["StartingHashKey"]))
+ stream_details["OpenShards"].sort(
+ key=lambda shard: long_int(shard["HashKeyRange"]["StartingHashKey"])
+ )
return min_hash_key, max_hash_key, stream_details
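For orientation, a minimal sketch of the normalization and sort that get_info_for_reshard performs, using two made-up open shards (the shard IDs and key ranges below are hypothetical, not from a live stream):

open_shards = [
    {"ShardId": "shardId-000000000001",
     "HashKeyRange": {"StartingHashKey": 2 ** 127, "EndingHashKey": 2 ** 128 - 1}},
    {"ShardId": "shardId-000000000000",
     "HashKeyRange": {"StartingHashKey": 0, "EndingHashKey": 2 ** 127 - 1}},
]
open_shards.sort(key=lambda s: s["HashKeyRange"]["StartingHashKey"])
# After sorting, shardId-000000000000 comes first; min_hash_key is 0 and
# max_hash_key is 2**128 - 1, i.e. the full Kinesis hash-key space.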
@@ -338,8 +361,15 @@ def long_int(hash_key):
return int(hash_key)
-def reshard(stream_name, desired_size, force=False,
- region=None, key=None, keyid=None, profile=None):
+def reshard(
+ stream_name,
+ desired_size,
+ force=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
"""
Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
then make a single split or merge operation. This function decides where to split or merge
@@ -357,14 +387,18 @@ def reshard(stream_name, desired_size, force=False,
r = {}
stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
- if 'error' in stream_response:
+ if "error" in stream_response:
return stream_response
- stream_details = stream_response['result']["StreamDescription"]
+ stream_details = stream_response["result"]["StreamDescription"]
min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
- log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
- len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
+ log.debug(
+ "found %s open shards, min_hash_key %s max_hash_key %s",
+ len(stream_details["OpenShards"]),
+ min_hash_key,
+ max_hash_key,
+ )
# find the first open shard that doesn't match the desired pattern. When we find it,
# either split or merge (depending on whether it's too big or too small), and then return.
@@ -380,26 +414,32 @@ def reshard(stream_name, desired_size, force=False,
# this weird math matches what AWS does when you create a kinesis stream
# with an initial number of shards.
expected_starting_hash_key = (
- max_hash_key - min_hash_key) / desired_size * shard_num + shard_num
- expected_ending_hash_key = (
- max_hash_key - min_hash_key) / desired_size * (shard_num + 1) + shard_num
+ max_hash_key - min_hash_key
+ ) / desired_size * shard_num + shard_num
+ expected_ending_hash_key = (max_hash_key - min_hash_key) / desired_size * (
+ shard_num + 1
+ ) + shard_num
# fix an off-by-one at the end
if expected_ending_hash_key > max_hash_key:
expected_ending_hash_key = max_hash_key
log.debug(
"Shard %s (%s) should start at %s: %s",
- shard_num, shard_id, expected_starting_hash_key,
- starting_hash_key == expected_starting_hash_key
+ shard_num,
+ shard_id,
+ expected_starting_hash_key,
+ starting_hash_key == expected_starting_hash_key,
)
log.debug(
"Shard %s (%s) should end at %s: %s",
- shard_num, shard_id, expected_ending_hash_key,
- ending_hash_key == expected_ending_hash_key
+ shard_num,
+ shard_id,
+ expected_ending_hash_key,
+ ending_hash_key == expected_ending_hash_key,
)
if starting_hash_key != expected_starting_hash_key:
- r['error'] = "starting hash keys mismatch, don't know what to do!"
+ r["error"] = "starting hash keys mismatch, don't know what to do!"
return r
if ending_hash_key == expected_ending_hash_key:
@@ -408,49 +448,69 @@ def reshard(stream_name, desired_size, force=False,
if ending_hash_key > expected_ending_hash_key + 1:
# split at expected_ending_hash_key
if force:
- log.debug("%s should end at %s, actual %s, splitting",
- shard_id, expected_ending_hash_key, ending_hash_key)
- r = _execute_with_retries(conn,
- "split_shard",
- StreamName=stream_name,
- ShardToSplit=shard_id,
- NewStartingHashKey=str(expected_ending_hash_key + 1)) # future lint: disable=blacklisted-function
+ log.debug(
+ "%s should end at %s, actual %s, splitting",
+ shard_id,
+ expected_ending_hash_key,
+ ending_hash_key,
+ )
+ r = _execute_with_retries(
+ conn,
+ "split_shard",
+ StreamName=stream_name,
+ ShardToSplit=shard_id,
+ NewStartingHashKey=str(expected_ending_hash_key + 1),
+ ) # future lint: disable=blacklisted-function
else:
- log.debug("%s should end at %s, actual %s would split",
- shard_id, expected_ending_hash_key, ending_hash_key)
+ log.debug(
+ "%s should end at %s, actual %s would split",
+ shard_id,
+ expected_ending_hash_key,
+ ending_hash_key,
+ )
- if 'error' not in r:
- r['result'] = True
+ if "error" not in r:
+ r["result"] = True
return r
else:
# merge
next_shard_id = _get_next_open_shard(stream_details, shard_id)
if not next_shard_id:
- r['error'] = "failed to find next shard after {0}".format(shard_id)
+ r["error"] = "failed to find next shard after {0}".format(shard_id)
return r
if force:
- log.debug("%s should continue past %s, merging with %s",
- shard_id, ending_hash_key, next_shard_id)
- r = _execute_with_retries(conn,
- "merge_shards",
- StreamName=stream_name,
- ShardToMerge=shard_id,
- AdjacentShardToMerge=next_shard_id)
+ log.debug(
+ "%s should continue past %s, merging with %s",
+ shard_id,
+ ending_hash_key,
+ next_shard_id,
+ )
+ r = _execute_with_retries(
+ conn,
+ "merge_shards",
+ StreamName=stream_name,
+ ShardToMerge=shard_id,
+ AdjacentShardToMerge=next_shard_id,
+ )
else:
- log.debug("%s should continue past %s, would merge with %s",
- shard_id, ending_hash_key, next_shard_id)
+ log.debug(
+ "%s should continue past %s, would merge with %s",
+ shard_id,
+ ending_hash_key,
+ next_shard_id,
+ )
- if 'error' not in r:
- r['result'] = True
+ if "error" not in r:
+ r["result"] = True
return r
log.debug("No split or merge action necessary")
- r['result'] = False
+ r["result"] = False
return r
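As a sanity check of the "weird math" above, the expected shard layout for a hypothetical stream resharded to desired_size=2 over the full 128-bit hash-key space can be computed directly (integer division is used here for clarity; the values are illustrative):

min_hash_key, max_hash_key, desired_size = 0, 2 ** 128 - 1, 2
for shard_num in range(desired_size):
    start = (max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
    end = (max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
    end = min(end, max_hash_key)  # the same off-by-one fix as in reshard()
    print(shard_num, start, end)
# Shard 0 ends exactly one key before shard 1 begins, so the expected
# ranges tile the key space with no gaps or overlaps, matching what AWS
# produces for a stream created with two initial shards.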
def list_streams(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return a list of all streams visible to the current account
CLI example:
@@ -458,29 +518,35 @@ def list_streams(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_kinesis.list_streams
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
streams = []
- exclusive_start_stream_name = ''
+ exclusive_start_stream_name = ""
while exclusive_start_stream_name is not None:
- args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
- ret = _execute_with_retries(conn, 'list_streams', **args)
- if 'error' in ret:
+ args = (
+ {"ExclusiveStartStreamName": exclusive_start_stream_name}
+ if exclusive_start_stream_name
+ else {}
+ )
+ ret = _execute_with_retries(conn, "list_streams", **args)
+ if "error" in ret:
return ret
- ret = ret['result'] if ret and ret.get('result') else {}
- streams += ret.get('StreamNames', [])
- exclusive_start_stream_name = streams[-1] if ret.get('HasMoreStreams', False) in (True, 'true') else None
- return {'result': streams}
+ ret = ret["result"] if ret and ret.get("result") else {}
+ streams += ret.get("StreamNames", [])
+ exclusive_start_stream_name = (
+ streams[-1] if ret.get("HasMoreStreams", False) in (True, "true") else None
+ )
+ return {"result": streams}
def _get_next_open_shard(stream_details, shard_id):
- '''
+ """
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
- '''
+ """
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
@@ -492,7 +558,7 @@ def _get_next_open_shard(stream_details, shard_id):
def _execute_with_retries(conn, function, **kwargs):
- '''
+ """
Retry if we're rate limited by AWS or blocked by another call.
Give up and return an error message if the resource is not found or an argument is invalid.
@@ -514,7 +580,7 @@ def _execute_with_retries(conn, function, **kwargs):
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
- '''
+ """
r = {}
max_attempts = 18
max_retry_delay = 10
@@ -522,33 +588,38 @@ def _execute_with_retries(conn, function, **kwargs):
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
- r['result'] = fn(**kwargs)
+ r["result"] = fn(**kwargs)
return r
except botocore.exceptions.ClientError as e:
- error_code = e.response['Error']['Code']
- if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
+ error_code = e.response["Error"]["Code"]
+ if (
+ "LimitExceededException" in error_code
+ or "ResourceInUseException" in error_code
+ ):
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException
- r['error'] = e.response['Error']
- log.error(r['error'])
- r['result'] = None
+ r["error"] = e.response["Error"]
+ log.error(r["error"])
+ r["result"] = None
return r
- r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
- log.error(r['error'])
+ r["error"] = "Tried to execute function {0} {1} times, but was unable".format(
+ function, max_attempts
+ )
+ log.error(r["error"])
return r
def _jittered_backoff(attempt, max_retry_delay):
- '''
+ """
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
- '''
+ """
return min(random.random() * (2 ** attempt), max_retry_delay)
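A quick sketch of how that backoff behaves, assuming the same max_retry_delay of 10 seconds that _execute_with_retries uses (the printed delays are random by design):

import random

def jittered_backoff(attempt, max_retry_delay=10):
    # Full jitter capped at the maximum delay, as in _jittered_backoff.
    return min(random.random() * (2 ** attempt), max_retry_delay)

for attempt in range(6):
    print(attempt, round(jittered_backoff(attempt), 2))
# Attempt 0 sleeps at most 1s and attempt 3 at most 8s; from attempt 4
# onward the 10s cap dominates, so retries never back off indefinitely.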
diff --git a/salt/modules/boto_kms.py b/salt/modules/boto_kms.py
index a5996d7f79f..db58cfbe867 100644
--- a/salt/modules/boto_kms.py
+++ b/salt/modules/boto_kms.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon KMS
.. versionadded:: 2015.8.0
@@ -32,7 +32,7 @@ Connection module for Amazon KMS
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
@@ -41,10 +41,11 @@ from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
+import salt.serializers.json
+
# Import Salt libs
import salt.utils.compat
import salt.utils.odict as odict
-import salt.serializers.json
import salt.utils.versions
log = logging.getLogger(__name__)
@@ -54,114 +55,134 @@ try:
# pylint: disable=unused-import
import boto
import boto.kms
+
# pylint: enable=unused-import
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except (ImportError, AttributeError):
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
- return salt.utils.versions.check_boto_reqs(
- boto_ver='2.38.0',
- check_boto3=False
- )
+ """
+ return salt.utils.versions.check_boto_reqs(boto_ver="2.38.0", check_boto3=False)
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'kms', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "kms", pack=__salt__)
-def create_alias(alias_name, target_key_id, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_alias(
+ alias_name, target_key_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Create a display name for a key.
CLI example::
salt myminion boto_kms.create_alias 'alias/mykey' key_id
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
conn.create_alias(alias_name, target_key_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def create_grant(key_id, grantee_principal, retiring_principal=None,
- operations=None, constraints=None, grant_tokens=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_grant(
+ key_id,
+ grantee_principal,
+ retiring_principal=None,
+ operations=None,
+ constraints=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Adds a grant to a key to specify who can access the key and under what
conditions.
CLI example::
salt myminion boto_kms.create_grant 'alias/mykey' 'arn:aws:iam::1111111:/role/myrole' operations='["Encrypt","Decrypt"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if key_id.startswith('alias/'):
+ if key_id.startswith("alias/"):
key_id = _get_key_id(key_id)
r = {}
try:
- r['grant'] = conn.create_grant(
+ r["grant"] = conn.create_grant(
key_id,
grantee_principal,
retiring_principal=retiring_principal,
operations=operations,
constraints=constraints,
- grant_tokens=grant_tokens
+ grant_tokens=grant_tokens,
)
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def create_key(policy=None, description=None, key_usage=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def create_key(
+ policy=None,
+ description=None,
+ key_usage=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a master key.
CLI example::
salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
_policy = salt.serializers.json.serialize(policy)
try:
key_metadata = conn.create_key(
- _policy,
- description=description,
- key_usage=key_usage
+ _policy, description=description, key_usage=key_usage
)
- r['key_metadata'] = key_metadata['KeyMetadata']
+ r["key_metadata"] = key_metadata["KeyMetadata"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def decrypt(ciphertext_blob, encryption_context=None, grant_tokens=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def decrypt(
+ ciphertext_blob,
+ encryption_context=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Decrypt ciphertext.
CLI example::
salt myminion boto_kms.decrypt encrypted_ciphertext
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
@@ -169,158 +190,162 @@ def decrypt(ciphertext_blob, encryption_context=None, grant_tokens=None,
plaintext = conn.decrypt(
ciphertext_blob,
encryption_context=encryption_context,
- grant_tokens=grant_tokens
+ grant_tokens=grant_tokens,
)
- r['plaintext'] = plaintext['Plaintext']
+ r["plaintext"] = plaintext["Plaintext"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def key_exists(key_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check for the existence of a key.
CLI example::
salt myminion boto_kms.key_exists 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.describe_key(key_id)
# TODO: add to context cache
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
if isinstance(e, boto.kms.exceptions.NotFoundException):
- r['result'] = False
+ r["result"] = False
return r
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def _get_key_id(alias, region=None, key=None, keyid=None, profile=None):
- '''
+ """
From an alias, get a key_id.
- '''
- key_metadata = describe_key(
- alias, region, key, keyid, profile
- )['key_metadata']
- return key_metadata['KeyId']
+ """
+ key_metadata = describe_key(alias, region, key, keyid, profile)["key_metadata"]
+ return key_metadata["KeyId"]
def describe_key(key_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Get detailed information about a key.
CLI example::
salt myminion boto_kms.describe_key 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.describe_key(key_id)
# TODO: add to context cache
- r['key_metadata'] = key['KeyMetadata']
+ r["key_metadata"] = key["KeyMetadata"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def disable_key(key_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Mark key as disabled.
CLI example::
salt myminion boto_kms.disable_key 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.disable_key(key_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def disable_key_rotation(key_id, region=None, key=None, keyid=None,
- profile=None):
- '''
+def disable_key_rotation(key_id, region=None, key=None, keyid=None, profile=None):
+ """
Disable key rotation for specified key.
CLI example::
salt myminion boto_kms.disable_key_rotation 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.disable_key_rotation(key_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
def enable_key(key_id, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Mark key as enabled.
CLI example::
salt myminion boto_kms.enable_key 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.enable_key(key_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def enable_key_rotation(key_id, region=None, key=None, keyid=None,
- profile=None):
- '''
+def enable_key_rotation(key_id, region=None, key=None, keyid=None, profile=None):
+ """
Enable key rotation for the specified key.
CLI example::
salt myminion boto_kms.enable_key_rotation 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.enable_key_rotation(key_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def encrypt(key_id, plaintext, encryption_context=None, grant_tokens=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def encrypt(
+ key_id,
+ plaintext,
+ encryption_context=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Encrypt plaintext into ciphertext using the specified key.
CLI example::
salt myminion boto_kms.encrypt 'alias/mykey' 'myplaindata' '{"aws:username":"myuser"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
@@ -329,24 +354,32 @@ def encrypt(key_id, plaintext, encryption_context=None, grant_tokens=None,
key_id,
plaintext,
encryption_context=encryption_context,
- grant_tokens=grant_tokens
+ grant_tokens=grant_tokens,
)
- r['ciphertext'] = ciphertext['CiphertextBlob']
+ r["ciphertext"] = ciphertext["CiphertextBlob"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
- key_spec=None, grant_tokens=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def generate_data_key(
+ key_id,
+ encryption_context=None,
+ number_of_bytes=None,
+ key_spec=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
@@ -356,25 +389,32 @@ def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
encryption_context=encryption_context,
number_of_bytes=number_of_bytes,
key_spec=key_spec,
- grant_tokens=grant_tokens
+ grant_tokens=grant_tokens,
)
- r['data_key'] = data_key
+ r["data_key"] = data_key
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
def generate_data_key_without_plaintext(
- key_id, encryption_context=None, number_of_bytes=None, key_spec=None,
- grant_tokens=None, region=None, key=None, keyid=None, profile=None
- ):
- '''
+ key_id,
+ encryption_context=None,
+ number_of_bytes=None,
+ key_spec=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Generate a secure data key without a plaintext copy of the key.
CLI example::
salt myminion boto_kms.generate_data_key_without_plaintext 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
@@ -384,229 +424,231 @@ def generate_data_key_without_plaintext(
encryption_context=encryption_context,
number_of_bytes=number_of_bytes,
key_spec=key_spec,
- grant_tokens=grant_tokens
+ grant_tokens=grant_tokens,
)
- r['data_key'] = data_key
+ r["data_key"] = data_key
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def generate_random(number_of_bytes=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def generate_random(
+ number_of_bytes=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Generate a random string.
CLI example::
salt myminion boto_kms.generate_random number_of_bytes=1024
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
random = conn.generate_random(number_of_bytes)
- r['random'] = random['Plaintext']
+ r["random"] = random["Plaintext"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_key_policy(
+ key_id, policy_name, region=None, key=None, keyid=None, profile=None
+):
+ """
Get the policy for the specified key.
CLI example::
salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key_policy = conn.get_key_policy(key_id, policy_name)
- r['key_policy'] = salt.serializers.json.deserialize(
- key_policy['Policy'],
- object_pairs_hook=odict.OrderedDict
+ r["key_policy"] = salt.serializers.json.deserialize(
+ key_policy["Policy"], object_pairs_hook=odict.OrderedDict
)
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def get_key_rotation_status(key_id, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_key_rotation_status(key_id, region=None, key=None, keyid=None, profile=None):
+ """
Get the status of whether key rotation is enabled for a key.
CLI example::
salt myminion boto_kms.get_key_rotation_status 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key_rotation_status = conn.get_key_rotation_status(key_id)
- r['result'] = key_rotation_status['KeyRotationEnabled']
+ r["result"] = key_rotation_status["KeyRotationEnabled"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def list_grants(key_id, limit=None, marker=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def list_grants(
+ key_id, limit=None, marker=None, region=None, key=None, keyid=None, profile=None
+):
+ """
List grants for the specified key.
CLI example::
salt myminion boto_kms.list_grants 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if key_id.startswith('alias/'):
+ if key_id.startswith("alias/"):
key_id = _get_key_id(key_id)
r = {}
try:
_grants = []
next_marker = None
while True:
- grants = conn.list_grants(
- key_id,
- limit=limit,
- marker=next_marker
- )
- for grant in grants['Grants']:
+ grants = conn.list_grants(key_id, limit=limit, marker=next_marker)
+ for grant in grants["Grants"]:
_grants.append(grant)
- if 'NextMarker' in grants:
- next_marker = grants['NextMarker']
+ if "NextMarker" in grants:
+ next_marker = grants["NextMarker"]
else:
break
- r['grants'] = _grants
+ r["grants"] = _grants
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def list_key_policies(key_id, limit=None, marker=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def list_key_policies(
+ key_id, limit=None, marker=None, region=None, key=None, keyid=None, profile=None
+):
+ """
List key_policies for the specified key.
CLI example::
salt myminion boto_kms.list_key_policies 'alias/mykey'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if key_id.startswith('alias/'):
+ if key_id.startswith("alias/"):
key_id = _get_key_id(key_id)
r = {}
try:
- key_policies = conn.list_key_policies(
- key_id,
- limit=limit,
- marker=marker
- )
+ key_policies = conn.list_key_policies(key_id, limit=limit, marker=marker)
# TODO: handle limit, marker and truncation automatically.
- r['key_policies'] = key_policies['PolicyNames']
+ r["key_policies"] = key_policies["PolicyNames"]
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def put_key_policy(key_id, policy_name, policy, region=None, key=None,
- keyid=None, profile=None):
- '''
+def put_key_policy(
+ key_id, policy_name, policy, region=None, key=None, keyid=None, profile=None
+):
+ """
Attach a key policy to the specified key.
CLI example::
salt myminion boto_kms.put_key_policy 'alias/mykey' default '{"Statement":...}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
- conn.put_key_policy(key_id, policy_name, salt.serializers.json.serialize(policy))
- r['result'] = True
+ conn.put_key_policy(
+ key_id, policy_name, salt.serializers.json.serialize(policy)
+ )
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def re_encrypt(ciphertext_blob, destination_key_id,
- source_encryption_context=None,
- destination_encryption_context=None, grant_tokens=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def re_encrypt(
+ ciphertext_blob,
+ destination_key_id,
+ source_encryption_context=None,
+ destination_encryption_context=None,
+ grant_tokens=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Re-encrypt encrypted data with a new master key.
CLI example::
salt myminion boto_kms.re_encrypt 'encrypted_data' 'alias/mynewkey' default '{"Statement":...}'
- '''
- conn = _get_conn(
- region=region,
- key=key,
- keyid=keyid,
- profile=profile
- )
+ """
+ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
ciphertext = conn.re_encrypt(
- ciphertext_blob, destination_key_id, source_encryption_context,
- destination_encryption_context, grant_tokens
+ ciphertext_blob,
+ destination_key_id,
+ source_encryption_context,
+ destination_encryption_context,
+ grant_tokens,
)
- r['ciphertext'] = ciphertext
+ r["ciphertext"] = ciphertext
except boto.exception.BotoServerError as e:
- r['error'] = __utils__['boto.get_error'](e)
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def revoke_grant(key_id, grant_id, region=None, key=None, keyid=None,
- profile=None):
- '''
+def revoke_grant(key_id, grant_id, region=None, key=None, keyid=None, profile=None):
+ """
Revoke a grant from a key.
CLI example::
salt myminion boto_kms.revoke_grant 'alias/mykey' 8u89hf-j09j...
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if key_id.startswith('alias/'):
+ if key_id.startswith("alias/"):
key_id = _get_key_id(key_id)
r = {}
try:
conn.revoke_grant(key_id, grant_id)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
-def update_key_description(key_id, description, region=None, key=None,
- keyid=None, profile=None):
- '''
+def update_key_description(
+ key_id, description, region=None, key=None, keyid=None, profile=None
+):
+ """
Update a key's description.
CLI example::
salt myminion boto_kms.update_key_description 'alias/mykey' 'My key'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
conn.update_key_description(key_id, description)
- r['result'] = True
+ r["result"] = True
except boto.exception.BotoServerError as e:
- r['result'] = False
- r['error'] = __utils__['boto.get_error'](e)
+ r["result"] = False
+ r["error"] = __utils__["boto.get_error"](e)
return r
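Every wrapper in boto_kms returns the same result-dict shape, so callers can branch uniformly on the presence of an "error" key. A hedged sketch of the calling convention, as it might appear inside another Salt execution module (the key alias is hypothetical):

# __salt__ is injected by Salt's loader; this snippet will not run standalone.
r = __salt__["boto_kms.describe_key"]("alias/mykey", region="us-east-1")
if "error" in r:
    raise RuntimeError("KMS call failed: {0}".format(r["error"]))
key_metadata = r["key_metadata"]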
diff --git a/salt/modules/boto_lambda.py b/salt/modules/boto_lambda.py
index f28ec96ae84..96e601141ea 100644
--- a/salt/modules/boto_lambda.py
+++ b/salt/modules/boto_lambda.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Lambda
.. versionadded:: 2016.3.0
@@ -74,23 +74,25 @@ as a passed in dict, or as a string to pull from pillars or minion config:
error:
message: error message
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-import logging
-import time
-import random
-# Import Salt libs
-from salt.ext import six
+import logging
+import random
+import time
+
import salt.utils.compat
import salt.utils.files
import salt.utils.json
import salt.utils.versions
from salt.exceptions import SaltInvocationError
+
+# Import Salt libs
+from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error
log = logging.getLogger(__name__)
@@ -102,11 +104,13 @@ try:
# pylint: disable=unused-import
import boto
import boto3
+
# pylint: enable=unused-import
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -114,44 +118,40 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if the boto libraries exist and meet the minimum required versions.
- '''
+ """
# the boto_lambda execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
# botocore version >= 1.5.2 is required due to lambda environment variables
return salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- boto3_ver='1.2.5',
- botocore_ver='1.5.2'
+ boto_ver="2.8.0", boto3_ver="1.2.5", botocore_ver="1.5.2"
)
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'lambda')
+ __utils__["boto3.assign_funcs"](__name__, "lambda")
-def _find_function(name,
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_function(name, region=None, key=None, keyid=None, profile=None):
+ """
Given a function name, find and return matching Lambda information.
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- for funcs in __utils__['boto3.paged_call'](conn.list_functions):
- for func in funcs['Functions']:
- if func['FunctionName'] == name:
+ for funcs in __utils__["boto3.paged_call"](conn.list_functions):
+ for func in funcs["Functions"]:
+ if func["FunctionName"] == name:
return func
return None
-def function_exists(FunctionName, region=None, key=None,
- keyid=None, profile=None):
- '''
+def function_exists(FunctionName, region=None, key=None, keyid=None, profile=None):
+ """
Given a function name, check to see if the given function name exists.
Returns True if the given function exists and returns False if the given
@@ -163,32 +163,33 @@ def function_exists(FunctionName, region=None, key=None,
salt myminion boto_lambda.function_exists myfunction
- '''
+ """
try:
- func = _find_function(FunctionName,
- region=region, key=key, keyid=keyid, profile=profile)
- return {'exists': bool(func)}
+ func = _find_function(
+ FunctionName, region=region, key=key, keyid=keyid, profile=profile
+ )
+ return {"exists": bool(func)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
- if name.startswith('arn:aws:iam:'):
+ if name.startswith("arn:aws:iam:"):
return name
- account_id = __salt__['boto_iam.get_account_id'](
+ account_id = __salt__["boto_iam.get_account_id"](
region=region, key=key, keyid=keyid, profile=profile
)
- if profile and 'region' in profile:
- region = profile['region']
+ if profile and "region" in profile:
+ region = profile["region"]
if region is None:
- region = 'us-east-1'
- return 'arn:aws:iam::{0}:role/{1}'.format(account_id, name)
+ region = "us-east-1"
+ return "arn:aws:iam::{0}:role/{1}".format(account_id, name)
def _filedata(infile):
- with salt.utils.files.fopen(infile, 'rb') as f:
+ with salt.utils.files.fopen(infile, "rb") as f:
return f.read()
@@ -198,23 +199,47 @@ def _resolve_vpcconfig(conf, region=None, key=None, keyid=None, profile=None):
if not conf:
return None
if not isinstance(conf, dict):
- raise SaltInvocationError('VpcConfig must be a dict.')
- sns = [__salt__['boto_vpc.get_resource_id']('subnet', s, region=region, key=key,
- keyid=keyid, profile=profile).get('id') for s in conf.pop('SubnetNames', [])]
- sgs = [__salt__['boto_secgroup.get_group_id'](s, region=region, key=key, keyid=keyid,
- profile=profile) for s in conf.pop('SecurityGroupNames', [])]
- conf.setdefault('SubnetIds', []).extend(sns)
- conf.setdefault('SecurityGroupIds', []).extend(sgs)
+ raise SaltInvocationError("VpcConfig must be a dict.")
+ sns = [
+ __salt__["boto_vpc.get_resource_id"](
+ "subnet", s, region=region, key=key, keyid=keyid, profile=profile
+ ).get("id")
+ for s in conf.pop("SubnetNames", [])
+ ]
+ sgs = [
+ __salt__["boto_secgroup.get_group_id"](
+ s, region=region, key=key, keyid=keyid, profile=profile
+ )
+ for s in conf.pop("SecurityGroupNames", [])
+ ]
+ conf.setdefault("SubnetIds", []).extend(sns)
+ conf.setdefault("SecurityGroupIds", []).extend(sgs)
return conf
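To make the name-to-ID resolution concrete, here is what a hypothetical VpcConfig looks like before and after _resolve_vpcconfig (all IDs invented for illustration):

# Input may mix human-readable names with pre-resolved IDs:
vpc_config = {
    "SubnetNames": ["app-subnet-a"],
    "SecurityGroupNames": ["lambda-sg"],
    "SubnetIds": ["subnet-0aaa1111"],
}
# _resolve_vpcconfig pops the *Names lists, looks each name up via the
# boto_vpc/boto_secgroup modules, and appends the resulting IDs, leaving
# only the keys the Lambda API understands, e.g.:
# {"SubnetIds": ["subnet-0aaa1111", "subnet-0bbb2222"],
#  "SecurityGroupIds": ["sg-0ccc3333"]}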
-def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
- S3Bucket=None, S3Key=None, S3ObjectVersion=None,
- Description="", Timeout=3, MemorySize=128, Publish=False,
- WaitForRole=False, RoleRetries=5,
- region=None, key=None, keyid=None, profile=None,
- VpcConfig=None, Environment=None):
- '''
+def create_function(
+ FunctionName,
+ Runtime,
+ Role,
+ Handler,
+ ZipFile=None,
+ S3Bucket=None,
+ S3Key=None,
+ S3ObjectVersion=None,
+ Description="",
+ Timeout=3,
+ MemorySize=128,
+ Publish=False,
+ WaitForRole=False,
+ RoleRetries=5,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ VpcConfig=None,
+ Environment=None,
+):
+ """
.. versionadded:: 2017.7.0
Given a valid config, create a function.
@@ -240,68 +265,90 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
salt myminion boto_lambda.create_function my_function python2.7 my_role my_file.my_function my_function.zip
- '''
+ """
- role_arn = _get_role_arn(Role, region=region, key=key,
- keyid=keyid, profile=profile)
+ role_arn = _get_role_arn(Role, region=region, key=key, keyid=keyid, profile=profile)
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ZipFile:
if S3Bucket or S3Key or S3ObjectVersion:
- raise SaltInvocationError('Either ZipFile must be specified, or '
- 'S3Bucket and S3Key must be provided.')
+ raise SaltInvocationError(
+ "Either ZipFile must be specified, or "
+ "S3Bucket and S3Key must be provided."
+ )
code = {
- 'ZipFile': _filedata(ZipFile),
+ "ZipFile": _filedata(ZipFile),
}
else:
if not S3Bucket or not S3Key:
- raise SaltInvocationError('Either ZipFile must be specified, or '
- 'S3Bucket and S3Key must be provided.')
+ raise SaltInvocationError(
+ "Either ZipFile must be specified, or "
+ "S3Bucket and S3Key must be provided."
+ )
code = {
- 'S3Bucket': S3Bucket,
- 'S3Key': S3Key,
+ "S3Bucket": S3Bucket,
+ "S3Key": S3Key,
}
if S3ObjectVersion:
- code['S3ObjectVersion'] = S3ObjectVersion
+ code["S3ObjectVersion"] = S3ObjectVersion
kwargs = {}
if VpcConfig is not None:
- kwargs['VpcConfig'] = _resolve_vpcconfig(VpcConfig, region=region, key=key, keyid=keyid, profile=profile)
+ kwargs["VpcConfig"] = _resolve_vpcconfig(
+ VpcConfig, region=region, key=key, keyid=keyid, profile=profile
+ )
if Environment is not None:
- kwargs['Environment'] = Environment
+ kwargs["Environment"] = Environment
if WaitForRole:
retrycount = RoleRetries
else:
retrycount = 1
for retry in range(retrycount, 0, -1):
try:
- func = conn.create_function(FunctionName=FunctionName, Runtime=Runtime, Role=role_arn, Handler=Handler,
- Code=code, Description=Description, Timeout=Timeout, MemorySize=MemorySize,
- Publish=Publish, **kwargs)
+ func = conn.create_function(
+ FunctionName=FunctionName,
+ Runtime=Runtime,
+ Role=role_arn,
+ Handler=Handler,
+ Code=code,
+ Description=Description,
+ Timeout=Timeout,
+ MemorySize=MemorySize,
+ Publish=Publish,
+ **kwargs
+ )
except ClientError as e:
- if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
+ if (
+ retry > 1
+ and e.response.get("Error", {}).get("Code")
+ == "InvalidParameterValueException"
+ ):
log.info(
- 'Function not created but IAM role may not have propagated, will retry')
+ "Function not created but IAM role may not have propagated, will retry"
+ )
# exponential backoff
- time.sleep((2 ** (RoleRetries - retry)) +
- (random.randint(0, 1000) / 1000))
+ time.sleep(
+ (2 ** (RoleRetries - retry)) + (random.randint(0, 1000) / 1000)
+ )
continue
else:
raise
else:
break
if func:
- log.info('The newly created function name is %s', func['FunctionName'])
+ log.info("The newly created function name is %s", func["FunctionName"])
- return {'created': True, 'name': func['FunctionName']}
+ return {"created": True, "name": func["FunctionName"]}
else:
- log.warning('Function was not created')
- return {'created': False}
+ log.warning("Function was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete_function(FunctionName, Qualifier=None, region=None, key=None, keyid=None, profile=None):
- '''
+def delete_function(
+ FunctionName, Qualifier=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a function name and an optional version qualifier, delete the function.
Returns {deleted: true} if the function was deleted and returns
@@ -313,23 +360,21 @@ def delete_function(FunctionName, Qualifier=None, region=None, key=None, keyid=N
salt myminion boto_lambda.delete_function myfunction
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Qualifier:
- conn.delete_function(
- FunctionName=FunctionName, Qualifier=Qualifier)
+ conn.delete_function(FunctionName=FunctionName, Qualifier=Qualifier)
else:
conn.delete_function(FunctionName=FunctionName)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def describe_function(FunctionName, region=None, key=None,
- keyid=None, profile=None):
- '''
+def describe_function(FunctionName, region=None, key=None, keyid=None, profile=None):
+ """
Given a function name, describe its properties.
Returns a dictionary of interesting properties.
@@ -340,28 +385,52 @@ def describe_function(FunctionName, region=None, key=None,
salt myminion boto_lambda.describe_function myfunction
- '''
+ """
try:
- func = _find_function(FunctionName,
- region=region, key=key, keyid=keyid, profile=profile)
+ func = _find_function(
+ FunctionName, region=region, key=key, keyid=keyid, profile=profile
+ )
if func:
- keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
- 'CodeSize', 'Description', 'Timeout', 'MemorySize',
- 'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
- return {'function': dict([(k, func.get(k)) for k in keys])}
+ keys = (
+ "FunctionName",
+ "Runtime",
+ "Role",
+ "Handler",
+ "CodeSha256",
+ "CodeSize",
+ "Description",
+ "Timeout",
+ "MemorySize",
+ "FunctionArn",
+ "LastModified",
+ "VpcConfig",
+ "Environment",
+ )
+ return {"function": dict([(k, func.get(k)) for k in keys])}
else:
- return {'function': None}
+ return {"function": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def update_function_config(FunctionName, Role=None, Handler=None,
- Description=None, Timeout=None, MemorySize=None,
- region=None, key=None, keyid=None, profile=None,
- VpcConfig=None, WaitForRole=False, RoleRetries=5,
- Environment=None):
- '''
+def update_function_config(
+ FunctionName,
+ Role=None,
+ Handler=None,
+ Description=None,
+ Timeout=None,
+ MemorySize=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ VpcConfig=None,
+ WaitForRole=False,
+ RoleRetries=5,
+ Environment=None,
+):
+ """
.. versionadded:: 2017.7.0
Update the configuration of the named lambda function.
@@ -387,24 +456,28 @@ def update_function_config(FunctionName, Role=None, Handler=None,
salt myminion boto_lambda.update_function_config my_function my_role my_file.my_function "my lambda function"
- '''
+ """
args = dict(FunctionName=FunctionName)
- options = {'Handler': Handler,
- 'Description': Description,
- 'Timeout': Timeout,
- 'MemorySize': MemorySize,
- 'VpcConfig': VpcConfig,
- 'Environment': Environment}
+ options = {
+ "Handler": Handler,
+ "Description": Description,
+ "Timeout": Timeout,
+ "MemorySize": MemorySize,
+ "VpcConfig": VpcConfig,
+ "Environment": Environment,
+ }
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for val, var in six.iteritems(options):
if var:
args[val] = var
if Role:
- args['Role'] = _get_role_arn(Role, region, key, keyid, profile)
+ args["Role"] = _get_role_arn(Role, region, key, keyid, profile)
if VpcConfig:
- args['VpcConfig'] = _resolve_vpcconfig(VpcConfig, region=region, key=key, keyid=keyid, profile=profile)
+ args["VpcConfig"] = _resolve_vpcconfig(
+ VpcConfig, region=region, key=key, keyid=keyid, profile=profile
+ )
try:
if WaitForRole:
retrycount = RoleRetries
@@ -414,33 +487,60 @@ def update_function_config(FunctionName, Role=None, Handler=None,
try:
r = conn.update_function_configuration(**args)
except ClientError as e:
- if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
+ if (
+ retry > 1
+ and e.response.get("Error", {}).get("Code")
+ == "InvalidParameterValueException"
+ ):
log.info(
- 'Function not updated but IAM role may not have propagated, will retry')
+ "Function not updated but IAM role may not have propagated, will retry"
+ )
# exponential backoff
- time.sleep((2 ** (RoleRetries - retry)) +
- (random.randint(0, 1000) / 1000))
+ time.sleep(
+ (2 ** (RoleRetries - retry)) + (random.randint(0, 1000) / 1000)
+ )
continue
else:
raise
else:
break
if r:
- keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
- 'CodeSize', 'Description', 'Timeout', 'MemorySize',
- 'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
- return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
+ keys = (
+ "FunctionName",
+ "Runtime",
+ "Role",
+ "Handler",
+ "CodeSha256",
+ "CodeSize",
+ "Description",
+ "Timeout",
+ "MemorySize",
+ "FunctionArn",
+ "LastModified",
+ "VpcConfig",
+ "Environment",
+ )
+ return {"updated": True, "function": dict([(k, r.get(k)) for k in keys])}
else:
- log.warning('Function was not updated')
- return {'updated': False}
+ log.warning("Function was not updated")
+ return {"updated": False}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
- S3ObjectVersion=None, Publish=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update_function_code(
+ FunctionName,
+ ZipFile=None,
+ S3Bucket=None,
+ S3Key=None,
+ S3ObjectVersion=None,
+ Publish=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Upload the given code to the named lambda function.
Returns {updated: true} if the function was updated and returns
@@ -452,45 +552,72 @@ def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
salt myminion boto_lambda.update_function_code my_function ZipFile=function.zip
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if ZipFile:
if S3Bucket or S3Key or S3ObjectVersion:
- raise SaltInvocationError('Either ZipFile must be specified, or '
- 'S3Bucket and S3Key must be provided.')
- r = conn.update_function_code(FunctionName=FunctionName,
- ZipFile=_filedata(ZipFile),
- Publish=Publish)
+ raise SaltInvocationError(
+ "Either ZipFile must be specified, or "
+ "S3Bucket and S3Key must be provided."
+ )
+ r = conn.update_function_code(
+ FunctionName=FunctionName, ZipFile=_filedata(ZipFile), Publish=Publish
+ )
else:
if not S3Bucket or not S3Key:
- raise SaltInvocationError('Either ZipFile must be specified, or '
- 'S3Bucket and S3Key must be provided.')
+ raise SaltInvocationError(
+ "Either ZipFile must be specified, or "
+ "S3Bucket and S3Key must be provided."
+ )
args = {
- 'S3Bucket': S3Bucket,
- 'S3Key': S3Key,
+ "S3Bucket": S3Bucket,
+ "S3Key": S3Key,
}
if S3ObjectVersion:
- args['S3ObjectVersion'] = S3ObjectVersion
- r = conn.update_function_code(FunctionName=FunctionName,
- Publish=Publish, **args)
+ args["S3ObjectVersion"] = S3ObjectVersion
+ r = conn.update_function_code(
+ FunctionName=FunctionName, Publish=Publish, **args
+ )
if r:
- keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
- 'CodeSize', 'Description', 'Timeout', 'MemorySize',
- 'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
- return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
+ keys = (
+ "FunctionName",
+ "Runtime",
+ "Role",
+ "Handler",
+ "CodeSha256",
+ "CodeSize",
+ "Description",
+ "Timeout",
+ "MemorySize",
+ "FunctionArn",
+ "LastModified",
+ "VpcConfig",
+ "Environment",
+ )
+ return {"updated": True, "function": dict([(k, r.get(k)) for k in keys])}
else:
- log.warning('Function was not updated')
- return {'updated': False}
+ log.warning("Function was not updated")
+ return {"updated": False}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
- SourceAccount=None, Qualifier=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def add_permission(
+ FunctionName,
+ StatementId,
+ Action,
+ Principal,
+ SourceArn=None,
+ SourceAccount=None,
+ Qualifier=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Add a permission to a lambda function.
Returns {updated: true} if the permission was added and returns
@@ -504,25 +631,38 @@ def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
s3.amazonaws.com aws:arn::::bucket-name \\
aws-account-id
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for key in ('SourceArn', 'SourceAccount', 'Qualifier'):
+ for key in ("SourceArn", "SourceAccount", "Qualifier"):
if locals()[key] is not None:
- kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
- conn.add_permission(FunctionName=FunctionName, StatementId=StatementId,
- Action=Action, Principal=str(Principal), # future lint: disable=blacklisted-function
- **kwargs)
- return {'updated': True}
+ kwargs[key] = str(
+ locals()[key]
+ ) # future lint: disable=blacklisted-function
+ conn.add_permission(
+ FunctionName=FunctionName,
+ StatementId=StatementId,
+ Action=Action,
+ Principal=str(Principal), # future lint: disable=blacklisted-function
+ **kwargs
+ )
+ return {"updated": True}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def remove_permission(FunctionName, StatementId, Qualifier=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def remove_permission(
+ FunctionName,
+ StatementId,
+ Qualifier=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Remove a permission from a lambda function.
Returns {updated: true} if the permission was removed and returns
@@ -534,23 +674,25 @@ def remove_permission(FunctionName, StatementId, Qualifier=None,
salt myminion boto_lambda.remove_permission my_function my_id
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
if Qualifier is not None:
- kwargs['Qualifier'] = Qualifier
- conn.remove_permission(FunctionName=FunctionName, StatementId=StatementId,
- **kwargs)
- return {'updated': True}
+ kwargs["Qualifier"] = Qualifier
+ conn.remove_permission(
+ FunctionName=FunctionName, StatementId=StatementId, **kwargs
+ )
+ return {"updated": True}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def get_permissions(FunctionName, Qualifier=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_permissions(
+ FunctionName, Qualifier=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get resource permissions for the given lambda function
Returns dictionary of permissions, by statement ID
@@ -562,51 +704,50 @@ def get_permissions(FunctionName, Qualifier=None,
salt myminion boto_lambda.get_permissions my_function
permissions: {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
if Qualifier is not None:
- kwargs['Qualifier'] = Qualifier
+ kwargs["Qualifier"] = Qualifier
# The get_policy call is not symmetric with add/remove_permissions. So
# massage it until it is, for better ease of use.
- policy = conn.get_policy(FunctionName=FunctionName,
- **kwargs)
- policy = policy.get('Policy', {})
+ policy = conn.get_policy(FunctionName=FunctionName, **kwargs)
+ policy = policy.get("Policy", {})
if isinstance(policy, six.string_types):
policy = salt.utils.json.loads(policy)
if policy is None:
policy = {}
permissions = {}
- for statement in policy.get('Statement', []):
- condition = statement.get('Condition', {})
- principal = statement.get('Principal', {})
- if 'AWS' in principal:
- principal = principal['AWS'].split(':')[4]
+ for statement in policy.get("Statement", []):
+ condition = statement.get("Condition", {})
+ principal = statement.get("Principal", {})
+ if "AWS" in principal:
+ principal = principal["AWS"].split(":")[4]
else:
- principal = principal.get('Service')
+ principal = principal.get("Service")
permission = {
- 'Action': statement.get('Action'),
- 'Principal': principal,
+ "Action": statement.get("Action"),
+ "Principal": principal,
}
- if 'ArnLike' in condition:
- permission['SourceArn'] = condition[
- 'ArnLike'].get('AWS:SourceArn')
- if 'StringEquals' in condition:
- permission['SourceAccount'] = condition[
- 'StringEquals'].get('AWS:SourceAccount')
- permissions[statement.get('Sid')] = permission
- return {'permissions': permissions}
+ if "ArnLike" in condition:
+ permission["SourceArn"] = condition["ArnLike"].get("AWS:SourceArn")
+ if "StringEquals" in condition:
+ permission["SourceAccount"] = condition["StringEquals"].get(
+ "AWS:SourceAccount"
+ )
+ permissions[statement.get("Sid")] = permission
+ return {"permissions": permissions}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
- return {'permissions': None}
- return {'permissions': None, 'error': err}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException":
+ return {"permissions": None}
+ return {"permissions": None, "error": err}
def list_functions(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List all Lambda functions visible in the current scope.
CLI Example:
@@ -615,18 +756,19 @@ def list_functions(region=None, key=None, keyid=None, profile=None):
salt myminion boto_lambda.list_functions
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = []
- for funcs in __utils__['boto3.paged_call'](conn.list_functions):
- ret += funcs['Functions']
+ for funcs in __utils__["boto3.paged_call"](conn.list_functions):
+ ret += funcs["Functions"]
return ret
-def list_function_versions(FunctionName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_function_versions(
+ FunctionName, region=None, key=None, keyid=None, profile=None
+):
+ """
List the versions available for the given function.
Returns list of function versions
@@ -639,23 +781,32 @@ def list_function_versions(FunctionName,
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
- for ret in __utils__['boto3.paged_call'](conn.list_versions_by_function,
- FunctionName=FunctionName):
- vers.extend(ret['Versions'])
+ for ret in __utils__["boto3.paged_call"](
+ conn.list_versions_by_function, FunctionName=FunctionName
+ ):
+ vers.extend(ret["Versions"])
if not bool(vers):
- log.warning('No versions found')
- return {'Versions': vers}
+ log.warning("No versions found")
+ return {"Versions": vers}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_alias(FunctionName, Name, FunctionVersion, Description="",
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_alias(
+ FunctionName,
+ Name,
+ FunctionVersion,
+ Description="",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create an alias to a function.
Returns {created: true} if the alias was created and returns
@@ -667,24 +818,28 @@ def create_alias(FunctionName, Name, FunctionVersion, Description="",
salt myminion boto_lambda.create_alias my_function my_alias $LATEST "An alias"
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- alias = conn.create_alias(FunctionName=FunctionName, Name=Name,
- FunctionVersion=FunctionVersion, Description=Description)
+ alias = conn.create_alias(
+ FunctionName=FunctionName,
+ Name=Name,
+ FunctionVersion=FunctionVersion,
+ Description=Description,
+ )
if alias:
- log.info('The newly created alias name is %s', alias['Name'])
+ log.info("The newly created alias name is %s", alias["Name"])
- return {'created': True, 'name': alias['Name']}
+ return {"created": True, "name": alias["Name"]}
else:
- log.warning('Alias was not created')
- return {'created': False}
+ log.warning("Alias was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
def delete_alias(FunctionName, Name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Given a function name and alias name, delete the alias.
Returns {deleted: true} if the alias was deleted and returns
@@ -696,39 +851,43 @@ def delete_alias(FunctionName, Name, region=None, key=None, keyid=None, profile=
salt myminion boto_lambda.delete_alias myfunction myalias
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_alias(FunctionName=FunctionName, Name=Name)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def _find_alias(FunctionName, Name, FunctionVersion=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_alias(
+ FunctionName,
+ Name,
+ FunctionVersion=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given function name and alias name, find and return matching alias information.
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- args = {
- 'FunctionName': FunctionName
- }
+ args = {"FunctionName": FunctionName}
if FunctionVersion:
- args['FunctionVersion'] = FunctionVersion
+ args["FunctionVersion"] = FunctionVersion
- for aliases in __utils__['boto3.paged_call'](conn.list_aliases, **args):
- for alias in aliases.get('Aliases'):
- if alias['Name'] == Name:
+ for aliases in __utils__["boto3.paged_call"](conn.list_aliases, **args):
+ for alias in aliases.get("Aliases"):
+ if alias["Name"] == Name:
return alias
return None
-def alias_exists(FunctionName, Name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def alias_exists(FunctionName, Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a function name and alias name, check to see if the given alias exists.
Returns True if the given alias exists and returns False if the given
@@ -740,19 +899,19 @@ def alias_exists(FunctionName, Name, region=None, key=None,
salt myminion boto_lambda.alias_exists myfunction myalias
- '''
+ """
try:
- alias = _find_alias(FunctionName, Name,
- region=region, key=key, keyid=keyid, profile=profile)
- return {'exists': bool(alias)}
+ alias = _find_alias(
+ FunctionName, Name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ return {"exists": bool(alias)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_alias(FunctionName, Name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def describe_alias(FunctionName, Name, region=None, key=None, keyid=None, profile=None):
+ """
Given a function name and alias name describe the properties of the alias.
Returns a dictionary of interesting properties.
@@ -763,23 +922,32 @@ def describe_alias(FunctionName, Name, region=None, key=None,
salt myminion boto_lambda.describe_alias myfunction myalias
- '''
+ """
try:
- alias = _find_alias(FunctionName, Name,
- region=region, key=key, keyid=keyid, profile=profile)
+ alias = _find_alias(
+ FunctionName, Name, region=region, key=key, keyid=keyid, profile=profile
+ )
if alias:
- keys = ('AliasArn', 'Name', 'FunctionVersion', 'Description')
- return {'alias': dict([(k, alias.get(k)) for k in keys])}
+ keys = ("AliasArn", "Name", "FunctionVersion", "Description")
+ return {"alias": dict([(k, alias.get(k)) for k in keys])}
else:
- return {'alias': None}
+ return {"alias": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update_alias(
+ FunctionName,
+ Name,
+ FunctionVersion=None,
+ Description=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
@@ -791,30 +959,38 @@ def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
salt myminion boto_lambda.update_alias my_lambda my_alias $LATEST
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {}
if FunctionVersion:
- args['FunctionVersion'] = FunctionVersion
+ args["FunctionVersion"] = FunctionVersion
if Description:
- args['Description'] = Description
+ args["Description"] = Description
r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args)
if r:
- keys = ('Name', 'FunctionVersion', 'Description')
- return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])}
+ keys = ("Name", "FunctionVersion", "Description")
+ return {"updated": True, "alias": dict([(k, r.get(k)) for k in keys])}
else:
- log.warning('Alias was not updated')
- return {'updated': False}
+ log.warning("Alias was not updated")
+ return {"updated": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
- Enabled=True, BatchSize=100,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_event_source_mapping(
+ EventSourceArn,
+ FunctionName,
+ StartingPosition,
+ Enabled=True,
+ BatchSize=100,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Identifies a stream as an event source for a Lambda function. It can be
either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda
invokes the specified function when records are posted to the stream.
@@ -828,28 +1004,31 @@ def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
salt myminion boto_lambda.create_event_source_mapping arn::::eventsource myfunction LATEST
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- obj = conn.create_event_source_mapping(EventSourceArn=EventSourceArn,
- FunctionName=FunctionName,
- Enabled=Enabled,
- BatchSize=BatchSize,
- StartingPosition=StartingPosition)
+ obj = conn.create_event_source_mapping(
+ EventSourceArn=EventSourceArn,
+ FunctionName=FunctionName,
+ Enabled=Enabled,
+ BatchSize=BatchSize,
+ StartingPosition=StartingPosition,
+ )
if obj:
- log.info('The newly created event source mapping ID is %s', obj['UUID'])
+ log.info("The newly created event source mapping ID is %s", obj["UUID"])
- return {'created': True, 'id': obj['UUID']}
+ return {"created": True, "id": obj["UUID"]}
else:
- log.warning('Event source mapping was not created')
- return {'created': False}
+ log.warning("Event source mapping was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def get_event_source_mapping_ids(EventSourceArn, FunctionName,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_event_source_mapping_ids(
+ EventSourceArn, FunctionName, region=None, key=None, keyid=None, profile=None
+):
+ """
Given an event source and function name, return a list of mapping IDs
CLI Example:
@@ -858,40 +1037,66 @@ def get_event_source_mapping_ids(EventSourceArn, FunctionName,
salt myminion boto_lambda.get_event_source_mapping_ids arn:::: myfunction
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
mappings = []
- for maps in __utils__['boto3.paged_call'](conn.list_event_source_mappings,
- EventSourceArn=EventSourceArn,
- FunctionName=FunctionName):
- mappings.extend([mapping['UUID']
- for mapping in maps['EventSourceMappings']])
+ for maps in __utils__["boto3.paged_call"](
+ conn.list_event_source_mappings,
+ EventSourceArn=EventSourceArn,
+ FunctionName=FunctionName,
+ ):
+ mappings.extend(
+ [mapping["UUID"] for mapping in maps["EventSourceMappings"]]
+ )
return mappings
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def _get_ids(UUID=None, EventSourceArn=None, FunctionName=None,
- region=None, key=None, keyid=None, profile=None):
+def _get_ids(
+ UUID=None,
+ EventSourceArn=None,
+ FunctionName=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
if UUID:
if EventSourceArn or FunctionName:
- raise SaltInvocationError('Either UUID must be specified, or '
- 'EventSourceArn and FunctionName must be provided.')
+ raise SaltInvocationError(
+ "Either UUID must be specified, or "
+ "EventSourceArn and FunctionName must be provided."
+ )
return [UUID]
else:
if not EventSourceArn or not FunctionName:
- raise SaltInvocationError('Either UUID must be specified, or '
- 'EventSourceArn and FunctionName must be provided.')
- return get_event_source_mapping_ids(EventSourceArn=EventSourceArn,
- FunctionName=FunctionName,
- region=region, key=key, keyid=keyid, profile=profile)
+ raise SaltInvocationError(
+ "Either UUID must be specified, or "
+ "EventSourceArn and FunctionName must be provided."
+ )
+ return get_event_source_mapping_ids(
+ EventSourceArn=EventSourceArn,
+ FunctionName=FunctionName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
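_get_ids enforces an exclusive-or contract: a UUID alone, or EventSourceArn plus FunctionName together, never a mix. A standalone sketch of just that contract (ValueError stands in for SaltInvocationError):

def _validate_mapping_args(UUID=None, EventSourceArn=None, FunctionName=None):
    # Either a UUID, or both EventSourceArn and FunctionName.
    if UUID:
        if EventSourceArn or FunctionName:
            raise ValueError("Either UUID, or EventSourceArn and FunctionName.")
        return [UUID]
    if not (EventSourceArn and FunctionName):
        raise ValueError("Either UUID, or EventSourceArn and FunctionName.")
    return None  # the real code falls back to get_event_source_mapping_ids()

assert _validate_mapping_args(UUID="260c423d") == ["260c423d"]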
-def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_event_source_mapping(
+ UUID=None,
+ EventSourceArn=None,
+ FunctionName=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an event source mapping ID or an event source ARN and FunctionName,
delete the event source mapping
@@ -904,22 +1109,27 @@ def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=Non
salt myminion boto_lambda.delete_event_source_mapping 260c423d-e8b5-4443-8d6a-5e91b9ecd0fa
- '''
- ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
- FunctionName=FunctionName)
+ """
+ ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName)
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for id in ids:
conn.delete_event_source_mapping(UUID=id)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def event_source_mapping_exists(UUID=None, EventSourceArn=None,
- FunctionName=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def event_source_mapping_exists(
+ UUID=None,
+ EventSourceArn=None,
+ FunctionName=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an event source mapping ID or an event source ARN and FunctionName,
check whether the mapping exists.
@@ -932,22 +1142,32 @@ def event_source_mapping_exists(UUID=None, EventSourceArn=None,
salt myminion boto_lambda.event_source_mapping_exists uuid
- '''
+ """
- desc = describe_event_source_mapping(UUID=UUID,
- EventSourceArn=EventSourceArn,
- FunctionName=FunctionName,
- region=region, key=key,
- keyid=keyid, profile=profile)
- if 'error' in desc:
+ desc = describe_event_source_mapping(
+ UUID=UUID,
+ EventSourceArn=EventSourceArn,
+ FunctionName=FunctionName,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if "error" in desc:
return desc
- return {'exists': bool(desc.get('event_source_mapping'))}
+ return {"exists": bool(desc.get("event_source_mapping"))}
-def describe_event_source_mapping(UUID=None, EventSourceArn=None,
- FunctionName=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_event_source_mapping(
+ UUID=None,
+ EventSourceArn=None,
+ FunctionName=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given an event source mapping ID or an event source ARN and FunctionName,
obtain the current settings of that mapping.
@@ -959,32 +1179,45 @@ def describe_event_source_mapping(UUID=None, EventSourceArn=None,
salt myminion boto_lambda.describe_event_source_mapping uuid
- '''
+ """
- ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
- FunctionName=FunctionName)
+ ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName)
if len(ids) < 1:
- return {'event_source_mapping': None}
+ return {"event_source_mapping": None}
UUID = ids[0]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
desc = conn.get_event_source_mapping(UUID=UUID)
if desc:
- keys = ('UUID', 'BatchSize', 'EventSourceArn',
- 'FunctionArn', 'LastModified', 'LastProcessingResult',
- 'State', 'StateTransitionReason')
- return {'event_source_mapping': dict([(k, desc.get(k)) for k in keys])}
+ keys = (
+ "UUID",
+ "BatchSize",
+ "EventSourceArn",
+ "FunctionArn",
+ "LastModified",
+ "LastProcessingResult",
+ "State",
+ "StateTransitionReason",
+ )
+ return {"event_source_mapping": dict([(k, desc.get(k)) for k in keys])}
else:
- return {'event_source_mapping': None}
+ return {"event_source_mapping": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def update_event_source_mapping(UUID,
- FunctionName=None, Enabled=None, BatchSize=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def update_event_source_mapping(
+ UUID,
+ FunctionName=None,
+ Enabled=None,
+ BatchSize=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update the event source mapping identified by the UUID.
Returns {updated: true} if the mapping was updated and returns
@@ -996,25 +1229,35 @@ def update_event_source_mapping(UUID,
salt myminion boto_lambda.update_event_source_mapping uuid FunctionName=new_function
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {}
if FunctionName is not None:
- args['FunctionName'] = FunctionName
+ args["FunctionName"] = FunctionName
if Enabled is not None:
- args['Enabled'] = Enabled
+ args["Enabled"] = Enabled
if BatchSize is not None:
- args['BatchSize'] = BatchSize
+ args["BatchSize"] = BatchSize
r = conn.update_event_source_mapping(UUID=UUID, **args)
if r:
- keys = ('UUID', 'BatchSize', 'EventSourceArn',
- 'FunctionArn', 'LastModified', 'LastProcessingResult',
- 'State', 'StateTransitionReason')
- return {'updated': True, 'event_source_mapping': dict([(k, r.get(k)) for k in keys])}
+ keys = (
+ "UUID",
+ "BatchSize",
+ "EventSourceArn",
+ "FunctionArn",
+ "LastModified",
+ "LastProcessingResult",
+ "State",
+ "StateTransitionReason",
+ )
+ return {
+ "updated": True,
+ "event_source_mapping": dict([(k, r.get(k)) for k in keys]),
+ }
else:
- log.warning('Mapping was not updated')
- return {'updated': False}
+ log.warning("Mapping was not updated")
+ return {"updated": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_rds.py b/salt/modules/boto_rds.py
index 5cc978bfa24..4b9d4ba9699 100644
--- a/salt/modules/boto_rds.py
+++ b/salt/modules/boto_rds.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon RDS
.. versionadded:: 2015.8.0
@@ -40,15 +40,16 @@ Connection module for Amazon RDS
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# pylint whinging perfectly valid code
-#pylint: disable=W0106
+# pylint: disable=W0106
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import time
@@ -58,220 +59,261 @@ import salt.utils.odict as odict
import salt.utils.versions
from salt.exceptions import SaltInvocationError
-log = logging.getLogger(__name__)
-
# Import third party libs
from salt.ext import six
+
+log = logging.getLogger(__name__)
+
+
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto3
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto').setLevel(logging.CRITICAL)
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error
boto3_param_map = {
- 'allocated_storage': ('AllocatedStorage', int),
- 'allow_major_version_upgrade': ('AllowMajorVersionUpgrade', bool),
- 'apply_immediately': ('ApplyImmediately', bool),
- 'auto_minor_version_upgrade': ('AutoMinorVersionUpgrade', bool),
- 'availability_zone': ('AvailabilityZone', str),
- 'backup_retention_period': ('BackupRetentionPeriod', int),
- 'ca_certificate_identifier': ('CACertificateIdentifier', str),
- 'character_set_name': ('CharacterSetName', str),
- 'copy_tags_to_snapshot': ('CopyTagsToSnapshot', bool),
- 'db_cluster_identifier': ('DBClusterIdentifier', str),
- 'db_instance_class': ('DBInstanceClass', str),
- 'db_name': ('DBName', str),
- 'db_parameter_group_name': ('DBParameterGroupName', str),
- 'db_port_number': ('DBPortNumber', int),
- 'db_security_groups': ('DBSecurityGroups', list),
- 'db_subnet_group_name': ('DBSubnetGroupName', str),
- 'domain': ('Domain', str),
- 'domain_iam_role_name': ('DomainIAMRoleName', str),
- 'engine': ('Engine', str),
- 'engine_version': ('EngineVersion', str),
- 'iops': ('Iops', int),
- 'kms_key_id': ('KmsKeyId', str),
- 'license_model': ('LicenseModel', str),
- 'master_user_password': ('MasterUserPassword', str),
- 'master_username': ('MasterUsername', str),
- 'monitoring_interval': ('MonitoringInterval', int),
- 'monitoring_role_arn': ('MonitoringRoleArn', str),
- 'multi_az': ('MultiAZ', bool),
- 'name': ('DBInstanceIdentifier', str),
- 'new_db_instance_identifier': ('NewDBInstanceIdentifier', str),
- 'option_group_name': ('OptionGroupName', str),
- 'port': ('Port', int),
- 'preferred_backup_window': ('PreferredBackupWindow', str),
- 'preferred_maintenance_window': ('PreferredMaintenanceWindow', str),
- 'promotion_tier': ('PromotionTier', int),
- 'publicly_accessible': ('PubliclyAccessible', bool),
- 'storage_encrypted': ('StorageEncrypted', bool),
- 'storage_type': ('StorageType', str),
- 'tags': ('Tags', list),
- 'tde_credential_arn': ('TdeCredentialArn', str),
- 'tde_credential_password': ('TdeCredentialPassword', str),
- 'vpc_security_group_ids': ('VpcSecurityGroupIds', list),
+ "allocated_storage": ("AllocatedStorage", int),
+ "allow_major_version_upgrade": ("AllowMajorVersionUpgrade", bool),
+ "apply_immediately": ("ApplyImmediately", bool),
+ "auto_minor_version_upgrade": ("AutoMinorVersionUpgrade", bool),
+ "availability_zone": ("AvailabilityZone", str),
+ "backup_retention_period": ("BackupRetentionPeriod", int),
+ "ca_certificate_identifier": ("CACertificateIdentifier", str),
+ "character_set_name": ("CharacterSetName", str),
+ "copy_tags_to_snapshot": ("CopyTagsToSnapshot", bool),
+ "db_cluster_identifier": ("DBClusterIdentifier", str),
+ "db_instance_class": ("DBInstanceClass", str),
+ "db_name": ("DBName", str),
+ "db_parameter_group_name": ("DBParameterGroupName", str),
+ "db_port_number": ("DBPortNumber", int),
+ "db_security_groups": ("DBSecurityGroups", list),
+ "db_subnet_group_name": ("DBSubnetGroupName", str),
+ "domain": ("Domain", str),
+ "domain_iam_role_name": ("DomainIAMRoleName", str),
+ "engine": ("Engine", str),
+ "engine_version": ("EngineVersion", str),
+ "iops": ("Iops", int),
+ "kms_key_id": ("KmsKeyId", str),
+ "license_model": ("LicenseModel", str),
+ "master_user_password": ("MasterUserPassword", str),
+ "master_username": ("MasterUsername", str),
+ "monitoring_interval": ("MonitoringInterval", int),
+ "monitoring_role_arn": ("MonitoringRoleArn", str),
+ "multi_az": ("MultiAZ", bool),
+ "name": ("DBInstanceIdentifier", str),
+ "new_db_instance_identifier": ("NewDBInstanceIdentifier", str),
+ "option_group_name": ("OptionGroupName", str),
+ "port": ("Port", int),
+ "preferred_backup_window": ("PreferredBackupWindow", str),
+ "preferred_maintenance_window": ("PreferredMaintenanceWindow", str),
+ "promotion_tier": ("PromotionTier", int),
+ "publicly_accessible": ("PubliclyAccessible", bool),
+ "storage_encrypted": ("StorageEncrypted", bool),
+ "storage_type": ("StorageType", str),
+ "tags": ("Tags", list),
+ "tde_credential_arn": ("TdeCredentialArn", str),
+ "tde_credential_password": ("TdeCredentialPassword", str),
+ "vpc_security_group_ids": ("VpcSecurityGroupIds", list),
}
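boto3_param_map pairs each Salt-style keyword with its boto3 CamelCase name and a coercion callable. The hunk that consumes it inside create() is elided below (@@ -291,164 ...), so the following is a sketch of the presumed translation step, not the module's actual loop:

def _map_params(user_kwargs, param_map):
    # Translate snake_case Salt kwargs into typed boto3 CamelCase kwargs.
    kwargs = {}
    for name, value in user_kwargs.items():
        if name in param_map and value is not None:
            boto_name, coerce = param_map[name]
            kwargs[boto_name] = coerce(value)
    return kwargs

sample = {"allocated_storage": "10", "multi_az": 0, "name": "myrds"}
# With the boto3_param_map defined above, _map_params(sample, boto3_param_map)
# would yield {"AllocatedStorage": 10, "MultiAZ": False, "DBInstanceIdentifier": "myrds"}.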
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.3.1'
- )
+ """
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.3.1")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 'rds')
+ __utils__["boto3.assign_funcs"](__name__, "rds")
def exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an RDS exists.
CLI example::
salt myminion boto_rds.exists myrds region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
- return {'exists': bool(rds)}
+ return {"exists": bool(rds)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def option_group_exists(
+ name, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_option_groups(OptionGroupName=name)
- return {'exists': bool(rds)}
+ return {"exists": bool(rds)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def parameter_group_exists(name, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def parameter_group_exists(
+ name, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if an RDS parameter group exists.
CLI example::
salt myminion boto_rds.parameter_group_exists myparametergroup \
region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_db_parameter_groups(DBParameterGroupName=name)
- return {'exists': bool(rds), 'error': None}
+ return {"exists": bool(rds), "error": None}
except ClientError as e:
resp = {}
- if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
- resp['exists'] = False
- resp['error'] = __utils__['boto3.get_error'](e)
+ if e.response["Error"]["Code"] == "DBParameterGroupNotFound":
+ resp["exists"] = False
+ resp["error"] = __utils__["boto3.get_error"](e)
return resp
-def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def subnet_group_exists(
+ name, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check to see if an RDS subnet group exists.
CLI example::
salt myminion boto_rds.subnet_group_exists my-param-group \
region=us-east-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'exists': bool(conn)}
+ return {"exists": bool(conn)}
rds = conn.describe_db_subnet_groups(DBSubnetGroupName=name)
- return {'exists': bool(rds)}
+ return {"exists": bool(rds)}
except ClientError as e:
if "DBSubnetGroupNotFoundFault" in e.message:
- return {'exists': False}
+ return {"exists": False}
else:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create(name, allocated_storage, db_instance_class, engine,
- master_username, master_user_password, db_name=None,
- db_security_groups=None, vpc_security_group_ids=None,
- vpc_security_groups=None, availability_zone=None,
- db_subnet_group_name=None, preferred_maintenance_window=None,
- db_parameter_group_name=None, backup_retention_period=None,
- preferred_backup_window=None, port=None, multi_az=None,
- engine_version=None, auto_minor_version_upgrade=None,
- license_model=None, iops=None, option_group_name=None,
- character_set_name=None, publicly_accessible=None, wait_status=None,
- tags=None, db_cluster_identifier=None, storage_type=None,
- tde_credential_arn=None, tde_credential_password=None,
- storage_encrypted=None, kms_key_id=None, domain=None,
- copy_tags_to_snapshot=None, monitoring_interval=None,
- monitoring_role_arn=None, domain_iam_role_name=None, region=None,
- promotion_tier=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ name,
+ allocated_storage,
+ db_instance_class,
+ engine,
+ master_username,
+ master_user_password,
+ db_name=None,
+ db_security_groups=None,
+ vpc_security_group_ids=None,
+ vpc_security_groups=None,
+ availability_zone=None,
+ db_subnet_group_name=None,
+ preferred_maintenance_window=None,
+ db_parameter_group_name=None,
+ backup_retention_period=None,
+ preferred_backup_window=None,
+ port=None,
+ multi_az=None,
+ engine_version=None,
+ auto_minor_version_upgrade=None,
+ license_model=None,
+ iops=None,
+ option_group_name=None,
+ character_set_name=None,
+ publicly_accessible=None,
+ wait_status=None,
+ tags=None,
+ db_cluster_identifier=None,
+ storage_type=None,
+ tde_credential_arn=None,
+ tde_credential_password=None,
+ storage_encrypted=None,
+ kms_key_id=None,
+ domain=None,
+ copy_tags_to_snapshot=None,
+ monitoring_interval=None,
+ monitoring_role_arn=None,
+ domain_iam_role_name=None,
+ region=None,
+ promotion_tier=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an RDS Instance
CLI example to create an RDS Instance::
salt myminion boto_rds.create myrds 10 db.t2.micro MySQL sqlusr sqlpassw
- '''
+ """
if not allocated_storage:
- raise SaltInvocationError('allocated_storage is required')
+ raise SaltInvocationError("allocated_storage is required")
if not db_instance_class:
- raise SaltInvocationError('db_instance_class is required')
+ raise SaltInvocationError("db_instance_class is required")
if not engine:
- raise SaltInvocationError('engine is required')
+ raise SaltInvocationError("engine is required")
if not master_username:
- raise SaltInvocationError('master_username is required')
+ raise SaltInvocationError("master_username is required")
if not master_user_password:
- raise SaltInvocationError('master_user_password is required')
+ raise SaltInvocationError("master_user_password is required")
if availability_zone and multi_az:
- raise SaltInvocationError('availability_zone and multi_az are mutually'
- ' exclusive arguments.')
+ raise SaltInvocationError(
+ "availability_zone and multi_az are mutually" " exclusive arguments."
+ )
if wait_status:
- wait_stati = ['available', 'modifying', 'backing-up']
+ wait_stati = ["available", "modifying", "backing-up"]
if wait_status not in wait_stati:
raise SaltInvocationError(
- 'wait_status can be one of: {0}'.format(wait_stati))
+ "wait_status can be one of: {0}".format(wait_stati)
+ )
if vpc_security_groups:
- v_tmp = __salt__['boto_secgroup.convert_to_group_ids'](
- groups=vpc_security_groups, region=region, key=key, keyid=keyid,
- profile=profile)
- vpc_security_group_ids = (vpc_security_group_ids + v_tmp
- if vpc_security_group_ids else v_tmp)
+ v_tmp = __salt__["boto_secgroup.convert_to_group_ids"](
+ groups=vpc_security_groups,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ vpc_security_group_ids = (
+ vpc_security_group_ids + v_tmp if vpc_security_group_ids else v_tmp
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
kwargs = {}
boto_params = set(boto3_param_map.keys())
@@ -291,164 +333,230 @@ def create(name, allocated_storage, db_instance_class, engine,
rds = conn.create_db_instance(**kwargs)
if not rds:
- return {'created': False}
+ return {"created": False}
if not wait_status:
- return {'created': True, 'message':
- 'RDS instance {0} created.'.format(name)}
+ return {
+ "created": True,
+ "message": "RDS instance {0} created.".format(name),
+ }
while True:
- jmespath = 'DBInstances[*].DBInstanceStatus'
- status = describe_db_instances(name=name, jmespath=jmespath,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ jmespath = "DBInstances[*].DBInstanceStatus"
+ status = describe_db_instances(
+ name=name,
+ jmespath=jmespath,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if status:
stat = status[0]
else:
# Whoops, something is horribly wrong...
- return {'created': False,
- 'error': "RDS instance {0} should have been created but"
- " now I can't find it.".format(name)}
+ return {
+ "created": False,
+ "error": "RDS instance {0} should have been created but"
+ " now I can't find it.".format(name),
+ }
if stat == wait_status:
- return {'created': True,
- 'message': 'RDS instance {0} created (current status '
- '{1})'.format(name, stat)}
+ return {
+ "created": True,
+ "message": "RDS instance {0} created (current status "
+ "{1})".format(name, stat),
+ }
time.sleep(10)
- log.info('Instance status after 10 seconds is: %s', stat)
+ log.info("Instance status after 10 seconds is: %s", stat)
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_read_replica(name, source_name, db_instance_class=None,
- availability_zone=None, port=None,
- auto_minor_version_upgrade=None, iops=None,
- option_group_name=None, publicly_accessible=None,
- tags=None, db_subnet_group_name=None,
- storage_type=None, copy_tags_to_snapshot=None,
- monitoring_interval=None, monitoring_role_arn=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_read_replica(
+ name,
+ source_name,
+ db_instance_class=None,
+ availability_zone=None,
+ port=None,
+ auto_minor_version_upgrade=None,
+ iops=None,
+ option_group_name=None,
+ publicly_accessible=None,
+ tags=None,
+ db_subnet_group_name=None,
+ storage_type=None,
+ copy_tags_to_snapshot=None,
+ monitoring_interval=None,
+ monitoring_role_arn=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an RDS read replica
CLI example to create an RDS read replica::
salt myminion boto_rds.create_read_replica replicaname source_name
- '''
+ """
if not backup_retention_period:
- raise SaltInvocationError('backup_retention_period is required')
- res = __salt__['boto_rds.exists'](source_name, tags, region, key, keyid, profile)
- if not res.get('exists'):
- return {'exists': bool(res), 'message':
- 'RDS instance source {0} does not exists.'.format(source_name)}
+ raise SaltInvocationError("backup_retention_period is required")
+ res = __salt__["boto_rds.exists"](source_name, tags, region, key, keyid, profile)
+ if not res.get("exists"):
+ return {
+ "exists": bool(res),
+ "message": "RDS instance source {0} does not exists.".format(source_name),
+ }
- res = __salt__['boto_rds.exists'](name, tags, region, key, keyid, profile)
- if res.get('exists'):
- return {'exists': bool(res), 'message':
- 'RDS replica instance {0} already exists.'.format(name)}
+ res = __salt__["boto_rds.exists"](name, tags, region, key, keyid, profile)
+ if res.get("exists"):
+ return {
+ "exists": bool(res),
+ "message": "RDS replica instance {0} already exists.".format(name),
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for key in ('OptionGroupName', 'MonitoringRoleArn'):
+ for key in ("OptionGroupName", "MonitoringRoleArn"):
if locals()[key] is not None:
- kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
+ kwargs[key] = str(
+ locals()[key]
+ ) # future lint: disable=blacklisted-function
- for key in ('MonitoringInterval', 'Iops', 'Port'):
+ for key in ("MonitoringInterval", "Iops", "Port"):
if locals()[key] is not None:
kwargs[key] = int(locals()[key])
- for key in ('CopyTagsToSnapshot', 'AutoMinorVersionUpgrade'):
+ for key in ("CopyTagsToSnapshot", "AutoMinorVersionUpgrade"):
if locals()[key] is not None:
kwargs[key] = bool(locals()[key])
taglist = _tag_doc(tags)
- rds_replica = conn.create_db_instance_read_replica(DBInstanceIdentifier=name,
- SourceDBInstanceIdentifier=source_name,
- DBInstanceClass=db_instance_class,
- AvailabilityZone=availability_zone,
- PubliclyAccessible=publicly_accessible,
- Tags=taglist, DBSubnetGroupName=db_subnet_group_name,
- StorageType=storage_type,
- **kwargs)
+ rds_replica = conn.create_db_instance_read_replica(
+ DBInstanceIdentifier=name,
+ SourceDBInstanceIdentifier=source_name,
+ DBInstanceClass=db_instance_class,
+ AvailabilityZone=availability_zone,
+ PubliclyAccessible=publicly_accessible,
+ Tags=taglist,
+ DBSubnetGroupName=db_subnet_group_name,
+ StorageType=storage_type,
+ **kwargs
+ )
- return {'exists': bool(rds_replica)}
+ return {"exists": bool(rds_replica)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_option_group(name, engine_name, major_engine_version,
- option_group_description, tags=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def create_option_group(
+ name,
+ engine_name,
+ major_engine_version,
+ option_group_description,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an RDS option group
CLI example to create an RDS option group::
salt myminion boto_rds.create_option_group my-opt-group mysql 5.6 \
"group description"
- '''
- res = __salt__['boto_rds.option_group_exists'](name, tags, region, key, keyid,
- profile)
- if res.get('exists'):
- return {'exists': bool(res)}
+ """
+ res = __salt__["boto_rds.option_group_exists"](
+ name, tags, region, key, keyid, profile
+ )
+ if res.get("exists"):
+ return {"exists": bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
taglist = _tag_doc(tags)
- rds = conn.create_option_group(OptionGroupName=name,
- EngineName=engine_name,
- MajorEngineVersion=major_engine_version,
- OptionGroupDescription=option_group_description,
- Tags=taglist)
+ rds = conn.create_option_group(
+ OptionGroupName=name,
+ EngineName=engine_name,
+ MajorEngineVersion=major_engine_version,
+ OptionGroupDescription=option_group_description,
+ Tags=taglist,
+ )
- return {'exists': bool(rds)}
+ return {"exists": bool(rds)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_parameter_group(name, db_parameter_group_family, description,
- tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_parameter_group(
+ name,
+ db_parameter_group_family,
+ description,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an RDS parameter group
CLI example to create an RDS parameter group::
salt myminion boto_rds.create_parameter_group my-param-group mysql5.6 \
"group description"
- '''
- res = __salt__['boto_rds.parameter_group_exists'](name, tags, region, key,
- keyid, profile)
- if res.get('exists'):
- return {'exists': bool(res)}
+ """
+ res = __salt__["boto_rds.parameter_group_exists"](
+ name, tags, region, key, keyid, profile
+ )
+ if res.get("exists"):
+ return {"exists": bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
taglist = _tag_doc(tags)
- rds = conn.create_db_parameter_group(DBParameterGroupName=name,
- DBParameterGroupFamily=db_parameter_group_family,
- Description=description,
- Tags=taglist)
+ rds = conn.create_db_parameter_group(
+ DBParameterGroupName=name,
+ DBParameterGroupFamily=db_parameter_group_family,
+ Description=description,
+ Tags=taglist,
+ )
if not rds:
- return {'created': False, 'message':
- 'Failed to create RDS parameter group {0}'.format(name)}
+ return {
+ "created": False,
+ "message": "Failed to create RDS parameter group {0}".format(name),
+ }
- return {'exists': bool(rds), 'message':
- 'Created RDS parameter group {0}'.format(name)}
+ return {
+ "exists": bool(rds),
+ "message": "Created RDS parameter group {0}".format(name),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def create_subnet_group(name, description, subnet_ids, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_subnet_group(
+ name,
+ description,
+ subnet_ids,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an RDS subnet group
CLI example to create an RDS subnet group::
@@ -456,31 +564,42 @@ def create_subnet_group(name, description, subnet_ids, tags=None,
salt myminion boto_rds.create_subnet_group my-subnet-group \
"group description" '[subnet-12345678, subnet-87654321]' \
region=us-east-1
- '''
- res = __salt__['boto_rds.subnet_group_exists'](name, tags, region, key,
- keyid, profile)
- if res.get('exists'):
- return {'exists': bool(res)}
+ """
+ res = __salt__["boto_rds.subnet_group_exists"](
+ name, tags, region, key, keyid, profile
+ )
+ if res.get("exists"):
+ return {"exists": bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
taglist = _tag_doc(tags)
- rds = conn.create_db_subnet_group(DBSubnetGroupName=name,
- DBSubnetGroupDescription=description,
- SubnetIds=subnet_ids, Tags=taglist)
+ rds = conn.create_db_subnet_group(
+ DBSubnetGroupName=name,
+ DBSubnetGroupDescription=description,
+ SubnetIds=subnet_ids,
+ Tags=taglist,
+ )
- return {'created': bool(rds)}
+ return {"created": bool(rds)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def update_parameter_group(name, parameters, apply_method="pending-reboot",
- tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def update_parameter_group(
+ name,
+ parameters,
+ apply_method="pending-reboot",
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Update an RDS parameter group.
CLI example::
@@ -488,93 +607,126 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot",
salt myminion boto_rds.update_parameter_group my-param-group \
parameters='{"back_log":1, "binlog_cache_size":4096}' \
region=us-east-1
- '''
+ """
- res = __salt__['boto_rds.parameter_group_exists'](name, tags, region, key,
- keyid, profile)
- if not res.get('exists'):
- return {'exists': bool(res), 'message':
- 'RDS parameter group {0} does not exist.'.format(name)}
+ res = __salt__["boto_rds.parameter_group_exists"](
+ name, tags, region, key, keyid, profile
+ )
+ if not res.get("exists"):
+ return {
+ "exists": bool(res),
+ "message": "RDS parameter group {0} does not exist.".format(name),
+ }
param_list = []
for key, value in six.iteritems(parameters):
item = odict.OrderedDict()
- item.update({'ParameterName': key})
- item.update({'ApplyMethod': apply_method})
+ item.update({"ParameterName": key})
+ item.update({"ApplyMethod": apply_method})
if type(value) is bool:
- item.update({'ParameterValue': 'on' if value else 'off'})
+ item.update({"ParameterValue": "on" if value else "off"})
else:
- item.update({'ParameterValue': str(value)}) # future lint: disable=blacklisted-function
+ item.update(
+ {"ParameterValue": str(value)}
+ ) # future lint: disable=blacklisted-function
param_list.append(item)
if not param_list:
- return {'results': False}
+ return {"results": False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
- res = conn.modify_db_parameter_group(DBParameterGroupName=name,
- Parameters=param_list)
- return {'results': bool(res)}
+ res = conn.modify_db_parameter_group(
+ DBParameterGroupName=name, Parameters=param_list
+ )
+ return {"results": bool(res)}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe(name, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def describe(name, tags=None, region=None, key=None, keyid=None, profile=None):
+ """
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
- '''
- res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
- profile)
- if not res.get('exists'):
- return {'exists': bool(res), 'message':
- 'RDS instance {0} does not exist.'.format(name)}
+ """
+ res = __salt__["boto_rds.exists"](name, tags, region, key, keyid, profile)
+ if not res.get("exists"):
+ return {
+ "exists": bool(res),
+ "message": "RDS instance {0} does not exist.".format(name),
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
rds = [
- i for i in rds.get('DBInstances', [])
- if i.get('DBInstanceIdentifier') == name
+ i
+ for i in rds.get("DBInstances", [])
+ if i.get("DBInstanceIdentifier") == name
].pop(0)
if rds:
- keys = ('DBInstanceIdentifier', 'DBInstanceClass', 'Engine',
- 'DBInstanceStatus', 'DBName', 'AllocatedStorage',
- 'PreferredBackupWindow', 'BackupRetentionPeriod',
- 'AvailabilityZone', 'PreferredMaintenanceWindow',
- 'LatestRestorableTime', 'EngineVersion',
- 'AutoMinorVersionUpgrade', 'LicenseModel',
- 'Iops', 'CharacterSetName', 'PubliclyAccessible',
- 'StorageType', 'TdeCredentialArn', 'DBInstancePort',
- 'DBClusterIdentifier', 'StorageEncrypted', 'KmsKeyId',
- 'DbiResourceId', 'CACertificateIdentifier',
- 'CopyTagsToSnapshot', 'MonitoringInterval',
- 'MonitoringRoleArn', 'PromotionTier',
- 'DomainMemberships')
- return {'rds': dict([(k, rds.get(k)) for k in keys])}
+ keys = (
+ "DBInstanceIdentifier",
+ "DBInstanceClass",
+ "Engine",
+ "DBInstanceStatus",
+ "DBName",
+ "AllocatedStorage",
+ "PreferredBackupWindow",
+ "BackupRetentionPeriod",
+ "AvailabilityZone",
+ "PreferredMaintenanceWindow",
+ "LatestRestorableTime",
+ "EngineVersion",
+ "AutoMinorVersionUpgrade",
+ "LicenseModel",
+ "Iops",
+ "CharacterSetName",
+ "PubliclyAccessible",
+ "StorageType",
+ "TdeCredentialArn",
+ "DBInstancePort",
+ "DBClusterIdentifier",
+ "StorageEncrypted",
+ "KmsKeyId",
+ "DbiResourceId",
+ "CACertificateIdentifier",
+ "CopyTagsToSnapshot",
+ "MonitoringInterval",
+ "MonitoringRoleArn",
+ "PromotionTier",
+ "DomainMemberships",
+ )
+ return {"rds": dict([(k, rds.get(k)) for k in keys])}
else:
- return {'rds': None}
+ return {"rds": None}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
except IndexError:
- return {'rds': None}
+ return {"rds": None}
-def describe_db_instances(name=None, filters=None, jmespath='DBInstances',
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_db_instances(
+ name=None,
+ filters=None,
+ jmespath="DBInstances",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Return a detailed listing of some, or all, DB Instances visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMESPath filter as well.
@@ -583,26 +735,33 @@ def describe_db_instances(name=None, filters=None, jmespath='DBInstances',
salt myminion boto_rds.describe_db_instances jmespath='DBInstances[*].DBInstanceIdentifier'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- pag = conn.get_paginator('describe_db_instances')
+ pag = conn.get_paginator("describe_db_instances")
args = {}
- args.update({'DBInstanceIdentifier': name}) if name else None
- args.update({'Filters': filters}) if filters else None
+ args.update({"DBInstanceIdentifier": name}) if name else None
+ args.update({"Filters": filters}) if filters else None
pit = pag.paginate(**args)
pit = pit.search(jmespath) if jmespath else pit
try:
return [p for p in pit]
except ClientError as e:
- code = getattr(e, 'response', {}).get('Error', {}).get('Code')
- if code != 'DBInstanceNotFound':
- log.error(__utils__['boto3.get_error'](e))
+ code = getattr(e, "response", {}).get("Error", {}).get("Code")
+ if code != "DBInstanceNotFound":
+ log.error(__utils__["boto3.get_error"](e))
return []
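The paginate-then-search pattern above is plain boto3: get_paginator() returns a Paginator whose PageIterator supports JMESPath filtering via .search(). A minimal standalone sketch (assumes AWS credentials are configured in the environment):

import boto3

conn = boto3.client("rds", region_name="us-east-1")
pag = conn.get_paginator("describe_db_instances")
pit = pag.paginate()  # optionally DBInstanceIdentifier=..., Filters=[...]
# PageIterator.search() applies a JMESPath expression across all pages:
names = [n for n in pit.search("DBInstances[*].DBInstanceIdentifier")]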
-def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups',
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_db_subnet_groups(
+ name=None,
+ filters=None,
+ jmespath="DBSubnetGroups",
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Return a detailed listing of some, or all, DB Subnet Groups visible in the
current scope. Arbitrary subelements or subsections of the returned dataset
can be selected by passing in a valid JMSEPath filter as well.
@@ -611,326 +770,400 @@ def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups'
salt myminion boto_rds.describe_db_subnet_groups
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- pag = conn.get_paginator('describe_db_subnet_groups')
+ pag = conn.get_paginator("describe_db_subnet_groups")
args = {}
- args.update({'DBSubnetGroupName': name}) if name else None
- args.update({'Filters': filters}) if filters else None
+ args.update({"DBSubnetGroupName": name}) if name else None
+ args.update({"Filters": filters}) if filters else None
pit = pag.paginate(**args)
pit = pit.search(jmespath) if jmespath else pit
return [p for p in pit]
-def get_endpoint(name, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_endpoint(name, tags=None, region=None, key=None, keyid=None, profile=None):
+ """
Return the endpoint of an RDS instance.
CLI example::
salt myminion boto_rds.get_endpoint myrds
- '''
+ """
endpoint = False
- res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
- profile)
- if res.get('exists'):
+ res = __salt__["boto_rds.exists"](name, tags, region, key, keyid, profile)
+ if res.get("exists"):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn:
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
- if rds and 'Endpoint' in rds['DBInstances'][0]:
- endpoint = rds['DBInstances'][0]['Endpoint']['Address']
+ if rds and "Endpoint" in rds["DBInstances"][0]:
+ endpoint = rds["DBInstances"][0]["Endpoint"]["Address"]
return endpoint
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
return endpoint
-def delete(name, skip_final_snapshot=None, final_db_snapshot_identifier=None,
- region=None, key=None, keyid=None, profile=None, tags=None,
- wait_for_deletion=True, timeout=180):
- '''
+def delete(
+ name,
+ skip_final_snapshot=None,
+ final_db_snapshot_identifier=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ tags=None,
+ wait_for_deletion=True,
+ timeout=180,
+):
+ """
Delete an RDS instance.
CLI example::
salt myminion boto_rds.delete myrds skip_final_snapshot=True \
region=us-east-1
- '''
+ """
if timeout == 180 and not skip_final_snapshot:
timeout = 420
if not skip_final_snapshot and not final_db_snapshot_identifier:
- raise SaltInvocationError('At least one of the following must'
- ' be specified: skip_final_snapshot'
- ' final_db_snapshot_identifier')
+ raise SaltInvocationError(
+ "At least one of the following must"
+ " be specified: skip_final_snapshot"
+ " final_db_snapshot_identifier"
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'deleted': bool(conn)}
+ return {"deleted": bool(conn)}
kwargs = {}
- if locals()['skip_final_snapshot'] is not None:
- kwargs['SkipFinalSnapshot'] = bool(locals()['skip_final_snapshot'])
+ if locals()["skip_final_snapshot"] is not None:
+ kwargs["SkipFinalSnapshot"] = bool(locals()["skip_final_snapshot"])
- if locals()['final_db_snapshot_identifier'] is not None:
- kwargs['FinalDBSnapshotIdentifier'] = str(locals()['final_db_snapshot_identifier']) # future lint: disable=blacklisted-function
+ if locals()["final_db_snapshot_identifier"] is not None:
+ kwargs["FinalDBSnapshotIdentifier"] = str(
+ locals()["final_db_snapshot_identifier"]
+ ) # future lint: disable=blacklisted-function
res = conn.delete_db_instance(DBInstanceIdentifier=name, **kwargs)
if not wait_for_deletion:
- return {'deleted': bool(res), 'message':
- 'Deleted RDS instance {0}.'.format(name)}
+ return {
+ "deleted": bool(res),
+ "message": "Deleted RDS instance {0}.".format(name),
+ }
start_time = time.time()
while True:
- res = __salt__['boto_rds.exists'](name=name, tags=tags, region=region,
- key=key, keyid=keyid,
- profile=profile)
- if not res.get('exists'):
- return {'deleted': bool(res), 'message':
- 'Deleted RDS instance {0} completely.'.format(name)}
+ res = __salt__["boto_rds.exists"](
+ name=name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if not res.get("exists"):
+ return {
+ "deleted": bool(res),
+ "message": "Deleted RDS instance {0} completely.".format(name),
+ }
if time.time() - start_time > timeout:
- raise SaltInvocationError('RDS instance {0} has not been '
- 'deleted completely after {1} '
- 'seconds'.format(name, timeout))
- log.info('Waiting up to %s seconds for RDS instance %s to be '
- 'deleted.', timeout, name)
+ raise SaltInvocationError(
+ "RDS instance {0} has not been "
+ "deleted completely after {1} "
+ "seconds".format(name, timeout)
+ )
+ log.info(
+ "Waiting up to %s seconds for RDS instance %s to be " "deleted.",
+ timeout,
+ name,
+ )
time.sleep(10)
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def delete_option_group(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an RDS option group.
CLI example::
salt myminion boto_rds.delete_option_group my-opt-group \
region=us-east-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'deleted': bool(conn)}
+ return {"deleted": bool(conn)}
res = conn.delete_option_group(OptionGroupName=name)
if not res:
- return {'deleted': bool(res), 'message':
- 'Failed to delete RDS option group {0}.'.format(name)}
+ return {
+ "deleted": bool(res),
+ "message": "Failed to delete RDS option group {0}.".format(name),
+ }
- return {'deleted': bool(res), 'message':
- 'Deleted RDS option group {0}.'.format(name)}
+ return {
+ "deleted": bool(res),
+ "message": "Deleted RDS option group {0}.".format(name),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def delete_parameter_group(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_parameter_group(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete an RDS parameter group.
CLI example::
salt myminion boto_rds.delete_parameter_group my-param-group \
region=us-east-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
r = conn.delete_db_parameter_group(DBParameterGroupName=name)
- return {'deleted': bool(r), 'message':
- 'Deleted RDS parameter group {0}.'.format(name)}
+ return {
+ "deleted": bool(r),
+ "message": "Deleted RDS parameter group {0}.".format(name),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def delete_subnet_group(name, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None):
+ """
Delete an RDS subnet group.
CLI example::
salt myminion boto_rds.delete_subnet_group my-subnet-group \
region=us-east-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
r = conn.delete_db_subnet_group(DBSubnetGroupName=name)
- return {'deleted': bool(r), 'message':
- 'Deleted RDS subnet group {0}.'.format(name)}
+ return {
+ "deleted": bool(r),
+ "message": "Deleted RDS subnet group {0}.".format(name),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_parameter_group(
+ name,
+ Filters=None,
+ MaxRecords=None,
+ Marker=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Returns a list of `DBParameterGroup` descriptions.
CLI example to describe a parameter group::
salt myminion boto_rds.describe_parameter_group parametergroupname\
region=us-east-1
- '''
- res = __salt__['boto_rds.parameter_group_exists'](name, tags=None,
- region=region, key=key,
- keyid=keyid,
- profile=profile)
- if not res.get('exists'):
- return {'exists': bool(res)}
+ """
+ res = __salt__["boto_rds.parameter_group_exists"](
+ name, tags=None, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if not res.get("exists"):
+ return {"exists": bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'results': bool(conn)}
+ return {"results": bool(conn)}
kwargs = {}
- for key in ('Marker', 'Filters'):
+ for key in ("Marker", "Filters"):
if locals()[key] is not None:
- kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
+ kwargs[key] = str(
+ locals()[key]
+ ) # future lint: disable=blacklisted-function
- if locals()['MaxRecords'] is not None:
- kwargs['MaxRecords'] = int(locals()['MaxRecords'])
+ if locals()["MaxRecords"] is not None:
+ kwargs["MaxRecords"] = int(locals()["MaxRecords"])
- info = conn.describe_db_parameter_groups(DBParameterGroupName=name,
- **kwargs)
+ info = conn.describe_db_parameter_groups(DBParameterGroupName=name, **kwargs)
if not info:
- return {'results': bool(info), 'message':
- 'Failed to get RDS description for group {0}.'.format(name)}
+ return {
+ "results": bool(info),
+ "message": "Failed to get RDS description for group {0}.".format(name),
+ }
- return {'results': bool(info), 'message':
- 'Got RDS descrition for group {0}.'.format(name)}
+ return {
+ "results": bool(info),
+ "message": "Got RDS descrition for group {0}.".format(name),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_parameters(
+ name,
+ Source=None,
+ MaxRecords=None,
+ Marker=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Returns a list of `DBParameterGroup` parameters.
CLI example to describe parameters::
salt myminion boto_rds.describe_parameters parametergroupname\
region=us-east-1
- '''
- res = __salt__['boto_rds.parameter_group_exists'](name, tags=None,
- region=region, key=key,
- keyid=keyid,
- profile=profile)
- if not res.get('exists'):
- return {'result': False,
- 'message': 'Parameter group {0} does not exist'.format(name)}
+ """
+ res = __salt__["boto_rds.parameter_group_exists"](
+ name, tags=None, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if not res.get("exists"):
+ return {
+ "result": False,
+ "message": "Parameter group {0} does not exist".format(name),
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'result': False,
- 'message': 'Could not establish a connection to RDS'}
+ return {
+ "result": False,
+ "message": "Could not establish a connection to RDS",
+ }
kwargs = {}
- kwargs.update({'DBParameterGroupName': name})
- for key in ('Marker', 'Source'):
+ kwargs.update({"DBParameterGroupName": name})
+ for key in ("Marker", "Source"):
if locals()[key] is not None:
- kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
+ kwargs[key] = str(
+ locals()[key]
+ ) # future lint: disable=blacklisted-function
- if locals()['MaxRecords'] is not None:
- kwargs['MaxRecords'] = int(locals()['MaxRecords'])
+ if locals()["MaxRecords"] is not None:
+ kwargs["MaxRecords"] = int(locals()["MaxRecords"])
- pag = conn.get_paginator('describe_db_parameters')
+ pag = conn.get_paginator("describe_db_parameters")
pit = pag.paginate(**kwargs)
- keys = ['ParameterName', 'ParameterValue', 'Description',
- 'Source', 'ApplyType', 'DataType', 'AllowedValues',
- 'IsModifieable', 'MinimumEngineVersion', 'ApplyMethod']
+ keys = [
+ "ParameterName",
+ "ParameterValue",
+ "Description",
+ "Source",
+ "ApplyType",
+ "DataType",
+ "AllowedValues",
+ "IsModifieable",
+ "MinimumEngineVersion",
+ "ApplyMethod",
+ ]
parameters = odict.OrderedDict()
- ret = {'result': True}
+ ret = {"result": True}
for p in pit:
- for result in p['Parameters']:
+ for result in p["Parameters"]:
data = odict.OrderedDict()
for k in keys:
data[k] = result.get(k)
- parameters[result.get('ParameterName')] = data
+ parameters[result.get("ParameterName")] = data
- ret['parameters'] = parameters
+ ret["parameters"] = parameters
return ret
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def modify_db_instance(name,
- allocated_storage=None,
- allow_major_version_upgrade=None,
- apply_immediately=None,
- auto_minor_version_upgrade=None,
- backup_retention_period=None,
- ca_certificate_identifier=None,
- character_set_name=None,
- copy_tags_to_snapshot=None,
- db_cluster_identifier=None,
- db_instance_class=None,
- db_name=None,
- db_parameter_group_name=None,
- db_port_number=None,
- db_security_groups=None,
- db_subnet_group_name=None,
- domain=None,
- domain_iam_role_name=None,
- engine_version=None,
- iops=None,
- kms_key_id=None,
- license_model=None,
- master_user_password=None,
- monitoring_interval=None,
- monitoring_role_arn=None,
- multi_az=None,
- new_db_instance_identifier=None,
- option_group_name=None,
- preferred_backup_window=None,
- preferred_maintenance_window=None,
- promotion_tier=None,
- publicly_accessible=None,
- storage_encrypted=None,
- storage_type=None,
- tde_credential_arn=None,
- tde_credential_password=None,
- vpc_security_group_ids=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def modify_db_instance(
+ name,
+ allocated_storage=None,
+ allow_major_version_upgrade=None,
+ apply_immediately=None,
+ auto_minor_version_upgrade=None,
+ backup_retention_period=None,
+ ca_certificate_identifier=None,
+ character_set_name=None,
+ copy_tags_to_snapshot=None,
+ db_cluster_identifier=None,
+ db_instance_class=None,
+ db_name=None,
+ db_parameter_group_name=None,
+ db_port_number=None,
+ db_security_groups=None,
+ db_subnet_group_name=None,
+ domain=None,
+ domain_iam_role_name=None,
+ engine_version=None,
+ iops=None,
+ kms_key_id=None,
+ license_model=None,
+ master_user_password=None,
+ monitoring_interval=None,
+ monitoring_role_arn=None,
+ multi_az=None,
+ new_db_instance_identifier=None,
+ option_group_name=None,
+ preferred_backup_window=None,
+ preferred_maintenance_window=None,
+ promotion_tier=None,
+ publicly_accessible=None,
+ storage_encrypted=None,
+ storage_type=None,
+ tde_credential_arn=None,
+ tde_credential_password=None,
+ vpc_security_group_ids=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Modify settings for a DB instance.
CLI example::
salt myminion boto_rds.modify_db_instance db_instance_identifier region=us-east-1
- '''
- res = __salt__['boto_rds.exists'](name, tags=None, region=region, key=key, keyid=keyid, profile=profile)
- if not res.get('exists'):
- return {'modified': False, 'message':
- 'RDS db instance {0} does not exist.'.format(name)}
+ """
+ res = __salt__["boto_rds.exists"](
+ name, tags=None, region=region, key=key, keyid=keyid, profile=profile
+ )
+ if not res.get("exists"):
+ return {
+ "modified": False,
+ "message": "RDS db instance {0} does not exist.".format(name),
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
- return {'modified': False}
+ return {"modified": False}
kwargs = {}
- excluded = set(('name',))
+ excluded = set(("name",))
boto_params = set(boto3_param_map.keys())
keys = set(locals().keys())
for key in keys.intersection(boto_params).difference(excluded):
@@ -942,21 +1175,25 @@ def modify_db_instance(name,
info = conn.modify_db_instance(DBInstanceIdentifier=name, **kwargs)
if not info:
- return {'modified': bool(info), 'message':
- 'Failed to modify RDS db instance {0}.'.format(name)}
+ return {
+ "modified": bool(info),
+ "message": "Failed to modify RDS db instance {0}.".format(name),
+ }
- return {'modified': bool(info), 'message':
- 'Modified RDS db instance {0}.'.format(name),
- 'results': dict(info)}
+ return {
+ "modified": bool(info),
+ "message": "Modified RDS db instance {0}.".format(name),
+ "results": dict(info),
+ }
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def _tag_doc(tags):
taglist = []
if tags is not None:
for k, v in six.iteritems(tags):
- if six.text_type(k).startswith('__'):
+ if six.text_type(k).startswith("__"):
continue
- taglist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
+ taglist.append({"Key": six.text_type(k), "Value": six.text_type(v)})
return taglist
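_tag_doc converts a Salt tag mapping into the list-of-dicts shape the AWS APIs expect, skipping Salt-internal keys. A plain-Python sketch of the same transformation, without the six compatibility shim::

    def tag_doc(tags):
        # {"env": "prod", "__internal": "x"} -> [{"Key": "env", "Value": "prod"}]
        return [
            {"Key": str(k), "Value": str(v)}
            for k, v in (tags or {}).items()
            if not str(k).startswith("__")
        ]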
diff --git a/salt/modules/boto_route53.py b/salt/modules/boto_route53.py
index a663ec72071..6916a2d8cda 100644
--- a/salt/modules/boto_route53.py
+++ b/salt/modules/boto_route53.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Route53
.. versionadded:: 2014.7.0
@@ -41,9 +41,9 @@ Connection module for Amazon Route53
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
@@ -53,62 +53,63 @@ import time
# Import salt libs
import salt.utils.compat
-import salt.utils.versions
import salt.utils.odict as odict
+import salt.utils.versions
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
# Import third party libs
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto.route53
import boto.route53.healthcheck
from boto.route53.exception import DNSServerError
- #pylint: enable=unused-import
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ # pylint: enable=unused-import
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
# create_zone params were changed in boto 2.35+
- return salt.utils.versions.check_boto_reqs(
- boto_ver='2.35.0',
- check_boto3=False
- )
+ return salt.utils.versions.check_boto_reqs(boto_ver="2.35.0", check_boto3=False)
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'route53', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "route53", pack=__salt__)
def _get_split_zone(zone, _conn, private_zone):
- '''
+ """
With boto route53, zones can only be matched by name
or iterated over in a list. Since the name will be the
same for public and private zones in a split DNS situation,
iterate over the list and match the zone name and public/private
status.
- '''
+ """
for _zone in _conn.get_zones():
if _zone.name == zone:
- _private_zone = True if _zone.config['PrivateZone'].lower() == 'true' else False
+ _private_zone = (
+ True if _zone.config["PrivateZone"].lower() == "true" else False
+ )
if _private_zone == private_zone:
return _zone
return False
-def describe_hosted_zones(zone_id=None, domain_name=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def describe_hosted_zones(
+ zone_id=None, domain_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Return detailed info about one, or all, zones in the bound account.
If neither zone_id nor domain_name is provided, return all zones.
Note that the return format is slightly different between the 'all'
@@ -139,47 +140,56 @@ def describe_hosted_zones(zone_id=None, domain_name=None, region=None,
salt myminion boto_route53.describe_hosted_zones domain_name=foo.bar.com. \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if zone_id and domain_name:
- raise SaltInvocationError('At most one of zone_id or domain_name may '
- 'be provided')
+ raise SaltInvocationError(
+ "At most one of zone_id or domain_name may " "be provided"
+ )
retries = 10
while retries:
try:
if zone_id:
- zone_id = zone_id.replace('/hostedzone/',
- '') if zone_id.startswith('/hostedzone/') else zone_id
- ret = getattr(conn.get_hosted_zone(zone_id),
- 'GetHostedZoneResponse', None)
+ zone_id = (
+ zone_id.replace("/hostedzone/", "")
+ if zone_id.startswith("/hostedzone/")
+ else zone_id
+ )
+ ret = getattr(
+ conn.get_hosted_zone(zone_id), "GetHostedZoneResponse", None
+ )
elif domain_name:
- ret = getattr(conn.get_hosted_zone_by_name(domain_name),
- 'GetHostedZoneResponse', None)
+ ret = getattr(
+ conn.get_hosted_zone_by_name(domain_name),
+ "GetHostedZoneResponse",
+ None,
+ )
else:
marker = None
ret = None
- while marker is not '':
- r = conn.get_all_hosted_zones(start_marker=marker,
- zone_list=ret)
- ret = r['ListHostedZonesResponse']['HostedZones']
- marker = r['ListHostedZonesResponse'].get('NextMarker', '')
+ while marker != "":
+ r = conn.get_all_hosted_zones(start_marker=marker, zone_list=ret)
+ ret = r["ListHostedZonesResponse"]["HostedZones"]
+ marker = r["ListHostedZonesResponse"].get("NextMarker", "")
return ret if ret else []
except DNSServerError as e:
if retries:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
retries -= 1
continue
- log.error('Could not list zones: %s', e.message)
+ log.error("Could not list zones: %s", e.message)
return []
def list_all_zones_by_name(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List, by their FQDNs, all hosted zones in the bound account.
region
@@ -200,14 +210,13 @@ def list_all_zones_by_name(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_route53.list_all_zones_by_name
- '''
- ret = describe_hosted_zones(region=region, key=key, keyid=keyid,
- profile=profile)
- return [r['Name'] for r in ret]
+ """
+ ret = describe_hosted_zones(region=region, key=key, keyid=keyid, profile=profile)
+ return [r["Name"] for r in ret]
def list_all_zones_by_id(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List, by their IDs, all hosted zones in the bound account.
region
@@ -228,16 +237,23 @@ def list_all_zones_by_id(region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_route53.list_all_zones_by_id
- '''
- ret = describe_hosted_zones(region=region, key=key, keyid=keyid,
- profile=profile)
- return [r['Id'].replace('/hostedzone/', '') for r in ret]
+ """
+ ret = describe_hosted_zones(region=region, key=key, keyid=keyid, profile=profile)
+ return [r["Id"].replace("/hostedzone/", "") for r in ret]
-def zone_exists(zone, region=None, key=None, keyid=None, profile=None,
- retry_on_rate_limit=None, rate_limit_retries=None,
- retry_on_errors=True, error_retries=5):
- '''
+def zone_exists(
+ zone,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ retry_on_rate_limit=None,
+ rate_limit_retries=None,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Check for the existence of a Route53 hosted zone.
.. versionadded:: 2015.8.0
@@ -262,9 +278,9 @@ def zone_exists(zone, region=None, key=None, keyid=None, profile=None,
but please migrate to using the favored `error_retries`
argument instead.
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -280,20 +296,30 @@ def zone_exists(zone, region=None, key=None, keyid=None, profile=None,
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
six.reraise(*sys.exc_info())
-def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def create_zone(
+ zone,
+ private=False,
+ vpc_id=None,
+ vpc_region=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a Route53 hosted zone.
.. versionadded:: 2015.8.0
@@ -325,13 +351,13 @@ def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
CLI Example::
salt myminion boto_route53.create_zone example.org
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
if private:
if not vpc_id or not vpc_region:
- msg = 'vpc_id and vpc_region must be specified for a private zone'
+ msg = "vpc_id and vpc_region must be specified for a private zone"
raise SaltInvocationError(msg)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -341,15 +367,27 @@ def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
if _zone:
return False
- conn.create_zone(zone, private_zone=private, vpc_id=vpc_id,
- vpc_region=vpc_region)
+ conn.create_zone(zone, private_zone=private, vpc_id=vpc_id, vpc_region=vpc_region)
return True
-def create_healthcheck(ip_addr=None, fqdn=None, region=None, key=None, keyid=None, profile=None,
- port=53, hc_type='TCP', resource_path='', string_match=None, request_interval=30,
- failure_threshold=3, retry_on_errors=True, error_retries=5):
- '''
+def create_healthcheck(
+ ip_addr=None,
+ fqdn=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ port=53,
+ hc_type="TCP",
+ resource_path="",
+ string_match=None,
+ request_interval=30,
+ failure_threshold=3,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Create a Route53 healthcheck
.. versionadded:: 2018.3.0
@@ -411,45 +449,49 @@ def create_healthcheck(ip_addr=None, fqdn=None, region=None, key=None, keyid=Non
salt myminion boto_route53.create_healthcheck 192.168.0.1
salt myminion boto_route53.create_healthcheck 192.168.0.1 port=443 hc_type=HTTPS \
resource_path=/ fqdn=blog.saltstack.furniture
- '''
+ """
if fqdn is None and ip_addr is None:
- msg = 'One of the following must be specified: fqdn or ip_addr'
+ msg = "One of the following must be specified: fqdn or ip_addr"
log.error(msg)
- return {'error': msg}
- hc_ = boto.route53.healthcheck.HealthCheck(ip_addr,
- port,
- hc_type,
- resource_path,
- fqdn=fqdn,
- string_match=string_match,
- request_interval=request_interval,
- failure_threshold=failure_threshold)
+ return {"error": msg}
+ hc_ = boto.route53.healthcheck.HealthCheck(
+ ip_addr,
+ port,
+ hc_type,
+ resource_path,
+ fqdn=fqdn,
+ string_match=string_match,
+ request_interval=request_interval,
+ failure_threshold=failure_threshold,
+ )
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while error_retries > 0:
try:
- return {'result': conn.create_health_check(hc_)}
+ return {"result": conn.create_health_check(hc_)}
except DNSServerError as exc:
log.debug(exc)
if retry_on_errors:
- if 'Throttling' == exc.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == exc.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == exc.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == exc.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
- return {'error': __utils__['boto.get_error'](exc)}
+ return {"error": __utils__["boto.get_error"](exc)}
return False
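The retry loops in this module all follow one shape: retry on Throttling or PriorRequestNotComplete, sleep three seconds, re-raise anything else. A sketch distilling that pattern into a reusable wrapper (the wrapper itself is hypothetical, not part of the module)::

    import time

    from boto.route53.exception import DNSServerError

    def with_route53_retries(call, attempts=5, delay=3):
        # Retry transient Route53 errors; propagate everything else.
        while attempts > 0:
            try:
                return call()
            except DNSServerError as exc:
                if exc.code in ("Throttling", "PriorRequestNotComplete"):
                    time.sleep(delay)
                    attempts -= 1
                    continue
                raise
        return None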
def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete a Route53 hosted zone.
.. versionadded:: 2015.8.0
@@ -457,9 +499,9 @@ def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
CLI Example::
salt myminion boto_route53.delete_zone example.org
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -472,18 +514,31 @@ def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
def _encode_name(name):
- return name.replace('*', r'\052')
+ return name.replace("*", r"\052")
def _decode_name(name):
- return name.replace(r'\052', '*')
+ return name.replace(r"\052", "*")
-def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
- keyid=None, profile=None, split_dns=False, private_zone=False,
- identifier=None, retry_on_rate_limit=None,
- rate_limit_retries=None, retry_on_errors=True, error_retries=5):
- '''
+def get_record(
+ name,
+ zone,
+ record_type,
+ fetch_all=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ split_dns=False,
+ private_zone=False,
+ identifier=None,
+ retry_on_rate_limit=None,
+ rate_limit_retries=None,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Get a record from a zone.
CLI example::
@@ -505,9 +560,9 @@ def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
`rate_limit_retries` to ensure backwards compatibility,
but please migrate to using the favored `error_retries`
argument instead.
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -524,7 +579,7 @@ def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
else:
_zone = conn.get_zone(zone)
if not _zone:
- msg = 'Failed to retrieve zone {0}'.format(zone)
+ msg = "Failed to retrieve zone {0}".format(zone)
log.error(msg)
return None
_type = record_type.upper()
@@ -532,48 +587,66 @@ def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
name = _encode_name(name)
- _record = _zone.find_records(name, _type, all=fetch_all, identifier=identifier)
+ _record = _zone.find_records(
+ name, _type, all=fetch_all, identifier=identifier
+ )
break # the while True
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
six.reraise(*sys.exc_info())
if _record:
- ret['name'] = _decode_name(_record.name)
- ret['value'] = _record.resource_records[0]
- ret['record_type'] = _record.type
- ret['ttl'] = _record.ttl
+ ret["name"] = _decode_name(_record.name)
+ ret["value"] = _record.resource_records[0]
+ ret["record_type"] = _record.type
+ ret["ttl"] = _record.ttl
if _record.identifier:
- ret['identifier'] = []
- ret['identifier'].append(_record.identifier)
- ret['identifier'].append(_record.weight)
+ ret["identifier"] = []
+ ret["identifier"].append(_record.identifier)
+ ret["identifier"].append(_record.weight)
return ret
def _munge_value(value, _type):
- split_types = ['A', 'MX', 'AAAA', 'TXT', 'SRV', 'SPF', 'NS']
+ split_types = ["A", "MX", "AAAA", "TXT", "SRV", "SPF", "NS"]
if _type in split_types:
- return value.split(',')
+ return value.split(",")
return value
-def add_record(name, value, zone, record_type, identifier=None, ttl=None,
- region=None, key=None, keyid=None, profile=None,
- wait_for_sync=True, split_dns=False, private_zone=False,
- retry_on_rate_limit=None, rate_limit_retries=None,
- retry_on_errors=True, error_retries=5):
- '''
+def add_record(
+ name,
+ value,
+ zone,
+ record_type,
+ identifier=None,
+ ttl=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ wait_for_sync=True,
+ split_dns=False,
+ private_zone=False,
+ retry_on_rate_limit=None,
+ rate_limit_retries=None,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Add a record to a zone.
CLI example::
@@ -595,9 +668,9 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None,
`rate_limit_retries` to ensure backwards compatibility,
but please migrate to using the favored `error_retries`
argument instead.
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -614,7 +687,7 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None,
else:
_zone = conn.get_zone(zone)
if not _zone:
- msg = 'Failed to retrieve zone {0}'.format(zone)
+ msg = "Failed to retrieve zone {0}".format(zone)
log.error(msg)
return False
_type = record_type.upper()
@@ -622,11 +695,13 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None,
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
@@ -643,23 +718,39 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None,
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
six.reraise(*sys.exc_info())
-def update_record(name, value, zone, record_type, identifier=None, ttl=None,
- region=None, key=None, keyid=None, profile=None,
- wait_for_sync=True, split_dns=False, private_zone=False,
- retry_on_rate_limit=None, rate_limit_retries=None,
- retry_on_errors=True, error_retries=5):
- '''
+def update_record(
+ name,
+ value,
+ zone,
+ record_type,
+ identifier=None,
+ ttl=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ wait_for_sync=True,
+ split_dns=False,
+ private_zone=False,
+ retry_on_rate_limit=None,
+ rate_limit_retries=None,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Modify a record in a zone.
CLI example::
@@ -681,9 +772,9 @@ def update_record(name, value, zone, record_type, identifier=None, ttl=None,
`rate_limit_retries` to ensure backwards compatibility,
but please migrate to using the favored `error_retries`
argument instead.
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -692,7 +783,7 @@ def update_record(name, value, zone, record_type, identifier=None, ttl=None,
else:
_zone = conn.get_zone(zone)
if not _zone:
- msg = 'Failed to retrieve zone {0}'.format(zone)
+ msg = "Failed to retrieve zone {0}".format(zone)
log.error(msg)
return False
_type = record_type.upper()
@@ -714,23 +805,38 @@ def update_record(name, value, zone, record_type, identifier=None, ttl=None,
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
six.reraise(*sys.exc_info())
-def delete_record(name, zone, record_type, identifier=None, all_records=False,
- region=None, key=None, keyid=None, profile=None,
- wait_for_sync=True, split_dns=False, private_zone=False,
- retry_on_rate_limit=None, rate_limit_retries=None,
- retry_on_errors=True, error_retries=5):
- '''
+def delete_record(
+ name,
+ zone,
+ record_type,
+ identifier=None,
+ all_records=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ wait_for_sync=True,
+ split_dns=False,
+ private_zone=False,
+ retry_on_rate_limit=None,
+ rate_limit_retries=None,
+ retry_on_errors=True,
+ error_retries=5,
+):
+ """
Delete a record from a zone.
CLI example::
@@ -752,9 +858,9 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
`rate_limit_retries` to ensure backwards compatibility,
but please migrate to using the favored `error_retries`
argument instead.
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -763,7 +869,7 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
else:
_zone = conn.get_zone(zone)
if not _zone:
- msg = 'Failed to retrieve zone {0}'.format(zone)
+ msg = "Failed to retrieve zone {0}".format(zone)
log.error(msg)
return False
_type = record_type.upper()
@@ -776,7 +882,9 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
while error_retries > 0:
try:
- old_record = _zone.find_records(name, _type, all=all_records, identifier=identifier)
+ old_record = _zone.find_records(
+ name, _type, all=all_records, identifier=identifier
+ )
if not old_record:
return False
status = _zone.delete_record(old_record)
@@ -784,11 +892,13 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
except DNSServerError as e:
if retry_on_errors:
- if 'Throttling' == e.code:
- log.debug('Throttled by AWS API.')
- elif 'PriorRequestNotComplete' == e.code:
- log.debug('The request was rejected by AWS API.\
- Route 53 was still processing a prior request')
+ if "Throttling" == e.code:
+ log.debug("Throttled by AWS API.")
+ elif "PriorRequestNotComplete" == e.code:
+ log.debug(
+ "The request was rejected by AWS API.\
+ Route 53 was still processing a prior request"
+ )
time.sleep(3)
error_retries -= 1
continue
@@ -802,16 +912,17 @@ def _try_func(conn, func, **args):
return getattr(conn, func)(**args)
except AttributeError as e:
# Don't include **args in log messages - security concern.
- log.error('Function `%s()` not found for AWS connection object %s',
- func, conn)
+ log.error(
+ "Function `%s()` not found for AWS connection object %s", func, conn
+ )
return None
except DNSServerError as e:
- if tries and e.code == 'Throttling':
- log.debug('Throttled by AWS API. Will retry in 5 seconds')
+ if tries and e.code == "Throttling":
+ log.debug("Throttled by AWS API. Will retry in 5 seconds")
time.sleep(5)
tries -= 1
continue
- log.error('Failed calling %s(): %s', func, e)
+ log.error("Failed calling %s(): %s", func, e)
return None
@@ -822,28 +933,39 @@ def _wait_for_sync(status, conn, wait=True):
if not wait:
return True
orig_wait = wait
- log.info('Waiting up to %s seconds for Route53 changes to synchronize', orig_wait)
+ log.info("Waiting up to %s seconds for Route53 changes to synchronize", orig_wait)
while wait > 0:
change = conn.get_change(status)
current = change.GetChangeResponse.ChangeInfo.Status
- if current == 'INSYNC':
+ if current == "INSYNC":
return True
sleep = wait if wait % 60 == wait else 60
log.info(
- 'Sleeping %s seconds waiting for changes to synch (current status %s)',
- sleep, current
+ "Sleeping %s seconds waiting for changes to synch (current status %s)",
+ sleep,
+ current,
)
time.sleep(sleep)
wait -= sleep
continue
- log.error('Route53 changes not synced after %s seconds.', orig_wait)
+ log.error("Route53 changes not synced after %s seconds.", orig_wait)
return False
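_wait_for_sync polls get_change until Route53 reports INSYNC, sleeping at most 60 seconds between polls. The same idea expressed against boto3 (an assumption here; the module above uses legacy boto)::

    import time

    import boto3

    def wait_insync(change_id, timeout=600):
        # Poll the change status until INSYNC or the timeout elapses.
        client = boto3.client("route53")
        waited = 0
        while waited < timeout:
            status = client.get_change(Id=change_id)["ChangeInfo"]["Status"]
            if status == "INSYNC":
                return True
            step = min(60, timeout - waited)
            time.sleep(step)
            waited += step
        return False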
-def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=False, vpc_id=None,
- vpc_name=None, vpc_region=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def create_hosted_zone(
+ domain_name,
+ caller_ref=None,
+ comment="",
+ private_zone=False,
+ vpc_id=None,
+ vpc_name=None,
+ vpc_region=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
@@ -898,63 +1020,78 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=Fa
CLI Example::
salt myminion boto_route53.create_hosted_zone example.org
- '''
+ """
if region is None:
- region = 'universal'
+ region = "universal"
- if not domain_name.endswith('.'):
- raise SaltInvocationError('Domain MUST be fully-qualified, complete '
- 'with ending period.')
+ if not domain_name.endswith("."):
+ raise SaltInvocationError(
+ "Domain MUST be fully-qualified, complete " "with ending period."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deets = conn.get_hosted_zone_by_name(domain_name)
if deets:
- log.info('Route53 hosted zone %s already exists', domain_name)
+ log.info("Route53 hosted zone %s already exists", domain_name)
return None
- args = {'domain_name': domain_name,
- 'caller_ref': caller_ref,
- 'comment': comment,
- 'private_zone': private_zone}
+ args = {
+ "domain_name": domain_name,
+ "caller_ref": caller_ref,
+ "comment": comment,
+ "private_zone": private_zone,
+ }
if private_zone:
if not _exactly_one((vpc_name, vpc_id)):
- raise SaltInvocationError('Either vpc_name or vpc_id is required '
- 'when creating a private zone.')
- vpcs = __salt__['boto_vpc.describe_vpcs'](
- vpc_id=vpc_id, name=vpc_name, region=region, key=key,
- keyid=keyid, profile=profile).get('vpcs', [])
+ raise SaltInvocationError(
+ "Either vpc_name or vpc_id is required " "when creating a private zone."
+ )
+ vpcs = __salt__["boto_vpc.describe_vpcs"](
+ vpc_id=vpc_id,
+ name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ).get("vpcs", [])
if vpc_region and vpcs:
- vpcs = [v for v in vpcs if v['region'] == vpc_region]
+ vpcs = [v for v in vpcs if v["region"] == vpc_region]
if not vpcs:
- log.error('Private zone requested but a VPC matching given criteria'
- ' not found.')
+ log.error(
+ "Private zone requested but a VPC matching given criteria" " not found."
+ )
return None
if len(vpcs) > 1:
- log.error('Private zone requested but multiple VPCs matching given '
- 'criteria found: %s.', [v['id'] for v in vpcs])
+ log.error(
+ "Private zone requested but multiple VPCs matching given "
+ "criteria found: %s.",
+ [v["id"] for v in vpcs],
+ )
return None
vpc = vpcs[0]
if vpc_name:
- vpc_id = vpc['id']
+ vpc_id = vpc["id"]
if not vpc_region:
- vpc_region = vpc['region']
- args.update({'vpc_id': vpc_id, 'vpc_region': vpc_region})
+ vpc_region = vpc["region"]
+ args.update({"vpc_id": vpc_id, "vpc_region": vpc_region})
else:
if any((vpc_id, vpc_name, vpc_region)):
- log.info('Options vpc_id, vpc_name, and vpc_region are ignored '
- 'when creating non-private zones.')
+ log.info(
+ "Options vpc_id, vpc_name, and vpc_region are ignored "
+ "when creating non-private zones."
+ )
- r = _try_func(conn, 'create_hosted_zone', **args)
+ r = _try_func(conn, "create_hosted_zone", **args)
if r is None:
- log.error('Failed to create hosted zone %s', domain_name)
+ log.error("Failed to create hosted zone %s", domain_name)
return None
- r = r.get('CreateHostedZoneResponse', {})
+ r = r.get("CreateHostedZoneResponse", {})
# Pop it since it'll be irrelevant by the time we return
- status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '')
+ status = r.pop("ChangeInfo", {}).get("Id", "").replace("/change/", "")
synced = _wait_for_sync(status, conn, wait=600)
if not synced:
- log.error('Hosted zone %s not synced after 600 seconds.', domain_name)
+ log.error("Hosted zone %s not synced after 600 seconds.", domain_name)
return None
return r
diff --git a/salt/modules/boto_s3.py b/salt/modules/boto_s3.py
index ef952d82966..e11e726d2c1 100644
--- a/salt/modules/boto_s3.py
+++ b/salt/modules/boto_s3.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon S3 using boto3
.. versionadded:: 2018.3.0
@@ -46,12 +46,13 @@ Connection module for Amazon S3 using boto3
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import Salt libs
@@ -63,9 +64,11 @@ log = logging.getLogger(__name__)
try:
# pylint: disable=unused-import
import boto3
+
# pylint: enable=unused-import
import botocore
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -73,29 +76,22 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.2.1'
- )
+ """
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.2.1")
def __init__(opts): # pylint: disable=unused-argument
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 's3')
+ __utils__["boto3.assign_funcs"](__name__, "s3")
def get_object_metadata(
- name,
- extra_args=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ name, extra_args=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Get metadata about an S3 object.
Returns None if the object does not exist.
@@ -111,37 +107,27 @@ def get_object_metadata(
key=key \\
keyid=keyid \\
profile=profile \\
- '''
- bucket, _, s3_key = name.partition('/')
+ """
+ bucket, _, s3_key = name.partition("/")
if extra_args is None:
extra_args = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- metadata = conn.head_object(
- Bucket=bucket,
- Key=s3_key,
- **extra_args
- )
+ metadata = conn.head_object(Bucket=bucket, Key=s3_key, **extra_args)
except botocore.exceptions.ClientError as e:
- if e.response['Error']['Message'] == 'Not Found':
- return {'result': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ if e.response["Error"]["Message"] == "Not Found":
+ return {"result": None}
+ return {"error": __utils__["boto3.get_error"](e)}
- return {'result': metadata}
+ return {"result": metadata}
def upload_file(
- source,
- name,
- extra_args=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ source, name, extra_args=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Upload a local file as an S3 object.
CLI Example:
@@ -155,15 +141,15 @@ def upload_file(
key=key \\
keyid=keyid \\
profile=profile \\
- '''
- bucket, _, s3_key = name.partition('/')
+ """
+ bucket, _, s3_key = name.partition("/")
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.upload_file(source, bucket, s3_key, ExtraArgs=extra_args)
except boto3.exceptions.S3UploadFailedError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
- log.info('S3 object uploaded to %s', name)
- return {'result': True}
+ log.info("S3 object uploaded to %s", name)
+ return {"result": True}
diff --git a/salt/modules/boto_s3_bucket.py b/salt/modules/boto_s3_bucket.py
index 93311525fc7..7136ff99fed 100644
--- a/salt/modules/boto_s3_bucket.py
+++ b/salt/modules/boto_s3_bucket.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon S3 Buckets
.. versionadded:: 2016.3.0
@@ -45,7 +45,7 @@ The dependencies listed above can be installed via package or pip.
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# disable complaints about perfectly valid non-assignment code
@@ -53,16 +53,18 @@ The dependencies listed above can be installed via package or pip.
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
-# Import Salt libs
-from salt.ext import six
-from salt.ext.six.moves import range # pylint: disable=import-error
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
from salt.exceptions import SaltInvocationError
+# Import Salt libs
+from salt.ext import six
+from salt.ext.six.moves import range # pylint: disable=import-error
+
log = logging.getLogger(__name__)
# Import third party libs
@@ -72,9 +74,11 @@ try:
# pylint: disable=unused-import
import boto
import boto3
+
# pylint: enable=unused-import
from botocore.exceptions import ClientError
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@@ -82,27 +86,24 @@ except ImportError:
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_lambda execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
- return salt.utils.versions.check_boto_reqs(
- boto3_ver='1.2.1'
- )
+ return salt.utils.versions.check_boto_reqs(boto3_ver="1.2.1")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto3.assign_funcs'](__name__, 's3')
+ __utils__["boto3.assign_funcs"](__name__, "s3")
-def exists(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def exists(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Given a bucket name, check to see if the given bucket exists.
Returns True if the given bucket exists and returns False if the given
@@ -114,28 +115,34 @@ def exists(Bucket,
salt myminion boto_s3_bucket.exists mybucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
buckets = conn.head_bucket(Bucket=Bucket)
- return {'exists': True}
+ return {"exists": True}
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == '404':
- return {'exists': False}
- err = __utils__['boto3.get_error'](e)
- return {'error': err}
+ if e.response.get("Error", {}).get("Code") == "404":
+ return {"exists": False}
+ err = __utils__["boto3.get_error"](e)
+ return {"error": err}
-def create(Bucket,
- ACL=None, LocationConstraint=None,
- GrantFullControl=None,
- GrantRead=None,
- GrantReadACP=None,
- GrantWrite=None,
- GrantWriteACP=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ Bucket,
+ ACL=None,
+ LocationConstraint=None,
+ GrantFullControl=None,
+ GrantRead=None,
+ GrantReadACP=None,
+ GrantWrite=None,
+ GrantWriteACP=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, create an S3 Bucket.
Returns {created: true} if the bucket was created and returns
@@ -151,35 +158,53 @@ def create(Bucket,
GrantReadACP='emailaddress="exampl@example.com",id="2345678909876432"' \\
LocationConstraint=us-west-1
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
- for arg in ('ACL', 'GrantFullControl',
- 'GrantRead', 'GrantReadACP',
- 'GrantWrite', 'GrantWriteACP'):
+ for arg in (
+ "ACL",
+ "GrantFullControl",
+ "GrantRead",
+ "GrantReadACP",
+ "GrantWrite",
+ "GrantWriteACP",
+ ):
if locals()[arg] is not None:
- kwargs[arg] = str(locals()[arg]) # future lint: disable=blacklisted-function
+ kwargs[arg] = str(
+ locals()[arg]
+ ) # future lint: disable=blacklisted-function
if LocationConstraint:
- kwargs['CreateBucketConfiguration'] = {'LocationConstraint': LocationConstraint}
- location = conn.create_bucket(Bucket=Bucket,
- **kwargs)
+ kwargs["CreateBucketConfiguration"] = {
+ "LocationConstraint": LocationConstraint
+ }
+ location = conn.create_bucket(Bucket=Bucket, **kwargs)
conn.get_waiter("bucket_exists").wait(Bucket=Bucket)
if location:
- log.info('The newly created bucket name is located at %s', location['Location'])
+ log.info(
+ "The newly created bucket name is located at %s", location["Location"]
+ )
- return {'created': True, 'name': Bucket, 'Location': location['Location']}
+ return {"created": True, "name": Bucket, "Location": location["Location"]}
else:
- log.warning('Bucket was not created')
- return {'created': False}
+ log.warning("Bucket was not created")
+ return {"created": False}
except ClientError as e:
- return {'created': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"created": False, "error": __utils__["boto3.get_error"](e)}
-def delete(Bucket, MFA=None, RequestPayer=None, Force=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete(
+ Bucket,
+ MFA=None,
+ RequestPayer=None,
+ Force=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a bucket name, delete it, optionally emptying it first.
Returns {deleted: true} if the bucket was deleted and returns
@@ -191,22 +216,37 @@ def delete(Bucket, MFA=None, RequestPayer=None, Force=False,
salt myminion boto_s3_bucket.delete mybucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Force:
- empty(Bucket, MFA=MFA, RequestPayer=RequestPayer, region=region,
- key=key, keyid=keyid, profile=profile)
+ empty(
+ Bucket,
+ MFA=MFA,
+ RequestPayer=RequestPayer,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
conn.delete_bucket(Bucket=Bucket)
- return {'deleted': True}
+ return {"deleted": True}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_objects(Bucket, Delete, MFA=None, RequestPayer=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_objects(
+ Bucket,
+ Delete,
+ MFA=None,
+ RequestPayer=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
@@ -218,40 +258,39 @@ def delete_objects(Bucket, Delete, MFA=None, RequestPayer=None,
salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}'
- '''
+ """
if isinstance(Delete, six.string_types):
Delete = salt.utils.json.loads(Delete)
if not isinstance(Delete, dict):
raise SaltInvocationError("Malformed Delete request.")
- if 'Objects' not in Delete:
+ if "Objects" not in Delete:
raise SaltInvocationError("Malformed Delete request.")
failed = []
- objs = Delete['Objects']
+ objs = Delete["Objects"]
for i in range(0, len(objs), 1000):
- chunk = objs[i:i+1000]
- subset = {'Objects': chunk, 'Quiet': True}
+ chunk = objs[i : i + 1000]
+ subset = {"Objects": chunk, "Quiet": True}
try:
- args = {'Bucket': Bucket}
- args.update({'MFA': MFA}) if MFA else None
- args.update({'RequestPayer': RequestPayer}) if RequestPayer else None
- args.update({'Delete': subset})
+ args = {"Bucket": Bucket}
+ args.update({"MFA": MFA}) if MFA else None
+ args.update({"RequestPayer": RequestPayer}) if RequestPayer else None
+ args.update({"Delete": subset})
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = conn.delete_objects(**args)
- failed += ret.get('Errors', [])
+ failed += ret.get("Errors", [])
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
if failed:
- return {'deleted': False, 'failed': failed}
+ return {"deleted": False, "failed": failed}
else:
- return {'deleted': True}
+ return {"deleted": True}
-def describe(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Given a bucket name describe its properties.
Returns a dictionary of interesting properties.
@@ -262,62 +301,65 @@ def describe(Bucket,
salt myminion boto_s3_bucket.describe mybucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
result = {}
- conn_dict = {'ACL': conn.get_bucket_acl,
- 'CORS': conn.get_bucket_cors,
- 'LifecycleConfiguration': conn.get_bucket_lifecycle_configuration,
- 'Location': conn.get_bucket_location,
- 'Logging': conn.get_bucket_logging,
- 'NotificationConfiguration': conn.get_bucket_notification_configuration,
- 'Policy': conn.get_bucket_policy,
- 'Replication': conn.get_bucket_replication,
- 'RequestPayment': conn.get_bucket_request_payment,
- 'Versioning': conn.get_bucket_versioning,
- 'Website': conn.get_bucket_website}
+ conn_dict = {
+ "ACL": conn.get_bucket_acl,
+ "CORS": conn.get_bucket_cors,
+ "LifecycleConfiguration": conn.get_bucket_lifecycle_configuration,
+ "Location": conn.get_bucket_location,
+ "Logging": conn.get_bucket_logging,
+ "NotificationConfiguration": conn.get_bucket_notification_configuration,
+ "Policy": conn.get_bucket_policy,
+ "Replication": conn.get_bucket_replication,
+ "RequestPayment": conn.get_bucket_request_payment,
+ "Versioning": conn.get_bucket_versioning,
+ "Website": conn.get_bucket_website,
+ }
for key, query in six.iteritems(conn_dict):
try:
data = query(Bucket=Bucket)
except ClientError as e:
- if e.response.get('Error', {}).get('Code') in (
- 'NoSuchLifecycleConfiguration',
- 'NoSuchCORSConfiguration',
- 'NoSuchBucketPolicy',
- 'NoSuchWebsiteConfiguration',
- 'ReplicationConfigurationNotFoundError',
- 'NoSuchTagSet',
- ):
+ if e.response.get("Error", {}).get("Code") in (
+ "NoSuchLifecycleConfiguration",
+ "NoSuchCORSConfiguration",
+ "NoSuchBucketPolicy",
+ "NoSuchWebsiteConfiguration",
+ "ReplicationConfigurationNotFoundError",
+ "NoSuchTagSet",
+ ):
continue
raise
- if 'ResponseMetadata' in data:
- del data['ResponseMetadata']
+ if "ResponseMetadata" in data:
+ del data["ResponseMetadata"]
result[key] = data
tags = {}
try:
data = conn.get_bucket_tagging(Bucket=Bucket)
- for tagdef in data.get('TagSet'):
- tags[tagdef.get('Key')] = tagdef.get('Value')
+ for tagdef in data.get("TagSet"):
+ tags[tagdef.get("Key")] = tagdef.get("Value")
except ClientError as e:
- if not e.response.get('Error', {}).get('Code') == 'NoSuchTagSet':
+ if not e.response.get("Error", {}).get("Code") == "NoSuchTagSet":
raise
if tags:
- result['Tagging'] = tags
- return {'bucket': result}
+ result["Tagging"] = tags
+ return {"bucket": result}
except ClientError as e:
- err = __utils__['boto3.get_error'](e)
- if e.response.get('Error', {}).get('Code') == 'NoSuchBucket':
- return {'bucket': None}
- return {'error': __utils__['boto3.get_error'](e)}
+ err = __utils__["boto3.get_error"](e)
+ if e.response.get("Error", {}).get("Code") == "NoSuchBucket":
+ return {"bucket": None}
+ return {"error": __utils__["boto3.get_error"](e)}
-def empty(Bucket, MFA=None, RequestPayer=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def empty(
+ Bucket, MFA=None, RequestPayer=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete all objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
@@ -329,24 +371,39 @@ def empty(Bucket, MFA=None, RequestPayer=None, region=None, key=None,
salt myminion boto_s3_bucket.empty mybucket
- '''
+ """
- stuff = list_object_versions(Bucket, region=region, key=key, keyid=keyid,
- profile=profile)
+ stuff = list_object_versions(
+ Bucket, region=region, key=key, keyid=keyid, profile=profile
+ )
Delete = {}
- Delete['Objects'] = [{'Key': v['Key'], 'VersionId': v['VersionId']} for v in stuff.get('Versions', [])]
- Delete['Objects'] += [{'Key': v['Key'], 'VersionId': v['VersionId']} for v in stuff.get('DeleteMarkers', [])]
- if Delete['Objects']:
- ret = delete_objects(Bucket, Delete, MFA=MFA, RequestPayer=RequestPayer,
- region=region, key=key, keyid=keyid, profile=profile)
- failed = ret.get('failed', [])
+ Delete["Objects"] = [
+ {"Key": v["Key"], "VersionId": v["VersionId"]}
+ for v in stuff.get("Versions", [])
+ ]
+ Delete["Objects"] += [
+ {"Key": v["Key"], "VersionId": v["VersionId"]}
+ for v in stuff.get("DeleteMarkers", [])
+ ]
+ if Delete["Objects"]:
+ ret = delete_objects(
+ Bucket,
+ Delete,
+ MFA=MFA,
+ RequestPayer=RequestPayer,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ failed = ret.get("failed", [])
if failed:
- return {'deleted': False, 'failed': ret[failed]}
- return {'deleted': True}
+ return {"deleted": False, "failed": ret[failed]}
+ return {"deleted": True}
def list(region=None, key=None, keyid=None, profile=None):
- '''
+ """
List all buckets owned by the authenticated sender of the request.
Returns list of buckets
@@ -360,22 +417,30 @@ def list(region=None, key=None, keyid=None, profile=None):
- {...}
- {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
buckets = conn.list_buckets()
- if not bool(buckets.get('Buckets')):
- log.warning('No buckets found')
- if 'ResponseMetadata' in buckets:
- del buckets['ResponseMetadata']
+ if not bool(buckets.get("Buckets")):
+ log.warning("No buckets found")
+ if "ResponseMetadata" in buckets:
+ del buckets["ResponseMetadata"]
return buckets
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def list_object_versions(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def list_object_versions(
+ Bucket,
+ Delimiter=None,
+ EncodingType=None,
+ Prefix=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List objects in a given S3 bucket.
Returns a list of objects.
@@ -386,34 +451,43 @@ def list_object_versions(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
salt myminion boto_s3_bucket.list_object_versions mybucket
- '''
+ """
try:
Versions = []
DeleteMarkers = []
- args = {'Bucket': Bucket}
- args.update({'Delimiter': Delimiter}) if Delimiter else None
- args.update({'EncodingType': EncodingType}) if Delimiter else None
- args.update({'Prefix': Prefix}) if Prefix else None
+ args = {"Bucket": Bucket}
+ args.update({"Delimiter": Delimiter}) if Delimiter else None
+ args.update({"EncodingType": EncodingType}) if Delimiter else None
+ args.update({"Prefix": Prefix}) if Prefix else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
IsTruncated = True
while IsTruncated:
ret = conn.list_object_versions(**args)
- IsTruncated = ret.get('IsTruncated', False)
- if IsTruncated in ('True', 'true', True):
- args['KeyMarker'] = ret['NextKeyMarker']
- args['VersionIdMarker'] = ret['NextVersionIdMarker']
- Versions += ret.get('Versions', [])
- DeleteMarkers += ret.get('DeleteMarkers', [])
- return {'Versions': Versions, 'DeleteMarkers': DeleteMarkers}
+ IsTruncated = ret.get("IsTruncated", False)
+ if IsTruncated in ("True", "true", True):
+ args["KeyMarker"] = ret["NextKeyMarker"]
+ args["VersionIdMarker"] = ret["NextVersionIdMarker"]
+ Versions += ret.get("Versions", [])
+ DeleteMarkers += ret.get("DeleteMarkers", [])
+ return {"Versions": Versions, "DeleteMarkers": DeleteMarkers}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def list_objects(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
- FetchOwner=False, StartAfter=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def list_objects(
+ Bucket,
+ Delimiter=None,
+ EncodingType=None,
+ Prefix=None,
+ FetchOwner=False,
+ StartAfter=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
List objects in a given S3 bucket.
Returns a list of objects.
@@ -424,38 +498,43 @@ def list_objects(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
salt myminion boto_s3_bucket.list_objects mybucket
- '''
+ """
try:
Contents = []
- args = {'Bucket': Bucket, 'FetchOwner': FetchOwner}
- args.update({'Delimiter': Delimiter}) if Delimiter else None
- args.update({'EncodingType': EncodingType}) if Delimiter else None
- args.update({'Prefix': Prefix}) if Prefix else None
- args.update({'StartAfter': StartAfter}) if StartAfter else None
+ args = {"Bucket": Bucket, "FetchOwner": FetchOwner}
+ args.update({"Delimiter": Delimiter}) if Delimiter else None
+ args.update({"EncodingType": EncodingType}) if Delimiter else None
+ args.update({"Prefix": Prefix}) if Prefix else None
+ args.update({"StartAfter": StartAfter}) if StartAfter else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
IsTruncated = True
while IsTruncated:
ret = conn.list_objects_v2(**args)
- IsTruncated = ret.get('IsTruncated', False)
- if IsTruncated in ('True', 'true', True):
- args['ContinuationToken'] = ret['NextContinuationToken']
- Contents += ret.get('Contents', [])
- return {'Contents': Contents}
+ IsTruncated = ret.get("IsTruncated", False)
+ if IsTruncated in ("True", "true", True):
+ args["ContinuationToken"] = ret["NextContinuationToken"]
+ Contents += ret.get("Contents", [])
+ return {"Contents": Contents}
except ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
-def put_acl(Bucket,
- ACL=None,
- AccessControlPolicy=None,
- GrantFullControl=None,
- GrantRead=None,
- GrantReadACP=None,
- GrantWrite=None,
- GrantWriteACP=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_acl(
+ Bucket,
+ ACL=None,
+ AccessControlPolicy=None,
+ GrantFullControl=None,
+ GrantRead=None,
+ GrantReadACP=None,
+ GrantWrite=None,
+ GrantWriteACP=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update the ACL for a bucket.
Returns {updated: true} if the ACL was updated and returns
@@ -470,7 +549,7 @@ def put_acl(Bucket,
GrantRead='uri="http://acs.amazonaws.com/groups/global/AllUsers"' \\
GrantReadACP='emailaddress="exampl@example.com",id="2345678909876432"'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -478,23 +557,27 @@ def put_acl(Bucket,
if AccessControlPolicy is not None:
if isinstance(AccessControlPolicy, six.string_types):
AccessControlPolicy = salt.utils.json.loads(AccessControlPolicy)
- kwargs['AccessControlPolicy'] = AccessControlPolicy
- for arg in ('ACL',
- 'GrantFullControl',
- 'GrantRead', 'GrantReadACP',
- 'GrantWrite', 'GrantWriteACP'):
+ kwargs["AccessControlPolicy"] = AccessControlPolicy
+ for arg in (
+ "ACL",
+ "GrantFullControl",
+ "GrantRead",
+ "GrantReadACP",
+ "GrantWrite",
+ "GrantWriteACP",
+ ):
if locals()[arg] is not None:
- kwargs[arg] = str(locals()[arg]) # future lint: disable=blacklisted-function
+ kwargs[arg] = str(
+ locals()[arg]
+ ) # future lint: disable=blacklisted-function
conn.put_bucket_acl(Bucket=Bucket, **kwargs)
- return {'updated': True, 'name': Bucket}
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_cors(Bucket,
- CORSRules,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_cors(Bucket, CORSRules, region=None, key=None, keyid=None, profile=None):
+ """
Given a valid config, update the CORS rules for a bucket.
Returns {updated: true} if CORS was updated and returns
@@ -512,22 +595,22 @@ def put_cors(Bucket,
"MaxAgeSeconds":123,\\
}]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if CORSRules is not None and isinstance(CORSRules, six.string_types):
CORSRules = salt.utils.json.loads(CORSRules)
- conn.put_bucket_cors(Bucket=Bucket, CORSConfiguration={'CORSRules': CORSRules})
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_cors(Bucket=Bucket, CORSConfiguration={"CORSRules": CORSRules})
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_lifecycle_configuration(Bucket,
- Rules,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_lifecycle_configuration(
+ Bucket, Rules, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a valid config, update the Lifecycle rules for a bucket.
Returns {updated: true} if Lifecycle was updated and returns
@@ -547,22 +630,31 @@ def put_lifecycle_configuration(Bucket,
"NoncurrentVersionExpiration": {...},\\
}]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Rules is not None and isinstance(Rules, six.string_types):
Rules = salt.utils.json.loads(Rules)
- conn.put_bucket_lifecycle_configuration(Bucket=Bucket, LifecycleConfiguration={'Rules': Rules})
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_lifecycle_configuration(
+ Bucket=Bucket, LifecycleConfiguration={"Rules": Rules}
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_logging(Bucket,
- TargetBucket=None, TargetPrefix=None, TargetGrants=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_logging(
+ Bucket,
+ TargetBucket=None,
+ TargetPrefix=None,
+ TargetGrants=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update the logging parameters for a bucket.
Returns {updated: true} if parameters were updated and returns
@@ -574,34 +666,42 @@ def put_logging(Bucket,
salt myminion boto_s3_bucket.put_logging my_bucket log_bucket prefix '[{...}]'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
logstate = {}
-        targets = {'TargetBucket': TargetBucket,
-                   'TargetGrants': TargetGrants,
-                   'TargetPrefix': TargetPrefix}
-        for key, val in six.iteritems(targets):
-            if val is not None:
-                logstate[key] = val
-        if TargetGrants is not None and isinstance(TargetGrants, six.string_types):
-            TargetGrants = salt.utils.json.loads(TargetGrants)
+        # Deserialize TargetGrants before it is copied into the logging state;
+        # previously the JSON string was parsed only after logstate was built,
+        # so the parsed value never reached the API call.
+        if TargetGrants is not None and isinstance(TargetGrants, six.string_types):
+            TargetGrants = salt.utils.json.loads(TargetGrants)
+        targets = {
+            "TargetBucket": TargetBucket,
+            "TargetGrants": TargetGrants,
+            "TargetPrefix": TargetPrefix,
+        }
+        # A distinct loop variable avoids shadowing the ``key`` credential argument
+        for target_name, val in six.iteritems(targets):
+            if val is not None:
+                logstate[target_name] = val
        if logstate:
-            logstatus = {'LoggingEnabled': logstate}
+            logstatus = {"LoggingEnabled": logstate}
        else:
            logstatus = {}
        conn.put_bucket_logging(Bucket=Bucket, BucketLoggingStatus=logstatus)
- return {'updated': True, 'name': Bucket}
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_notification_configuration(Bucket,
- TopicConfigurations=None, QueueConfigurations=None,
- LambdaFunctionConfigurations=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_notification_configuration(
+ Bucket,
+ TopicConfigurations=None,
+ QueueConfigurations=None,
+ LambdaFunctionConfigurations=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update the notification parameters for a bucket.
Returns {updated: true} if parameters were updated and returns
@@ -616,7 +716,7 @@ def put_notification_configuration(Bucket,
[{...}] \\
[{...}]
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@@ -631,21 +731,25 @@ def put_notification_configuration(Bucket,
if LambdaFunctionConfigurations is None:
LambdaFunctionConfigurations = []
elif isinstance(LambdaFunctionConfigurations, six.string_types):
- LambdaFunctionConfigurations = salt.utils.json.loads(LambdaFunctionConfigurations)
+ LambdaFunctionConfigurations = salt.utils.json.loads(
+ LambdaFunctionConfigurations
+ )
# TODO allow the user to use simple names & substitute ARNs for those names
- conn.put_bucket_notification_configuration(Bucket=Bucket, NotificationConfiguration={
- 'TopicConfigurations': TopicConfigurations,
- 'QueueConfigurations': QueueConfigurations,
- 'LambdaFunctionConfigurations': LambdaFunctionConfigurations,
- })
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_notification_configuration(
+ Bucket=Bucket,
+ NotificationConfiguration={
+ "TopicConfigurations": TopicConfigurations,
+ "QueueConfigurations": QueueConfigurations,
+ "LambdaFunctionConfigurations": LambdaFunctionConfigurations,
+ },
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_policy(Bucket, Policy,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_policy(Bucket, Policy, region=None, key=None, keyid=None, profile=None):
+ """
Given a valid config, update the policy for a bucket.
Returns {updated: true} if policy was updated and returns
@@ -657,37 +761,38 @@ def put_policy(Bucket, Policy,
salt myminion boto_s3_bucket.put_policy my_bucket {...}
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Policy is None:
- Policy = '{}'
+ Policy = "{}"
elif not isinstance(Policy, six.string_types):
Policy = salt.utils.json.dumps(Policy)
conn.put_bucket_policy(Bucket=Bucket, Policy=Policy)
- return {'updated': True, 'name': Bucket}
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
- if name.startswith('arn:aws:iam:'):
+ if name.startswith("arn:aws:iam:"):
return name
- account_id = __salt__['boto_iam.get_account_id'](
+ account_id = __salt__["boto_iam.get_account_id"](
region=region, key=key, keyid=keyid, profile=profile
)
- if profile and 'region' in profile:
- region = profile['region']
+ if profile and "region" in profile:
+ region = profile["region"]
if region is None:
- region = 'us-east-1'
- return 'arn:aws:iam::{0}:role/{1}'.format(account_id, name)
+ region = "us-east-1"
+ return "arn:aws:iam::{0}:role/{1}".format(account_id, name)
-def put_replication(Bucket, Role, Rules,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_replication(
+ Bucket, Role, Rules, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a valid config, update the replication configuration for a bucket.
Returns {updated: true} if replication configuration was updated and returns
@@ -699,28 +804,27 @@ def put_replication(Bucket, Role, Rules,
salt myminion boto_s3_bucket.put_replication my_bucket my_role [...]
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- Role = _get_role_arn(name=Role,
- region=region, key=key, keyid=keyid, profile=profile)
+ Role = _get_role_arn(
+ name=Role, region=region, key=key, keyid=keyid, profile=profile
+ )
if Rules is None:
Rules = []
elif isinstance(Rules, six.string_types):
Rules = salt.utils.json.loads(Rules)
- conn.put_bucket_replication(Bucket=Bucket, ReplicationConfiguration={
- 'Role': Role,
- 'Rules': Rules
- })
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_replication(
+ Bucket=Bucket, ReplicationConfiguration={"Role": Role, "Rules": Rules}
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_request_payment(Bucket, Payer,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_request_payment(Bucket, Payer, region=None, key=None, keyid=None, profile=None):
+ """
Given a valid config, update the request payment configuration for a bucket.
Returns {updated: true} if request payment configuration was updated and returns
@@ -732,21 +836,20 @@ def put_request_payment(Bucket, Payer,
salt myminion boto_s3_bucket.put_request_payment my_bucket Requester
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.put_bucket_request_payment(Bucket=Bucket, RequestPaymentConfiguration={
- 'Payer': Payer,
- })
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_request_payment(
+ Bucket=Bucket, RequestPaymentConfiguration={"Payer": Payer}
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_tagging(Bucket,
- region=None, key=None, keyid=None, profile=None, **kwargs):
- '''
+def put_tagging(Bucket, region=None, key=None, keyid=None, profile=None, **kwargs):
+ """
Given a valid config, update the tags for a bucket.
Returns {updated: true} if tags were updated and returns
@@ -758,26 +861,32 @@ def put_tagging(Bucket,
salt myminion boto_s3_bucket.put_tagging my_bucket my_tag=tag_value
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
- if six.text_type(k).startswith('__'):
+ if six.text_type(k).startswith("__"):
continue
- tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
- conn.put_bucket_tagging(Bucket=Bucket, Tagging={
- 'TagSet': tagslist,
- })
- return {'updated': True, 'name': Bucket}
+ tagslist.append({"Key": six.text_type(k), "Value": six.text_type(v)})
+ conn.put_bucket_tagging(Bucket=Bucket, Tagging={"TagSet": tagslist})
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_versioning(Bucket, Status, MFADelete=None, MFA=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_versioning(
+ Bucket,
+ Status,
+ MFADelete=None,
+ MFA=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update the versioning configuration for a bucket.
Returns {updated: true} if versioning configuration was updated and returns
@@ -789,28 +898,36 @@ def put_versioning(Bucket, Status, MFADelete=None, MFA=None,
salt myminion boto_s3_bucket.put_versioning my_bucket Enabled
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- VersioningConfiguration = {'Status': Status}
+ VersioningConfiguration = {"Status": Status}
if MFADelete is not None:
- VersioningConfiguration['MFADelete'] = MFADelete
+ VersioningConfiguration["MFADelete"] = MFADelete
kwargs = {}
if MFA is not None:
- kwargs['MFA'] = MFA
- conn.put_bucket_versioning(Bucket=Bucket,
- VersioningConfiguration=VersioningConfiguration,
- **kwargs)
- return {'updated': True, 'name': Bucket}
+ kwargs["MFA"] = MFA
+ conn.put_bucket_versioning(
+ Bucket=Bucket, VersioningConfiguration=VersioningConfiguration, **kwargs
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def put_website(Bucket, ErrorDocument=None, IndexDocument=None,
- RedirectAllRequestsTo=None, RoutingRules=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def put_website(
+ Bucket,
+ ErrorDocument=None,
+ IndexDocument=None,
+ RedirectAllRequestsTo=None,
+ RoutingRules=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid config, update the website configuration for a bucket.
Returns {updated: true} if website configuration was updated and returns
@@ -822,29 +939,33 @@ def put_website(Bucket, ErrorDocument=None, IndexDocument=None,
salt myminion boto_s3_bucket.put_website my_bucket IndexDocument='{"Suffix":"index.html"}'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
WebsiteConfiguration = {}
- for key in ('ErrorDocument', 'IndexDocument',
- 'RedirectAllRequestsTo', 'RoutingRules'):
+ for key in (
+ "ErrorDocument",
+ "IndexDocument",
+ "RedirectAllRequestsTo",
+ "RoutingRules",
+ ):
val = locals()[key]
if val is not None:
if isinstance(val, six.string_types):
WebsiteConfiguration[key] = salt.utils.json.loads(val)
else:
WebsiteConfiguration[key] = val
- conn.put_bucket_website(Bucket=Bucket,
- WebsiteConfiguration=WebsiteConfiguration)
- return {'updated': True, 'name': Bucket}
+ conn.put_bucket_website(
+ Bucket=Bucket, WebsiteConfiguration=WebsiteConfiguration
+ )
+ return {"updated": True, "name": Bucket}
except ClientError as e:
- return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"updated": False, "error": __utils__["boto3.get_error"](e)}
-def delete_cors(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_cors(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Delete the CORS configuration for the given bucket
Returns {deleted: true} if CORS was deleted and returns
@@ -856,19 +977,20 @@ def delete_cors(Bucket,
salt myminion boto_s3_bucket.delete_cors my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_cors(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_lifecycle_configuration(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_lifecycle_configuration(
+ Bucket, region=None, key=None, keyid=None, profile=None
+):
+ """
Delete the lifecycle configuration for the given bucket
Returns {deleted: true} if Lifecycle was deleted and returns
@@ -880,19 +1002,18 @@ def delete_lifecycle_configuration(Bucket,
salt myminion boto_s3_bucket.delete_lifecycle_configuration my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_lifecycle(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_policy(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_policy(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Delete the policy from the given bucket
Returns {deleted: true} if policy was deleted and returns
@@ -904,19 +1025,18 @@ def delete_policy(Bucket,
salt myminion boto_s3_bucket.delete_policy my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_policy(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_replication(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_replication(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Delete the replication config from the given bucket
Returns {deleted: true} if replication configuration was deleted and returns
@@ -928,19 +1048,18 @@ def delete_replication(Bucket,
salt myminion boto_s3_bucket.delete_replication my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_replication(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_tagging(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_tagging(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Delete the tags from the given bucket
Returns {deleted: true} if tags were deleted and returns
@@ -952,19 +1071,18 @@ def delete_tagging(Bucket,
salt myminion boto_s3_bucket.delete_tagging my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_tagging(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
-def delete_website(Bucket,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_website(Bucket, region=None, key=None, keyid=None, profile=None):
+ """
Remove the website configuration from the given bucket
Returns {deleted: true} if website configuration was deleted and returns
@@ -976,11 +1094,11 @@ def delete_website(Bucket,
salt myminion boto_s3_bucket.delete_website my_bucket
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_website(Bucket=Bucket)
- return {'deleted': True, 'name': Bucket}
+ return {"deleted": True, "name": Bucket}
except ClientError as e:
- return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto3.get_error"](e)}
diff --git a/salt/modules/boto_secgroup.py b/salt/modules/boto_secgroup.py
index 16f6bb79cbe..cff5a265be7 100644
--- a/salt/modules/boto_secgroup.py
+++ b/salt/modules/boto_secgroup.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon Security Groups
.. versionadded:: 2014.7.0
@@ -40,127 +40,166 @@ Connection module for Amazon Security Groups
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
-# Import Salt libs
-from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.utils.odict as odict
import salt.utils.versions
-log = logging.getLogger(__name__)
+# Import Salt libs
+from salt.exceptions import CommandExecutionError, SaltInvocationError
# Import third party libs
from salt.ext import six
+
+log = logging.getLogger(__name__)
+
+
try:
# pylint: disable=unused-import
import boto
import boto.ec2
+
# pylint: enable=unused-import
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# Boto < 2.4.0 GroupOrCIDR objects have different attributes than
# Boto >= 2.4.0 GroupOrCIDR objects
# Differences include no group_id attribute in Boto < 2.4.0 and returning
# a groupId attribute when a GroupOrCIDR object authorizes an IP range
# Support for Boto < 2.4.0 can be added if needed
has_boto_reqs = salt.utils.versions.check_boto_reqs(
- boto_ver='2.4.0',
- check_boto3=False
+ boto_ver="2.4.0", check_boto3=False
)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'ec2', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "ec2", pack=__salt__)
return has_boto_reqs
-def exists(name=None, region=None, key=None, keyid=None, profile=None,
- vpc_id=None, vpc_name=None, group_id=None):
- '''
+def exists(
+ name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_id=None,
+ vpc_name=None,
+ group_id=None,
+):
+ """
Check to see if a security group exists.
CLI example::
salt myminion boto_secgroup.exists mysecgroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ group = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if group:
return True
else:
return False
-def _vpc_name_to_id(vpc_id=None, vpc_name=None, region=None, key=None, keyid=None,
- profile=None):
- data = __salt__['boto_vpc.get_id'](name=vpc_name, region=region,
- key=key, keyid=keyid, profile=profile)
- return data.get('id')
+def _vpc_name_to_id(
+ vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
+):
+ data = __salt__["boto_vpc.get_id"](
+ name=vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ return data.get("id")
def _split_rules(rules):
- '''
+ """
Split rules with combined grants into individual rules.
Amazon returns a set of rules with the same protocol, from and to ports
together as a single rule with a set of grants. Authorizing and revoking
rules, however, is done as a split set of rules. This function splits the
rules up.
- '''
+ """
split = []
for rule in rules:
- ip_protocol = rule.get('ip_protocol')
- to_port = rule.get('to_port')
- from_port = rule.get('from_port')
- grants = rule.get('grants')
+ ip_protocol = rule.get("ip_protocol")
+ to_port = rule.get("to_port")
+ from_port = rule.get("from_port")
+ grants = rule.get("grants")
for grant in grants:
- _rule = {'ip_protocol': ip_protocol,
- 'to_port': to_port,
- 'from_port': from_port}
+ _rule = {
+ "ip_protocol": ip_protocol,
+ "to_port": to_port,
+ "from_port": from_port,
+ }
for key, val in six.iteritems(grant):
_rule[key] = val
split.append(_rule)
return split
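# A worked example of what _split_rules does: one EC2 rule carrying two
# grants becomes two flat rules, one per grant (values are illustrative):
rule = {
    "ip_protocol": "tcp",
    "from_port": 80,
    "to_port": 80,
    "grants": [{"cidr_ip": "10.0.0.0/8"}, {"cidr_ip": "192.168.0.0/24"}],
}
split = []
for grant in rule["grants"]:
    flat = {k: rule[k] for k in ("ip_protocol", "from_port", "to_port")}
    flat.update(grant)  # grant fields (cidr_ip, source group, ...) ride along
    split.append(flat)
assert len(split) == 2 and split[1]["cidr_ip"] == "192.168.0.0/24"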
-def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
- region=None, key=None, keyid=None, profile=None): # pylint: disable=W0613
- '''
+def _get_group(
+ conn=None,
+ name=None,
+ vpc_id=None,
+ vpc_name=None,
+ group_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+): # pylint: disable=W0613
+ """
Get a group object given a name, a name plus vpc_id/vpc_name, or a group_id. Return
a boto.ec2.securitygroup.SecurityGroup object if the group is found, else
return None.
- '''
+ """
if vpc_name and vpc_id:
- raise SaltInvocationError('The params \'vpc_id\' and \'vpc_name\' '
- 'are mutually exclusive.')
+ raise SaltInvocationError(
+ "The params 'vpc_id' and 'vpc_name' " "are mutually exclusive."
+ )
if vpc_name:
try:
- vpc_id = _vpc_name_to_id(vpc_id=vpc_id, vpc_name=vpc_name, region=region,
- key=key, keyid=keyid, profile=profile)
+ vpc_id = _vpc_name_to_id(
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except boto.exception.BotoServerError as e:
log.debug(e)
return None
if name:
if vpc_id is None:
- log.debug('getting group for %s', name)
- group_filter = {'group-name': name}
+ log.debug("getting group for %s", name)
+ group_filter = {"group-name": name}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
# security groups can have the same name if groups exist in both
# EC2-Classic and EC2-VPC
@@ -172,13 +211,15 @@ def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
return group
# If there are more security groups, and no vpc_id, we can't know which one to choose.
if len(filtered_groups) > 1:
- raise CommandExecutionError('Security group belongs to more VPCs, specify the VPC ID!')
+ raise CommandExecutionError(
+ "Security group belongs to more VPCs, specify the VPC ID!"
+ )
elif len(filtered_groups) == 1:
return filtered_groups[0]
return None
elif vpc_id:
- log.debug('getting group for %s in vpc_id %s', name, vpc_id)
- group_filter = {'group-name': name, 'vpc_id': vpc_id}
+ log.debug("getting group for %s in vpc_id %s", name, vpc_id)
+ group_filter = {"group-name": name, "vpc_id": vpc_id}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
if len(filtered_groups) == 1:
return filtered_groups[0]
@@ -203,21 +244,23 @@ def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
def _parse_rules(sg, rules):
_rules = []
for rule in rules:
- log.debug('examining rule %s for group %s', rule, sg.id)
- attrs = ['ip_protocol', 'from_port', 'to_port', 'grants']
+ log.debug("examining rule %s for group %s", rule, sg.id)
+ attrs = ["ip_protocol", "from_port", "to_port", "grants"]
_rule = odict.OrderedDict()
for attr in attrs:
val = getattr(rule, attr)
if not val:
continue
- if attr == 'grants':
+ if attr == "grants":
_grants = []
for grant in val:
- log.debug('examining grant %s for', grant)
- g_attrs = {'name': 'source_group_name',
- 'owner_id': 'source_group_owner_id',
- 'group_id': 'source_group_group_id',
- 'cidr_ip': 'cidr_ip'}
+ log.debug("examining grant %s for", grant)
+ g_attrs = {
+ "name": "source_group_name",
+ "owner_id": "source_group_owner_id",
+ "group_id": "source_group_group_id",
+ "cidr_ip": "cidr_ip",
+ }
_grant = odict.OrderedDict()
for g_attr, g_attr_map in six.iteritems(g_attrs):
g_val = getattr(grant, g_attr)
@@ -225,10 +268,10 @@ def _parse_rules(sg, rules):
continue
_grant[g_attr_map] = g_val
_grants.append(_grant)
- _rule['grants'] = _grants
- elif attr == 'from_port':
+ _rule["grants"] = _grants
+ elif attr == "from_port":
_rule[attr] = int(val)
- elif attr == 'to_port':
+ elif attr == "to_port":
_rule[attr] = int(val)
else:
_rule[attr] = val
@@ -236,9 +279,16 @@ def _parse_rules(sg, rules):
return _rules
-def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_all_security_groups(
+ groupnames=None,
+ group_ids=None,
+ filters=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Return a list of all Security Groups matching the given criteria and
filters.
@@ -253,7 +303,7 @@ def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
CLI example::
salt myminion boto_secgroup.get_all_security_groups filters='{group-name: mygroup}'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(groupnames, six.string_types):
@@ -261,22 +311,32 @@ def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
    if isinstance(group_ids, six.string_types):
-        groupnames = [group_ids]
+        group_ids = [group_ids]
- interesting = ['description', 'id', 'instances', 'name', 'owner_id',
- 'region', 'rules', 'rules_egress', 'tags', 'vpc_id']
+ interesting = [
+ "description",
+ "id",
+ "instances",
+ "name",
+ "owner_id",
+ "region",
+ "rules",
+ "rules_egress",
+ "tags",
+ "vpc_id",
+ ]
ret = []
try:
- r = conn.get_all_security_groups(groupnames=groupnames,
- group_ids=group_ids,
- filters=filters)
+ r = conn.get_all_security_groups(
+ groupnames=groupnames, group_ids=group_ids, filters=filters
+ )
for g in r:
n = {}
for a in interesting:
v = getattr(g, a, None)
- if a == 'region':
+ if a == "region":
v = v.name
- elif a in ('rules', 'rules_egress'):
+ elif a in ("rules", "rules_egress"):
v = _parse_rules(g, v)
- elif a == 'instances':
+ elif a == "instances":
v = [i.id for i in v()]
n[a] = v
ret += [n]
@@ -286,156 +346,241 @@ def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
return []
-def get_group_id(name, vpc_id=None, vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def get_group_id(
+ name, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Get a Group ID given a Group Name or Group Name and VPC ID
CLI example::
salt myminion boto_secgroup.get_group_id mysecgroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if name.startswith('sg-'):
- log.debug('group %s is a group id. get_group_id not called.', name)
+ if name.startswith("sg-"):
+ log.debug("group %s is a group id. get_group_id not called.", name)
return name
- group = _get_group(conn=conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- region=region, key=key, keyid=keyid, profile=profile)
- return getattr(group, 'id', None)
+ group = _get_group(
+ conn=conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return getattr(group, "id", None)
-def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def convert_to_group_ids(
+ groups, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a list of security groups and a vpc_id, convert_to_group_ids will
convert each item in that list to a security group id.
CLI example::
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
- '''
- log.debug('security group contents %s pre-conversion', groups)
+ """
+ log.debug("security group contents %s pre-conversion", groups)
group_ids = []
for group in groups:
- group_id = get_group_id(name=group, vpc_id=vpc_id,
- vpc_name=vpc_name, region=region,
- key=key, keyid=keyid, profile=profile)
+ group_id = get_group_id(
+ name=group,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not group_id:
# Security groups are a big deal - need to fail if any can't be resolved...
- raise CommandExecutionError('Could not resolve Security Group name '
- '{0} to a Group ID'.format(group))
+ raise CommandExecutionError(
+ "Could not resolve Security Group name "
+ "{0} to a Group ID".format(group)
+ )
else:
group_ids.append(six.text_type(group_id))
- log.debug('security group contents %s post-conversion', group_ids)
+ log.debug("security group contents %s post-conversion", group_ids)
return group_ids
-def get_config(name=None, group_id=None, region=None, key=None, keyid=None,
- profile=None, vpc_id=None, vpc_name=None):
- '''
+def get_config(
+ name=None,
+ group_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_id=None,
+ vpc_name=None,
+):
+ """
Get the configuration for a security group.
CLI example::
salt myminion boto_secgroup.get_config mysecgroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- sg = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ sg = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if sg:
ret = odict.OrderedDict()
- ret['name'] = sg.name
+ ret["name"] = sg.name
# TODO: add support for vpc_id in return
# ret['vpc_id'] = sg.vpc_id
- ret['group_id'] = sg.id
- ret['owner_id'] = sg.owner_id
- ret['description'] = sg.description
- ret['tags'] = sg.tags
+ ret["group_id"] = sg.id
+ ret["owner_id"] = sg.owner_id
+ ret["description"] = sg.description
+ ret["tags"] = sg.tags
_rules = _parse_rules(sg, sg.rules)
_rules_egress = _parse_rules(sg, sg.rules_egress)
- ret['rules'] = _split_rules(_rules)
- ret['rules_egress'] = _split_rules(_rules_egress)
+ ret["rules"] = _split_rules(_rules)
+ ret["rules_egress"] = _split_rules(_rules_egress)
return ret
else:
return None
-def create(name, description, vpc_id=None, vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create(
+ name,
+ description,
+ vpc_id=None,
+ vpc_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a security group.
CLI example::
salt myminion boto_secgroup.create mysecgroup 'My Security Group'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not vpc_id and vpc_name:
try:
- vpc_id = _vpc_name_to_id(vpc_id=vpc_id, vpc_name=vpc_name, region=region,
- key=key, keyid=keyid, profile=profile)
+ vpc_id = _vpc_name_to_id(
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except boto.exception.BotoServerError as e:
log.debug(e)
return False
created = conn.create_security_group(name, description, vpc_id)
if created:
- log.info('Created security group %s.', name)
+ log.info("Created security group %s.", name)
return True
else:
- msg = 'Failed to create security group {0}.'.format(name)
+ msg = "Failed to create security group {0}.".format(name)
log.error(msg)
return False
-def delete(name=None, group_id=None, region=None, key=None, keyid=None,
- profile=None, vpc_id=None, vpc_name=None):
- '''
+def delete(
+ name=None,
+ group_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_id=None,
+ vpc_name=None,
+):
+ """
Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ group = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
- log.info('Deleted security group %s with id %s.', group.name, group.id)
+ log.info("Deleted security group %s with id %s.", group.name, group.id)
return True
else:
- msg = 'Failed to delete security group {0}.'.format(name)
+ msg = "Failed to delete security group {0}.".format(name)
log.error(msg)
return False
else:
- log.debug('Security group not found.')
+ log.debug("Security group not found.")
return False
-def authorize(name=None, source_group_name=None,
- source_group_owner_id=None, ip_protocol=None,
- from_port=None, to_port=None, cidr_ip=None, group_id=None,
- source_group_group_id=None, region=None, key=None, keyid=None,
- profile=None, vpc_id=None, vpc_name=None, egress=False):
- '''
+def authorize(
+ name=None,
+ source_group_name=None,
+ source_group_owner_id=None,
+ ip_protocol=None,
+ from_port=None,
+ to_port=None,
+ cidr_ip=None,
+ group_id=None,
+ source_group_group_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_id=None,
+ vpc_name=None,
+ egress=False,
+):
+ """
Add a new rule to an existing security group.
CLI example::
salt myminion boto_secgroup.authorize mysecgroup ip_protocol=tcp from_port=80 to_port=80 cidr_ip='["10.0.0.0/8", "192.168.0.0/24"]'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ group = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if group:
try:
added = None
@@ -443,54 +588,86 @@ def authorize(name=None, source_group_name=None,
added = conn.authorize_security_group(
src_security_group_name=source_group_name,
src_security_group_owner_id=source_group_owner_id,
- ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
- cidr_ip=cidr_ip, group_id=group.id,
- src_security_group_group_id=source_group_group_id)
+ ip_protocol=ip_protocol,
+ from_port=from_port,
+ to_port=to_port,
+ cidr_ip=cidr_ip,
+ group_id=group.id,
+ src_security_group_group_id=source_group_group_id,
+ )
else:
added = conn.authorize_security_group_egress(
- ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
- cidr_ip=cidr_ip, group_id=group.id,
- src_group_id=source_group_group_id)
+ ip_protocol=ip_protocol,
+ from_port=from_port,
+ to_port=to_port,
+ cidr_ip=cidr_ip,
+ group_id=group.id,
+ src_group_id=source_group_group_id,
+ )
if added:
- log.info('Added rule to security group %s with id %s',
- group.name, group.id)
+ log.info(
+ "Added rule to security group %s with id %s", group.name, group.id
+ )
return True
else:
- msg = ('Failed to add rule to security group {0} with id {1}.'
- .format(group.name, group.id))
+ msg = "Failed to add rule to security group {0} with id {1}.".format(
+ group.name, group.id
+ )
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
# if we are trying to add the same rule then we are already in the desired state, return true
- if e.error_code == 'InvalidPermission.Duplicate':
+ if e.error_code == "InvalidPermission.Duplicate":
return True
- msg = ('Failed to add rule to security group {0} with id {1}.'
- .format(group.name, group.id))
+ msg = "Failed to add rule to security group {0} with id {1}.".format(
+ group.name, group.id
+ )
log.error(msg)
log.error(e)
return False
else:
- log.error('Failed to add rule to security group.')
+ log.error("Failed to add rule to security group.")
return False
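# The InvalidPermission.Duplicate handling above makes authorize idempotent:
# an already-present rule counts as success. The same idea as a standalone
# sketch against boto's EC2 connection API:
import boto.exception

def add_rule_idempotent(conn, **rule):
    try:
        return bool(conn.authorize_security_group(**rule))
    except boto.exception.EC2ResponseError as exc:
        if exc.error_code == "InvalidPermission.Duplicate":
            return True  # the rule already exists: desired state reached
        raise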
-def revoke(name=None, source_group_name=None,
- source_group_owner_id=None, ip_protocol=None,
- from_port=None, to_port=None, cidr_ip=None, group_id=None,
- source_group_group_id=None, region=None, key=None, keyid=None,
- profile=None, vpc_id=None, vpc_name=None, egress=False):
- '''
+def revoke(
+ name=None,
+ source_group_name=None,
+ source_group_owner_id=None,
+ ip_protocol=None,
+ from_port=None,
+ to_port=None,
+ cidr_ip=None,
+ group_id=None,
+ source_group_group_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_id=None,
+ vpc_name=None,
+ egress=False,
+):
+ """
Remove a rule from an existing security group.
CLI example::
salt myminion boto_secgroup.revoke mysecgroup ip_protocol=tcp from_port=80 to_port=80 cidr_ip='10.0.0.0/8'
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ group = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if group:
try:
revoked = None
@@ -498,69 +675,92 @@ def revoke(name=None, source_group_name=None,
revoked = conn.revoke_security_group(
src_security_group_name=source_group_name,
src_security_group_owner_id=source_group_owner_id,
- ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
- cidr_ip=cidr_ip, group_id=group.id,
- src_security_group_group_id=source_group_group_id)
+ ip_protocol=ip_protocol,
+ from_port=from_port,
+ to_port=to_port,
+ cidr_ip=cidr_ip,
+ group_id=group.id,
+ src_security_group_group_id=source_group_group_id,
+ )
else:
revoked = conn.revoke_security_group_egress(
- ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
- cidr_ip=cidr_ip, group_id=group.id,
- src_group_id=source_group_group_id)
+ ip_protocol=ip_protocol,
+ from_port=from_port,
+ to_port=to_port,
+ cidr_ip=cidr_ip,
+ group_id=group.id,
+ src_group_id=source_group_group_id,
+ )
if revoked:
- log.info('Removed rule from security group %s with id %s.',
- group.name, group.id)
+ log.info(
+ "Removed rule from security group %s with id %s.",
+ group.name,
+ group.id,
+ )
return True
else:
- msg = ('Failed to remove rule from security group {0} with id {1}.'
- .format(group.name, group.id))
+ msg = "Failed to remove rule from security group {0} with id {1}.".format(
+ group.name, group.id
+ )
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
- msg = ('Failed to remove rule from security group {0} with id {1}.'
- .format(group.name, group.id))
+ msg = "Failed to remove rule from security group {0} with id {1}.".format(
+ group.name, group.id
+ )
log.error(msg)
log.error(e)
return False
else:
- log.error('Failed to remove rule from security group.')
+ log.error("Failed to remove rule from security group.")
return False
-def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_vpcs(
+ vpc_id=None,
+ vpc_name=None,
+ cidr=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given VPC properties, find and return matching VPC ids.
Borrowed from boto_vpc; these could be refactored into a common library
- '''
+ """
if all((vpc_id, vpc_name)):
- raise SaltInvocationError('Only one of vpc_name or vpc_id may be '
- 'provided.')
+ raise SaltInvocationError("Only one of vpc_name or vpc_id may be " "provided.")
if not any((vpc_id, vpc_name, tags, cidr)):
- raise SaltInvocationError('At least one of the following must be '
- 'provided: vpc_id, vpc_name, cidr or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be "
+ "provided: vpc_id, vpc_name, cidr or tags."
+ )
- local_get_conn = __utils__['boto.get_connection_func']('vpc')
+ local_get_conn = __utils__["boto.get_connection_func"]("vpc")
conn = local_get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if vpc_id:
- filter_parameters['vpc_ids'] = [vpc_id]
+ filter_parameters["vpc_ids"] = [vpc_id]
if cidr:
- filter_parameters['filters']['cidr'] = cidr
+ filter_parameters["filters"]["cidr"] = cidr
if vpc_name:
- filter_parameters['filters']['tag:Name'] = vpc_name
+ filter_parameters["filters"]["tag:Name"] = vpc_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
vpcs = conn.get_all_vpcs(**filter_parameters)
- log.debug('The filters criteria %s matched the following VPCs:%s',
- filter_parameters, vpcs)
+ log.debug(
+ "The filters criteria %s matched the following VPCs:%s", filter_parameters, vpcs
+ )
if vpcs:
return [vpc.id for vpc in vpcs]
@@ -568,16 +768,18 @@ def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
return []
-def set_tags(tags,
- name=None,
- group_id=None,
- vpc_name=None,
- vpc_id=None,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def set_tags(
+ tags,
+ name=None,
+ group_id=None,
+ vpc_name=None,
+ vpc_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Sets tags on a security group.
.. versionadded:: 2016.3.0
@@ -614,34 +816,44 @@ def set_tags(tags,
.. code-block:: bash
salt myminion boto_secgroup.set_tags "{'TAG1': 'Value1', 'TAG2': 'Value2'}" security_group_name vpc_id=vpc-13435 profile=my_aws_profile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- secgrp = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ secgrp = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if secgrp:
if isinstance(tags, dict):
secgrp.add_tags(tags)
else:
- msg = 'Tags must be a dict of tagname:tagvalue'
+ msg = "Tags must be a dict of tagname:tagvalue"
raise SaltInvocationError(msg)
else:
- msg = 'The security group could not be found'
+ msg = "The security group could not be found"
raise SaltInvocationError(msg)
return True
-def delete_tags(tags,
- name=None,
- group_id=None,
- vpc_name=None,
- vpc_id=None,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def delete_tags(
+ tags,
+ name=None,
+ group_id=None,
+ vpc_name=None,
+ vpc_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Deletes tags from a security group.
.. versionadded:: 2016.3.0
@@ -678,11 +890,19 @@ def delete_tags(tags,
.. code-block:: bash
salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- secgrp = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
- group_id=group_id, region=region, key=key, keyid=keyid,
- profile=profile)
+ secgrp = _get_group(
+ conn,
+ name=name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ group_id=group_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if secgrp:
if isinstance(tags, list):
tags_to_remove = {}
@@ -690,9 +910,9 @@ def delete_tags(tags,
tags_to_remove[tag] = None
secgrp.remove_tags(tags_to_remove)
else:
- msg = 'Tags must be a list of tagnames to remove from the security group'
+ msg = "Tags must be a list of tagnames to remove from the security group"
raise SaltInvocationError(msg)
else:
- msg = 'The security group could not be found'
+ msg = "The security group could not be found"
raise SaltInvocationError(msg)
return True
diff --git a/salt/modules/boto_sns.py b/salt/modules/boto_sns.py
index 4473949c6d7..5e74485a191 100644
--- a/salt/modules/boto_sns.py
+++ b/salt/modules/boto_sns.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon SNS
:configuration: This module accepts explicit sns credentials but can also
@@ -38,12 +38,13 @@ Connection module for Amazon SNS
region: us-east-1
:depends: boto
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
# Import Salt libs
@@ -53,36 +54,35 @@ log = logging.getLogger(__name__)
# Import third party libs
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import boto.sns
- #pylint: enable=unused-import
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ # pylint: enable=unused-import
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
- has_boto_reqs = salt.utils.versions.check_boto_reqs(
- check_boto3=False
- )
+ """
+ has_boto_reqs = salt.utils.versions.check_boto_reqs(check_boto3=False)
if has_boto_reqs is True:
- __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "sns", pack=__salt__)
return has_boto_reqs
def get_all_topics(region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns a list of all topics.
CLI example::
salt myminion boto_sns.get_all_topics
- '''
+ """
cache_key = _cache_get_key()
try:
return __context__[cache_key]
@@ -93,66 +93,67 @@ def get_all_topics(region=None, key=None, keyid=None, profile=None):
__context__[cache_key] = {}
# TODO: support >100 SNS topics (via NextToken)
topics = conn.get_all_topics()
- for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']:
- short_name = t['TopicArn'].split(':')[-1]
- __context__[cache_key][short_name] = t['TopicArn']
+ for t in topics["ListTopicsResponse"]["ListTopicsResult"]["Topics"]:
+ short_name = t["TopicArn"].split(":")[-1]
+ __context__[cache_key][short_name] = t["TopicArn"]
return __context__[cache_key]
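# get_all_topics memoizes into Salt's per-run __context__ dict. A sketch of
# the pattern with a plain dict standing in for __context__ and a callable
# standing in for the SNS API round trip:
_context = {}

def cached_topics(fetch):
    cache_key = "boto_sns.topics_cache"
    try:
        return _context[cache_key]  # cache hit: no API call
    except KeyError:
        pass
    _context[cache_key] = {arn.split(":")[-1]: arn for arn in fetch()}
    return _context[cache_key]

arns = cached_topics(lambda: ["arn:aws:sns:us-east-1:123456789012:mytopic"])
assert arns == {"mytopic": "arn:aws:sns:us-east-1:123456789012:mytopic"}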
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if an SNS topic exists.
CLI example::
salt myminion boto_sns.exists mytopic region=us-east-1
- '''
- topics = get_all_topics(region=region, key=key, keyid=keyid,
- profile=profile)
- if name.startswith('arn:aws:sns:'):
+ """
+ topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile)
+ if name.startswith("arn:aws:sns:"):
return name in list(topics.values())
else:
return name in list(topics.keys())
def create(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Create an SNS topic.
CLI example to create a topic::
salt myminion boto_sns.create mytopic region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic(name)
- log.info('Created SNS topic %s', name)
+ log.info("Created SNS topic %s", name)
_invalidate_cache()
return True
def delete(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an SNS topic.
CLI example to delete a topic::
salt myminion boto_sns.delete mytopic region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_topic(get_arn(name, region, key, keyid, profile))
- log.info('Deleted SNS topic %s', name)
+ log.info("Deleted SNS topic %s", name)
_invalidate_cache()
return True
-def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None):
- '''
+def get_all_subscriptions_by_topic(
+ name, region=None, key=None, keyid=None, profile=None
+):
+ """
Get a list of all subscriptions to a specific topic.
CLI example to list subscriptions for a topic::
salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
- '''
+ """
cache_key = _subscriptions_cache_key(name)
try:
return __context__[cache_key]
@@ -160,22 +161,28 @@ def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, prof
pass
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile))
- __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']
+ ret = conn.get_all_subscriptions_by_topic(
+ get_arn(name, region, key, keyid, profile)
+ )
+ __context__[cache_key] = ret["ListSubscriptionsByTopicResponse"][
+ "ListSubscriptionsByTopicResult"
+ ]["Subscriptions"]
return __context__[cache_key]
-def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None):
- '''
+def subscribe(
+ topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None
+):
+ """
Subscribe to a Topic.
CLI example to subscribe to a topic::
salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint)
- log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic)
+ log.info("Subscribe %s %s to %s topic", protocol, endpoint, topic)
try:
del __context__[_subscriptions_cache_key(topic)]
except KeyError:
@@ -183,8 +190,10 @@ def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, prof
return True
-def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None):
- '''
+def unsubscribe(
+ topic, subscription_arn, region=None, key=None, keyid=None, profile=None
+):
+ """
Unsubscribe a specific SubscriptionArn of a topic.
CLI Example:
@@ -194,17 +203,17 @@ def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, prof
salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1
.. versionadded:: 2016.11.0
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if subscription_arn.startswith('arn:aws:sns:') is False:
+ if subscription_arn.startswith("arn:aws:sns:") is False:
return False
try:
conn.unsubscribe(subscription_arn)
- log.info('Unsubscribe %s to %s topic', subscription_arn, topic)
+ log.info("Unsubscribe %s to %s topic", subscription_arn, topic)
except Exception as e: # pylint: disable=broad-except
- log.error('Unsubscribe Error', exc_info=True)
+ log.error("Unsubscribe Error", exc_info=True)
return False
else:
__context__.pop(_subscriptions_cache_key(topic), None)
@@ -212,38 +221,39 @@ def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, prof
def get_arn(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Returns the full ARN for a given topic name.
CLI example::
salt myminion boto_sns.get_arn mytopic
- '''
- if name.startswith('arn:aws:sns:'):
+ """
+ if name.startswith("arn:aws:sns:"):
return name
- account_id = __salt__['boto_iam.get_account_id'](
+ account_id = __salt__["boto_iam.get_account_id"](
region=region, key=key, keyid=keyid, profile=profile
)
- return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),
- account_id, name)
+ return "arn:aws:sns:{0}:{1}:{2}".format(
+ _get_region(region, profile), account_id, name
+ )
def _get_region(region=None, profile=None):
- if profile and 'region' in profile:
- return profile['region']
- if not region and __salt__['config.option'](profile):
- _profile = __salt__['config.option'](profile)
- region = _profile.get('region', None)
- if not region and __salt__['config.option']('sns.region'):
- region = __salt__['config.option']('sns.region')
+ if profile and "region" in profile:
+ return profile["region"]
+ if not region and __salt__["config.option"](profile):
+ _profile = __salt__["config.option"](profile)
+ region = _profile.get("region", None)
+ if not region and __salt__["config.option"]("sns.region"):
+ region = __salt__["config.option"]("sns.region")
if not region:
- region = 'us-east-1'
+ region = "us-east-1"
return region
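# _get_region resolves the region by precedence: an explicit region in the
# profile dict, then the named profile's config, then the sns.region option,
# then a default. A condensed sketch with the config lookups replaced by a
# plain dict:
def resolve_region(region=None, profile=None, options=None):
    options = options or {}
    if profile and "region" in profile:
        return profile["region"]
    if not region:
        region = options.get("sns.region")
    return region or "us-east-1"

assert resolve_region(profile={"region": "eu-west-1"}) == "eu-west-1"
assert resolve_region() == "us-east-1"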
def _subscriptions_cache_key(name):
- return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)
+ return "{0}_{1}_subscriptions".format(_cache_get_key(), name)
def _invalidate_cache():
@@ -254,4 +264,4 @@ def _invalidate_cache():
def _cache_get_key():
- return 'boto_sns.topics_cache'
+ return "boto_sns.topics_cache"
diff --git a/salt/modules/boto_sqs.py b/salt/modules/boto_sqs.py
index dd1ab5aa3c4..4155a3c63f5 100644
--- a/salt/modules/boto_sqs.py
+++ b/salt/modules/boto_sqs.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
@@ -40,7 +40,7 @@ Connection module for Amazon SQS
region: us-east-1
:depends: boto3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
@@ -55,12 +55,12 @@ import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
-from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
+from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
log = logging.getLogger(__name__)
__func_alias__ = {
- 'list_': 'list',
+ "list_": "list",
}
# Import third party libs
@@ -68,27 +68,28 @@ try:
# pylint: disable=unused-import
import boto3
import botocore
+
# pylint: enable=unused-import
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
- '''
+ """
Only load if boto3 libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'sqs')
+ __utils__["boto3.assign_funcs"](__name__, "sqs")
return has_boto_reqs
def _preprocess_attributes(attributes):
- '''
+ """
Pre-process incoming queue attributes before setting them
- '''
+ """
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
@@ -99,13 +100,11 @@ def _preprocess_attributes(attributes):
return salt.utils.json.dumps(val)
return val
- return dict(
- (attr, stringified(val)) for attr, val in six.iteritems(attributes)
- )
+ return dict((attr, stringified(val)) for attr, val in six.iteritems(attributes))
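# SQS requires every queue attribute value to be a string, which is what
# stringified (defined just above this hunk) handles. A plausible standalone
# version, assuming JSON serialization for non-string values:
import json

def stringified(val):
    if isinstance(val, str):
        return val
    return json.dumps(val)  # numbers, booleans, and policy documents

assert stringified("20") == "20"
assert stringified(20) == "20"
assert stringified({"maxReceiveCount": 5}) == '{"maxReceiveCount": 5}'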
def exists(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Check to see if a queue exists.
CLI Example:
@@ -113,28 +112,23 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
- missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
- if e.response.get('Error', {}).get('Code') == missing_code:
- return {'result': False}
- return {'error': __utils__['boto3.get_error'](e)}
- return {'result': True}
+ missing_code = "AWS.SimpleQueueService.NonExistentQueue"
+ if e.response.get("Error", {}).get("Code") == missing_code:
+ return {"result": False}
+ return {"error": __utils__["boto3.get_error"](e)}
+ return {"result": True}
def create(
- name,
- attributes=None,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ name, attributes=None, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Create an SQS queue.
CLI Example:
@@ -142,7 +136,7 @@ def create(
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
@@ -152,12 +146,12 @@ def create(
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
- return {'result': True}
+ return {"error": __utils__["boto3.get_error"](e)}
+ return {"result": True}
def delete(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Delete an SQS queue.
CLI Example:
@@ -165,19 +159,19 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- url = conn.get_queue_url(QueueName=name)['QueueUrl']
+ url = conn.get_queue_url(QueueName=name)["QueueUrl"]
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
- return {'result': True}
+ return {"error": __utils__["boto3.get_error"](e)}
+ return {"result": True}
-def list_(prefix='', region=None, key=None, keyid=None, profile=None):
- '''
+def list_(prefix="", region=None, key=None, keyid=None, profile=None):
+ """
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
@@ -187,24 +181,24 @@ def list_(prefix='', region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
- return _urlparse(queue_url).path.split('/')[2]
+ return _urlparse(queue_url).path.split("/")[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
- urls = r.get('QueueUrls', [])
- return {'result': [extract_name(url) for url in urls]}
+ urls = r.get("QueueUrls", [])
+ return {"result": [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Return attributes currently set on an SQS queue.
CLI Example:
@@ -212,26 +206,21 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- url = conn.get_queue_url(QueueName=name)['QueueUrl']
- r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
- return {'result': r['Attributes']}
+ url = conn.get_queue_url(QueueName=name)["QueueUrl"]
+ r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=["All"])
+ return {"result": r["Attributes"]}
except botocore.exceptions.ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
+ return {"error": __utils__["boto3.get_error"](e)}
def set_attributes(
- name,
- attributes,
- region=None,
- key=None,
- keyid=None,
- profile=None,
+ name, attributes, region=None, key=None, keyid=None, profile=None,
):
- '''
+ """
Set attributes on an SQS queue.
CLI Example:
@@ -239,14 +228,14 @@ def set_attributes(
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
- '''
+ """
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
- url = conn.get_queue_url(QueueName=name)['QueueUrl']
+ url = conn.get_queue_url(QueueName=name)["QueueUrl"]
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
- return {'error': __utils__['boto3.get_error'](e)}
- return {'result': True}
+ return {"error": __utils__["boto3.get_error"](e)}
+ return {"result": True}
diff --git a/salt/modules/boto_ssm.py b/salt/modules/boto_ssm.py
index 16ef0dda44c..682c185fddd 100644
--- a/salt/modules/boto_ssm.py
+++ b/salt/modules/boto_ssm.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon SSM
:configuration: This module uses IAM roles assigned to the instance through
@@ -12,30 +12,40 @@ Connection module for Amazon SSM
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
:depends: boto3
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import salt.utils.json as json
+
# Import Salt libs
import salt.utils.versions
-import salt.utils.json as json
log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if boto libraries exist.
- '''
+ """
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
- __utils__['boto3.assign_funcs'](__name__, 'ssm')
+ __utils__["boto3.assign_funcs"](__name__, "ssm")
return has_boto_reqs
-def get_parameter(name, withdecryption=False, resp_json=False, region=None, key=None, keyid=None, profile=None):
- '''
+def get_parameter(
+ name,
+ withdecryption=False,
+ resp_json=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
    Retrieves a parameter from SSM Parameter Store
.. versionadded:: 3000
@@ -43,8 +53,10 @@ def get_parameter(name, withdecryption=False, resp_json=False, region=None, key=
.. code-block:: text
    salt-call boto_ssm.get_parameter test-param withdecryption=True
- '''
- conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid, profile=profile)
+ """
+ conn = __utils__["boto3.get_connection"](
+ "ssm", region=region, key=key, keyid=keyid, profile=profile
+ )
try:
resp = conn.get_parameter(Name=name, WithDecryption=withdecryption)
except conn.exceptions.ParameterNotFound:
@@ -52,23 +64,25 @@ def get_parameter(name, withdecryption=False, resp_json=False, region=None, key=
return False
if resp_json:
- return json.loads(resp['Parameter']['Value'])
+ return json.loads(resp["Parameter"]["Value"])
else:
- return resp['Parameter']['Value']
+ return resp["Parameter"]["Value"]
-def put_parameter(Name,
- Value,
- Description=None,
- Type='String',
- KeyId=None,
- Overwrite=False,
- AllowedPattern=None,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def put_parameter(
+ Name,
+ Value,
+ Description=None,
+ Type="String",
+ KeyId=None,
+ Overwrite=False,
+ AllowedPattern=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Sets a parameter in the SSM parameter store
.. versionadded:: 3000
@@ -76,46 +90,55 @@ def put_parameter(Name,
.. code-block:: text
salt-call boto_ssm.put_parameter test-param test_value Type=SecureString KeyId=alias/aws/ssm Description='test encrypted key'
- '''
- conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid, profile=profile)
- if Type not in ('String', 'StringList', 'SecureString'):
- raise AssertionError('Type needs to be String|StringList|SecureString')
- if Type == 'SecureString' and not KeyId:
- raise AssertionError('Require KeyId with SecureString')
+ """
+ conn = __utils__["boto3.get_connection"](
+ "ssm", region=region, key=key, keyid=keyid, profile=profile
+ )
+ if Type not in ("String", "StringList", "SecureString"):
+ raise AssertionError("Type needs to be String|StringList|SecureString")
+ if Type == "SecureString" and not KeyId:
+ raise AssertionError("Require KeyId with SecureString")
boto_args = {}
if Description:
- boto_args['Description'] = Description
+ boto_args["Description"] = Description
if KeyId:
- boto_args['KeyId'] = KeyId
+ boto_args["KeyId"] = KeyId
if AllowedPattern:
- boto_args['AllowedPattern'] = AllowedPattern
+ boto_args["AllowedPattern"] = AllowedPattern
try:
- resp = conn.put_parameter(Name=Name, Value=Value, Type=Type, Overwrite=Overwrite, **boto_args)
+ resp = conn.put_parameter(
+ Name=Name, Value=Value, Type=Type, Overwrite=Overwrite, **boto_args
+ )
except conn.exceptions.ParameterAlreadyExists:
- log.warning("The parameter already exists."
- " To overwrite this value, set the Overwrite option in the request to True")
+ log.warning(
+ "The parameter already exists."
+ " To overwrite this value, set the Overwrite option in the request to True"
+ )
return False
- return resp['Version']
+ return resp["Version"]
def delete_parameter(Name, region=None, key=None, keyid=None, profile=None):
- '''
+ """
Removes a parameter from the SSM parameter store
.. versionadded:: 3000
.. code-block:: text
+
salt-call boto_ssm.delete_parameter test-param
- '''
- conn = __utils__['boto3.get_connection']('ssm', region=region, key=key, keyid=keyid, profile=profile)
+ """
+ conn = __utils__["boto3.get_connection"](
+ "ssm", region=region, key=key, keyid=keyid, profile=profile
+ )
try:
resp = conn.delete_parameter(Name=Name)
except conn.exceptions.ParameterNotFound:
log.warning("delete_parameter: Unable to locate name: %s", Name)
return False
- if resp['ResponseMetadata']['HTTPStatusCode'] == 200:
+ if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
return True
else:
return False
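
A brief usage sketch of the reformatted SSM functions as called from minion-side code (the parameter name and value are hypothetical; the return contract matches the module above: put_parameter returns the new Version, or False when the parameter already exists and Overwrite is not set, and resp_json=True passes the stored value through json.loads):

    # Hypothetical minion-side usage via __salt__; names are examples only.
    version = __salt__["boto_ssm.put_parameter"](
        "app-db-password",      # hypothetical parameter name
        "s3cr3t",               # hypothetical value
        Type="SecureString",
        KeyId="alias/aws/ssm",  # required when Type is SecureString (see assertion above)
        Overwrite=True,
    )
    # version is False if the parameter existed and could not be overwritten

    value = __salt__["boto_ssm.get_parameter"]("app-db-password", withdecryption=True)
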
diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py
index 8b63ca0c53d..5ff9e6d351a 100644
--- a/salt/modules/boto_vpc.py
+++ b/salt/modules/boto_vpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Connection module for Amazon VPC
.. versionadded:: 2014.7.0
@@ -120,87 +120,94 @@ Deleting VPC peering connection via this module
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3
-'''
+"""
# keep lint from choking on _get_conn and _cache_id
-#pylint: disable=E0602
+# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
+import random
import socket
import time
-import random
# Import Salt libs
import salt.utils.compat
import salt.utils.versions
-from salt.exceptions import SaltInvocationError, CommandExecutionError
-
-# from salt.utils import exactly_one
-# TODO: Uncomment this and s/_exactly_one/exactly_one/
-# See note in utils.boto
-PROVISIONING = 'provisioning'
-PENDING_ACCEPTANCE = 'pending-acceptance'
-ACTIVE = 'active'
-
-log = logging.getLogger(__name__)
+from salt.exceptions import CommandExecutionError, SaltInvocationError
# Import third party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error
+
+# from salt.utils import exactly_one
+# TODO: Uncomment this and s/_exactly_one/exactly_one/
+# See note in utils.boto
+PROVISIONING = "provisioning"
+PENDING_ACCEPTANCE = "pending-acceptance"
+ACTIVE = "active"
+
+log = logging.getLogger(__name__)
+
+
# pylint: disable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto
import botocore
import boto.vpc
- #pylint: enable=unused-import
+
+ # pylint: enable=unused-import
from boto.exception import BotoServerError
- logging.getLogger('boto').setLevel(logging.CRITICAL)
+
+ logging.getLogger("boto").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error
try:
- #pylint: disable=unused-import
+ # pylint: disable=unused-import
import boto3
- #pylint: enable=unused-import
- logging.getLogger('boto3').setLevel(logging.CRITICAL)
+
+ # pylint: enable=unused-import
+ logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
- '''
+ """
Only load if boto libraries exist and if boto libraries are greater than
a given version.
- '''
+ """
# the boto_vpc execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
# the boto_vpc execution module relies on the create_nat_gateway() method
# which was added in boto3 1.2.6
- return salt.utils.versions.check_boto_reqs(
- boto_ver='2.8.0',
- boto3_ver='1.2.6'
- )
+ return salt.utils.versions.check_boto_reqs(boto_ver="2.8.0", boto3_ver="1.2.6")
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
if HAS_BOTO:
- __utils__['boto.assign_funcs'](__name__, 'vpc', pack=__salt__)
+ __utils__["boto.assign_funcs"](__name__, "vpc", pack=__salt__)
if HAS_BOTO3:
- __utils__['boto3.assign_funcs'](__name__, 'ec2',
- get_conn_funcname='_get_conn3',
- cache_id_funcname='_cache_id3',
- exactly_one_funcname=None)
+ __utils__["boto3.assign_funcs"](
+ __name__,
+ "ec2",
+ get_conn_funcname="_get_conn3",
+ cache_id_funcname="_cache_id3",
+ exactly_one_funcname=None,
+ )
-def check_vpc(vpc_id=None, vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def check_vpc(
+ vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check whether a VPC with the given name or id exists.
Returns the vpc_id or None. Raises SaltInvocationError if
both vpc_id and vpc_name are None. Optionally raise a
@@ -213,224 +220,295 @@ def check_vpc(vpc_id=None, vpc_name=None, region=None, key=None,
.. code-block:: bash
salt myminion boto_vpc.check_vpc vpc_name=myvpc profile=awsprofile
- '''
+ """
if not _exactly_one((vpc_name, vpc_id)):
- raise SaltInvocationError('One (but not both) of vpc_id or vpc_name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of vpc_id or vpc_name " "must be provided."
+ )
if vpc_name:
- vpc_id = _get_id(vpc_name=vpc_name, region=region, key=key, keyid=keyid,
- profile=profile)
- elif not _find_vpcs(vpc_id=vpc_id, region=region, key=key, keyid=keyid,
- profile=profile):
- log.info('VPC %s does not exist.', vpc_id)
+ vpc_id = _get_id(
+ vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ elif not _find_vpcs(
+ vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile
+ ):
+ log.info("VPC %s does not exist.", vpc_id)
return None
return vpc_id
-def _create_resource(resource, name=None, tags=None, region=None, key=None,
- keyid=None, profile=None, **kwargs):
- '''
+def _create_resource(
+ resource,
+ name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **kwargs
+):
+ """
Create a VPC resource. Returns the resource id if created, or False
if not created.
- '''
+ """
try:
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- create_resource = getattr(conn, 'create_' + resource)
+ create_resource = getattr(conn, "create_" + resource)
except AttributeError:
- raise AttributeError('{0} function does not exist for boto VPC '
- 'connection.'.format('create_' + resource))
+ raise AttributeError(
+ "{0} function does not exist for boto VPC "
+ "connection.".format("create_" + resource)
+ )
- if name and _get_resource_id(resource, name, region=region, key=key,
- keyid=keyid, profile=profile):
- return {'created': False, 'error': {'message':
- 'A {0} named {1} already exists.'.format(
- resource, name)}}
+ if name and _get_resource_id(
+ resource, name, region=region, key=key, keyid=keyid, profile=profile
+ ):
+ return {
+ "created": False,
+ "error": {
+ "message": "A {0} named {1} already exists.".format(resource, name)
+ },
+ }
r = create_resource(**kwargs)
if r:
if isinstance(r, bool):
- return {'created': True}
+ return {"created": True}
else:
- log.info('A %s with id %s was created', resource, r.id)
+ log.info("A %s with id %s was created", resource, r.id)
_maybe_set_name_tag(name, r)
_maybe_set_tags(tags, r)
if name:
- _cache_id(name,
- sub_resource=resource,
- resource_id=r.id,
- region=region,
- key=key, keyid=keyid,
- profile=profile)
- return {'created': True, 'id': r.id}
+ _cache_id(
+ name,
+ sub_resource=resource,
+ resource_id=r.id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return {"created": True, "id": r.id}
else:
if name:
- e = '{0} {1} was not created.'.format(resource, name)
+ e = "{0} {1} was not created.".format(resource, name)
else:
- e = '{0} was not created.'.format(resource)
+ e = "{0} was not created.".format(resource)
log.warning(e)
- return {'created': False, 'error': {'message': e}}
+ return {"created": False, "error": {"message": e}}
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def _delete_resource(resource, name=None, resource_id=None, region=None,
- key=None, keyid=None, profile=None, **kwargs):
- '''
+def _delete_resource(
+ resource,
+ name=None,
+ resource_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ **kwargs
+):
+ """
Delete a VPC resource. Returns True if successful, otherwise False.
- '''
+ """
if not _exactly_one((name, resource_id)):
- raise SaltInvocationError('One (but not both) of name or id must be '
- 'provided.')
+ raise SaltInvocationError(
+ "One (but not both) of name or id must be " "provided."
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
- delete_resource = getattr(conn, 'delete_' + resource)
+ delete_resource = getattr(conn, "delete_" + resource)
except AttributeError:
- raise AttributeError('{0} function does not exist for boto VPC '
- 'connection.'.format('delete_' + resource))
+ raise AttributeError(
+ "{0} function does not exist for boto VPC "
+ "connection.".format("delete_" + resource)
+ )
if name:
- resource_id = _get_resource_id(resource, name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ resource_id = _get_resource_id(
+ resource, name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not resource_id:
- return {'deleted': False, 'error': {'message':
- '{0} {1} does not exist.'.format(resource, name)}}
+ return {
+ "deleted": False,
+ "error": {
+ "message": "{0} {1} does not exist.".format(resource, name)
+ },
+ }
if delete_resource(resource_id, **kwargs):
- _cache_id(name, sub_resource=resource,
- resource_id=resource_id,
- invalidate=True,
- region=region,
- key=key, keyid=keyid,
- profile=profile)
- return {'deleted': True}
+ _cache_id(
+ name,
+ sub_resource=resource,
+ resource_id=resource_id,
+ invalidate=True,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return {"deleted": True}
else:
if name:
- e = '{0} {1} was not deleted.'.format(resource, name)
+ e = "{0} {1} was not deleted.".format(resource, name)
else:
- e = '{0} was not deleted.'.format(resource)
- return {'deleted': False, 'error': {'message': e}}
+ e = "{0} was not deleted.".format(resource)
+ return {"deleted": False, "error": {"message": e}}
except BotoServerError as e:
- return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto.get_error"](e)}
-def _get_resource(resource, name=None, resource_id=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def _get_resource(
+ resource,
+ name=None,
+ resource_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get a VPC resource based on resource type and name or id.
Cache the id if name was provided.
- '''
+ """
if not _exactly_one((name, resource_id)):
- raise SaltInvocationError('One (but not both) of name or id must be '
- 'provided.')
+ raise SaltInvocationError(
+ "One (but not both) of name or id must be " "provided."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- f = 'get_all_{0}'.format(resource)
- if not f.endswith('s'):
- f = f + 's'
+ f = "get_all_{0}".format(resource)
+ if not f.endswith("s"):
+ f = f + "s"
get_resources = getattr(conn, f)
filter_parameters = {}
if name:
- filter_parameters['filters'] = {'tag:Name': name}
+ filter_parameters["filters"] = {"tag:Name": name}
if resource_id:
- filter_parameters['{0}_ids'.format(resource)] = resource_id
+ filter_parameters["{0}_ids".format(resource)] = resource_id
try:
r = get_resources(**filter_parameters)
except BotoServerError as e:
- if e.code.endswith('.NotFound'):
+ if e.code.endswith(".NotFound"):
return None
raise
if r:
if len(r) == 1:
if name:
- _cache_id(name, sub_resource=resource,
- resource_id=r[0].id,
- region=region,
- key=key, keyid=keyid,
- profile=profile)
+ _cache_id(
+ name,
+ sub_resource=resource,
+ resource_id=r[0].id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
return r[0]
else:
- raise CommandExecutionError('Found more than one '
- '{0} named "{1}"'.format(
- resource, name))
+ raise CommandExecutionError(
+ "Found more than one " '{0} named "{1}"'.format(resource, name)
+ )
else:
return None
-def _find_resources(resource, name=None, resource_id=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_resources(
+ resource,
+ name=None,
+ resource_id=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get VPC resources based on resource type and name, id, or tags.
- '''
+ """
if all((resource_id, name)):
- raise SaltInvocationError('Only one of name or id may be '
- 'provided.')
+ raise SaltInvocationError("Only one of name or id may be " "provided.")
if not any((resource_id, name, tags)):
- raise SaltInvocationError('At least one of the following must be '
- 'provided: id, name, or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be " "provided: id, name, or tags."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- f = 'get_all_{0}'.format(resource)
- if not f.endswith('s'):
- f = f + 's'
+ f = "get_all_{0}".format(resource)
+ if not f.endswith("s"):
+ f = f + "s"
get_resources = getattr(conn, f)
filter_parameters = {}
if name:
- filter_parameters['filters'] = {'tag:Name': name}
+ filter_parameters["filters"] = {"tag:Name": name}
if resource_id:
- filter_parameters['{0}_ids'.format(resource)] = resource_id
+ filter_parameters["{0}_ids".format(resource)] = resource_id
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
try:
r = get_resources(**filter_parameters)
except BotoServerError as e:
- if e.code.endswith('.NotFound'):
+ if e.code.endswith(".NotFound"):
return None
raise
return r
-def _get_resource_id(resource, name, region=None, key=None,
- keyid=None, profile=None):
- '''
+def _get_resource_id(resource, name, region=None, key=None, keyid=None, profile=None):
+ """
Get an AWS id for a VPC resource by type and name.
- '''
+ """
- _id = _cache_id(name, sub_resource=resource,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ _id = _cache_id(
+ name,
+ sub_resource=resource,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if _id:
return _id
- r = _get_resource(resource, name=name, region=region, key=key,
- keyid=keyid, profile=profile)
+ r = _get_resource(
+ resource, name=name, region=region, key=key, keyid=keyid, profile=profile
+ )
if r:
return r.id
-def get_resource_id(resource, name=None, resource_id=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def get_resource_id(
+ resource,
+ name=None,
+ resource_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Get an AWS id for a VPC resource by type and name.
.. versionadded:: 2015.8.0
@@ -441,18 +519,29 @@ def get_resource_id(resource, name=None, resource_id=None, region=None,
salt myminion boto_vpc.get_resource_id internet_gateway myigw
- '''
+ """
try:
- return {'id': _get_resource_id(resource, name, region=region, key=key,
- keyid=keyid, profile=profile)}
+ return {
+ "id": _get_resource_id(
+ resource, name, region=region, key=key, keyid=keyid, profile=profile
+ )
+ }
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def resource_exists(resource, name=None, resource_id=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def resource_exists(
+ resource,
+ name=None,
+ resource_id=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a resource type and name, return {exists: true} if it exists,
{exists: false} if it does not exist, or {error: {message: error text}
on error.
@@ -465,52 +554,71 @@ def resource_exists(resource, name=None, resource_id=None, tags=None,
salt myminion boto_vpc.resource_exists internet_gateway myigw
- '''
+ """
try:
- return {'exists': bool(_find_resources(resource, name=name,
- resource_id=resource_id,
- tags=tags, region=region,
- key=key, keyid=keyid,
- profile=profile))}
+ return {
+ "exists": bool(
+ _find_resources(
+ resource,
+ name=name,
+ resource_id=resource_id,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ )
+ }
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
+def _find_vpcs(
+ vpc_id=None,
+ vpc_name=None,
+ cidr=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
- '''
+ """
Given VPC properties, find and return matching VPC ids.
- '''
+ """
if all((vpc_id, vpc_name)):
- raise SaltInvocationError('Only one of vpc_name or vpc_id may be '
- 'provided.')
+ raise SaltInvocationError("Only one of vpc_name or vpc_id may be " "provided.")
if not any((vpc_id, vpc_name, tags, cidr)):
- raise SaltInvocationError('At least one of the following must be '
- 'provided: vpc_id, vpc_name, cidr or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be "
+ "provided: vpc_id, vpc_name, cidr or tags."
+ )
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if vpc_id:
- filter_parameters['vpc_ids'] = [vpc_id]
+ filter_parameters["vpc_ids"] = [vpc_id]
if cidr:
- filter_parameters['filters']['cidr'] = cidr
+ filter_parameters["filters"]["cidr"] = cidr
if vpc_name:
- filter_parameters['filters']['tag:Name'] = vpc_name
+ filter_parameters["filters"]["tag:Name"] = vpc_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
vpcs = conn.get_all_vpcs(**filter_parameters)
- log.debug('The filters criteria %s matched the following VPCs:%s',
- filter_parameters, vpcs)
+ log.debug(
+ "The filters criteria %s matched the following VPCs:%s", filter_parameters, vpcs
+ )
if vpcs:
return [vpc.id for vpc in vpcs]
@@ -518,40 +626,56 @@ def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
return []
-def _get_id(vpc_name=None, cidr=None, tags=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def _get_id(
+ vpc_name=None, cidr=None, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given VPC properties, return the VPC id if a match is found.
- '''
+ """
if vpc_name and not any((cidr, tags)):
- vpc_id = _cache_id(vpc_name, region=region,
- key=key, keyid=keyid,
- profile=profile)
+ vpc_id = _cache_id(
+ vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if vpc_id:
return vpc_id
- vpc_ids = _find_vpcs(vpc_name=vpc_name, cidr=cidr, tags=tags, region=region,
- key=key, keyid=keyid, profile=profile)
+ vpc_ids = _find_vpcs(
+ vpc_name=vpc_name,
+ cidr=cidr,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if vpc_ids:
log.debug("Matching VPC: %s", " ".join(vpc_ids))
if len(vpc_ids) == 1:
vpc_id = vpc_ids[0]
if vpc_name:
- _cache_id(vpc_name, vpc_id,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ _cache_id(
+ vpc_name,
+ vpc_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
return vpc_id
else:
- raise CommandExecutionError('Found more than one VPC matching the criteria.')
+ raise CommandExecutionError(
+ "Found more than one VPC matching the criteria."
+ )
else:
- log.info('No VPC found.')
+ log.info("No VPC found.")
return None
-def get_id(name=None, cidr=None, tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_id(
+ name=None, cidr=None, tags=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given VPC properties, return the VPC id if a match is found.
CLI Example:
@@ -560,18 +684,35 @@ def get_id(name=None, cidr=None, tags=None, region=None, key=None, keyid=None,
salt myminion boto_vpc.get_id myvpc
- '''
+ """
try:
- return {'id': _get_id(vpc_name=name, cidr=cidr, tags=tags, region=region,
- key=key, keyid=keyid, profile=profile)}
+ return {
+ "id": _get_id(
+ vpc_name=name,
+ cidr=cidr,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ }
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def exists(
+ vpc_id=None,
+ name=None,
+ cidr=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a VPC ID, check to see if the given VPC ID exists.
Returns True if the given VPC ID exists and returns False if the given
@@ -583,25 +724,42 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
salt myminion boto_vpc.exists myvpc
- '''
+ """
try:
- vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
- region=region, key=key, keyid=keyid, profile=profile)
+ vpc_ids = _find_vpcs(
+ vpc_id=vpc_id,
+ vpc_name=name,
+ cidr=cidr,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except BotoServerError as err:
- boto_err = __utils__['boto.get_error'](err)
- if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
+ boto_err = __utils__["boto.get_error"](err)
+ if boto_err.get("aws", {}).get("code") == "InvalidVpcID.NotFound":
# VPC was not found: handle the error and return False.
- return {'exists': False}
- return {'error': boto_err}
+ return {"exists": False}
+ return {"error": boto_err}
- return {'exists': bool(vpc_ids)}
+ return {"exists": bool(vpc_ids)}
-def create(cidr_block, instance_tenancy=None, vpc_name=None,
- enable_dns_support=None, enable_dns_hostnames=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create(
+ cidr_block,
+ instance_tenancy=None,
+ vpc_name=None,
+ enable_dns_support=None,
+ enable_dns_hostnames=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid CIDR block, create a VPC.
An optional instance_tenancy argument can be provided. If provided, the
@@ -618,33 +776,46 @@ def create(cidr_block, instance_tenancy=None, vpc_name=None,
salt myminion boto_vpc.create '10.0.0.0/24'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)
if vpc:
- log.info('The newly created VPC id is %s', vpc.id)
+ log.info("The newly created VPC id is %s", vpc.id)
_maybe_set_name_tag(vpc_name, vpc)
_maybe_set_tags(tags, vpc)
_maybe_set_dns(conn, vpc.id, enable_dns_support, enable_dns_hostnames)
_maybe_name_route_table(conn, vpc.id, vpc_name)
if vpc_name:
- _cache_id(vpc_name, vpc.id,
- region=region, key=key,
- keyid=keyid, profile=profile)
- return {'created': True, 'id': vpc.id}
+ _cache_id(
+ vpc_name,
+ vpc.id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return {"created": True, "id": vpc.id}
else:
- log.warning('VPC was not created')
- return {'created': False}
+ log.warning("VPC was not created")
+ return {"created": False}
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete(
+ vpc_id=None,
+ name=None,
+ vpc_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a VPC ID or VPC name, delete the VPC.
Returns {deleted: true} if the VPC was deleted and returns
@@ -657,44 +828,59 @@ def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
salt myminion boto_vpc.delete vpc_id='vpc-6b1fe402'
salt myminion boto_vpc.delete name='myvpc'
- '''
+ """
if name:
- log.warning('boto_vpc.delete: name parameter is deprecated '
- 'use vpc_name instead.')
+ log.warning(
+ "boto_vpc.delete: name parameter is deprecated " "use vpc_name instead."
+ )
vpc_name = name
if not _exactly_one((vpc_name, vpc_id)):
- raise SaltInvocationError('One (but not both) of vpc_name or vpc_id must be '
- 'provided.')
+ raise SaltInvocationError(
+ "One (but not both) of vpc_name or vpc_id must be " "provided."
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not vpc_id:
- vpc_id = _get_id(vpc_name=vpc_name, tags=tags, region=region, key=key,
- keyid=keyid, profile=profile)
+ vpc_id = _get_id(
+ vpc_name=vpc_name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not vpc_id:
- return {'deleted': False, 'error': {'message':
- 'VPC {0} not found'.format(vpc_name)}}
+ return {
+ "deleted": False,
+ "error": {"message": "VPC {0} not found".format(vpc_name)},
+ }
if conn.delete_vpc(vpc_id):
- log.info('VPC %s was deleted.', vpc_id)
+ log.info("VPC %s was deleted.", vpc_id)
if vpc_name:
- _cache_id(vpc_name, resource_id=vpc_id,
- invalidate=True,
- region=region,
- key=key, keyid=keyid,
- profile=profile)
- return {'deleted': True}
+ _cache_id(
+ vpc_name,
+ resource_id=vpc_id,
+ invalidate=True,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ return {"deleted": True}
else:
- log.warning('VPC %s was not deleted.', vpc_id)
- return {'deleted': False}
+ log.warning("VPC %s was not deleted.", vpc_id)
+ return {"deleted": False}
except BotoServerError as e:
- return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto.get_error"](e)}
-def describe(vpc_id=None, vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def describe(
+ vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a VPC ID describe its properties.
Returns a dictionary of interesting properties.
@@ -709,47 +895,62 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
salt myminion boto_vpc.describe vpc_id=vpc-123456
salt myminion boto_vpc.describe vpc_name=myvpc
- '''
+ """
if not any((vpc_id, vpc_name)):
- raise SaltInvocationError('A valid vpc id or name needs to be specified.')
+ raise SaltInvocationError("A valid vpc id or name needs to be specified.")
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
except BotoServerError as err:
- boto_err = __utils__['boto.get_error'](err)
- if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
+ boto_err = __utils__["boto.get_error"](err)
+ if boto_err.get("aws", {}).get("code") == "InvalidVpcID.NotFound":
# VPC was not found: handle the error and return None.
- return {'vpc': None}
- return {'error': boto_err}
+ return {"vpc": None}
+ return {"error": boto_err}
if not vpc_id:
- return {'vpc': None}
+ return {"vpc": None}
- filter_parameters = {'vpc_ids': vpc_id}
+ filter_parameters = {"vpc_ids": vpc_id}
try:
vpcs = conn.get_all_vpcs(**filter_parameters)
except BotoServerError as err:
- return {'error': __utils__['boto.get_error'](err)}
+ return {"error": __utils__["boto.get_error"](err)}
if vpcs:
vpc = vpcs[0] # Found!
- log.debug('Found VPC: %s', vpc.id)
+ log.debug("Found VPC: %s", vpc.id)
- keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
- 'dhcp_options_id', 'instance_tenancy')
+ keys = (
+ "id",
+ "cidr_block",
+ "is_default",
+ "state",
+ "tags",
+ "dhcp_options_id",
+ "instance_tenancy",
+ )
_r = dict([(k, getattr(vpc, k)) for k in keys])
- _r.update({'region': getattr(vpc, 'region').name})
- return {'vpc': _r}
+ _r.update({"region": getattr(vpc, "region").name})
+ return {"vpc": _r}
else:
- return {'vpc': None}
+ return {"vpc": None}
-def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_vpcs(
+ vpc_id=None,
+ name=None,
+ cidr=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Describe all VPCs, matching the filter criteria if provided.
Returns a list of dictionaries with interesting properties.
@@ -762,32 +963,34 @@ def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
salt myminion boto_vpc.describe_vpcs
- '''
+ """
- keys = ('id',
- 'cidr_block',
- 'is_default',
- 'state',
- 'tags',
- 'dhcp_options_id',
- 'instance_tenancy')
+ keys = (
+ "id",
+ "cidr_block",
+ "is_default",
+ "state",
+ "tags",
+ "dhcp_options_id",
+ "instance_tenancy",
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if vpc_id:
- filter_parameters['vpc_ids'] = [vpc_id]
+ filter_parameters["vpc_ids"] = [vpc_id]
if cidr:
- filter_parameters['filters']['cidr'] = cidr
+ filter_parameters["filters"]["cidr"] = cidr
if name:
- filter_parameters['filters']['tag:Name'] = name
+ filter_parameters["filters"]["tag:Name"] = name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
vpcs = conn.get_all_vpcs(**filter_parameters)
@@ -795,43 +998,48 @@ def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
ret = []
for vpc in vpcs:
_r = dict([(k, getattr(vpc, k)) for k in keys])
- _r.update({'region': getattr(vpc, 'region').name})
+ _r.update({"region": getattr(vpc, "region").name})
ret.append(_r)
- return {'vpcs': ret}
+ return {"vpcs": ret}
else:
- return {'vpcs': []}
+ return {"vpcs": []}
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None):
- '''
+ """
Given subnet properties, find and return matching subnet ids
- '''
+ """
if not any([subnet_name, tags, cidr]):
- raise SaltInvocationError('At least one of the following must be '
- 'specified: subnet_name, cidr or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be "
+ "specified: subnet_name, cidr or tags."
+ )
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if cidr:
- filter_parameters['filters']['cidr'] = cidr
+ filter_parameters["filters"]["cidr"] = cidr
if subnet_name:
- filter_parameters['filters']['tag:Name'] = subnet_name
+ filter_parameters["filters"]["tag:Name"] = subnet_name
if vpc_id:
- filter_parameters['filters']['VpcId'] = vpc_id
+ filter_parameters["filters"]["VpcId"] = vpc_id
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
subnets = conn.get_all_subnets(**filter_parameters)
- log.debug('The filters criteria %s matched the following subnets: %s',
- filter_parameters, subnets)
+ log.debug(
+ "The filters criteria %s matched the following subnets: %s",
+ filter_parameters,
+ subnets,
+ )
if subnets:
return [subnet.id for subnet in subnets]
@@ -839,10 +1047,20 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
return False
-def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,
- availability_zone=None, subnet_name=None, tags=None,
- region=None, key=None, keyid=None, profile=None, auto_assign_public_ipv4=False):
- '''
+def create_subnet(
+ vpc_id=None,
+ cidr_block=None,
+ vpc_name=None,
+ availability_zone=None,
+ subnet_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ auto_assign_public_ipv4=False,
+):
+ """
Given a valid VPC ID or Name and a CIDR block, create a subnet for the VPC.
An optional availability zone argument can be provided.
@@ -860,29 +1078,45 @@ def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,
subnet_name='mysubnet' cidr_block='10.0.0.0/25'
salt myminion boto_vpc.create_subnet vpc_name='myvpc' \\
subnet_name='mysubnet', cidr_block='10.0.0.0/25'
- '''
+ """
try:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
- return {'created': False, 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "VPC {0} does not exist.".format(vpc_name or vpc_id)
+ },
+ }
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
- subnet_object_dict = _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,
- availability_zone=availability_zone,
- cidr_block=cidr_block, region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_object_dict = _create_resource(
+ "subnet",
+ name=subnet_name,
+ tags=tags,
+ vpc_id=vpc_id,
+ availability_zone=availability_zone,
+ cidr_block=cidr_block,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
# if auto_assign_public_ipv4 is requested set that to true using boto3
if auto_assign_public_ipv4:
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
- conn3.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_object_dict['id'])
+ conn3.modify_subnet_attribute(
+ MapPublicIpOnLaunch={"Value": True}, SubnetId=subnet_object_dict["id"]
+ )
return subnet_object_dict
-def delete_subnet(subnet_id=None, subnet_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_subnet(
+ subnet_id=None, subnet_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a subnet ID or name, delete the subnet.
Returns True if the subnet was deleted and returns False if the subnet was not deleted.
@@ -896,17 +1130,32 @@ def delete_subnet(subnet_id=None, subnet_name=None, region=None, key=None,
salt myminion boto_vpc.delete_subnet 'subnet-6a1fe403'
- '''
+ """
- return _delete_resource(resource='subnet', name=subnet_name,
- resource_id=subnet_id, region=region, key=key,
- keyid=keyid, profile=profile)
+ return _delete_resource(
+ resource="subnet",
+ name=subnet_name,
+ resource_id=subnet_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
- tags=None, zones=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def subnet_exists(
+ subnet_id=None,
+ name=None,
+ subnet_name=None,
+ cidr=None,
+ tags=None,
+ zones=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Check if a subnet exists.
Returns True if the subnet exists, otherwise returns False.
@@ -921,57 +1170,63 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
salt myminion boto_vpc.subnet_exists subnet_id='subnet-6a1fe403'
- '''
+ """
if name:
- log.warning('boto_vpc.subnet_exists: name parameter is deprecated '
- 'use subnet_name instead.')
+ log.warning(
+ "boto_vpc.subnet_exists: name parameter is deprecated "
+ "use subnet_name instead."
+ )
subnet_name = name
if not any((subnet_id, subnet_name, cidr, tags, zones)):
- raise SaltInvocationError('At least one of the following must be '
- 'specified: subnet id, cidr, subnet_name, '
- 'tags, or zones.')
+ raise SaltInvocationError(
+ "At least one of the following must be "
+ "specified: subnet id, cidr, subnet_name, "
+ "tags, or zones."
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as err:
- return {'error': __utils__['boto.get_error'](err)}
+ return {"error": __utils__["boto.get_error"](err)}
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if subnet_id:
- filter_parameters['subnet_ids'] = [subnet_id]
+ filter_parameters["subnet_ids"] = [subnet_id]
if subnet_name:
- filter_parameters['filters']['tag:Name'] = subnet_name
+ filter_parameters["filters"]["tag:Name"] = subnet_name
if cidr:
- filter_parameters['filters']['cidr'] = cidr
+ filter_parameters["filters"]["cidr"] = cidr
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
if zones:
- filter_parameters['filters']['availability_zone'] = zones
+ filter_parameters["filters"]["availability_zone"] = zones
try:
subnets = conn.get_all_subnets(**filter_parameters)
except BotoServerError as err:
- boto_err = __utils__['boto.get_error'](err)
- if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
+ boto_err = __utils__["boto.get_error"](err)
+ if boto_err.get("aws", {}).get("code") == "InvalidSubnetID.NotFound":
# Subnet was not found: handle the error and return False.
- return {'exists': False}
- return {'error': boto_err}
+ return {"exists": False}
+ return {"error": boto_err}
- log.debug('The filters criteria %s matched the following subnets:%s',
- filter_parameters, subnets)
+ log.debug(
+ "The filters criteria %s matched the following subnets:%s",
+ filter_parameters,
+ subnets,
+ )
if subnets:
- log.info('Subnet %s exists.', subnet_name or subnet_id)
- return {'exists': True}
+ log.info("Subnet %s exists.", subnet_name or subnet_id)
+ return {"exists": True}
else:
- log.info('Subnet %s does not exist.', subnet_name or subnet_id)
- return {'exists': False}
+ log.info("Subnet %s does not exist.", subnet_name or subnet_id)
+ return {"exists": False}
-def get_subnet_association(subnets, region=None, key=None, keyid=None,
- profile=None):
- '''
+def get_subnet_association(subnets, region=None, key=None, keyid=None, profile=None):
+ """
Given a subnet (aka: a vpc zone identifier) or list of subnets, returns
vpc association.
@@ -989,35 +1244,37 @@ def get_subnet_association(subnets, region=None, key=None, keyid=None,
salt myminion boto_vpc.get_subnet_association ['subnet-61b47516','subnet-2cb9785b']
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
# subnet_ids=subnets can accept either a string or a list
subnets = conn.get_all_subnets(subnet_ids=subnets)
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
# using a set to store vpc_ids - the use of set prevents duplicate
# vpc_id values
vpc_ids = set()
for subnet in subnets:
- log.debug('examining subnet id: %s for vpc_id', subnet.id)
+ log.debug("examining subnet id: %s for vpc_id", subnet.id)
if subnet in subnets:
- log.debug('subnet id: %s is associated with vpc id: %s',
- subnet.id, subnet.vpc_id)
+ log.debug(
+ "subnet id: %s is associated with vpc id: %s", subnet.id, subnet.vpc_id
+ )
vpc_ids.add(subnet.vpc_id)
if not vpc_ids:
- return {'vpc_id': None}
+ return {"vpc_id": None}
elif len(vpc_ids) == 1:
- return {'vpc_id': vpc_ids.pop()}
+ return {"vpc_id": vpc_ids.pop()}
else:
- return {'vpc_ids': list(vpc_ids)}
+ return {"vpc_ids": list(vpc_ids)}
-def describe_subnet(subnet_id=None, subnet_name=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def describe_subnet(
+ subnet_id=None, subnet_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Given a subnet id or name, describe its properties.
Returns a dictionary of interesting properties.
@@ -1031,31 +1288,53 @@ def describe_subnet(subnet_id=None, subnet_name=None, region=None,
salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456
salt myminion boto_vpc.describe_subnet subnet_name=mysubnet
- '''
+ """
try:
- subnet = _get_resource('subnet', name=subnet_name, resource_id=subnet_id,
- region=region, key=key, keyid=keyid, profile=profile)
+ subnet = _get_resource(
+ "subnet",
+ name=subnet_name,
+ resource_id=subnet_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
if not subnet:
- return {'subnet': None}
- log.debug('Found subnet: %s', subnet.id)
+ return {"subnet": None}
+ log.debug("Found subnet: %s", subnet.id)
- keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id')
- ret = {'subnet': dict((k, getattr(subnet, k)) for k in keys)}
- explicit_route_table_assoc = _get_subnet_explicit_route_table(ret['subnet']['id'],
- ret['subnet']['vpc_id'],
- conn=None, region=region,
- key=key, keyid=keyid, profile=profile)
+ keys = ("id", "cidr_block", "availability_zone", "tags", "vpc_id")
+ ret = {"subnet": dict((k, getattr(subnet, k)) for k in keys)}
+ explicit_route_table_assoc = _get_subnet_explicit_route_table(
+ ret["subnet"]["id"],
+ ret["subnet"]["vpc_id"],
+ conn=None,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if explicit_route_table_assoc:
- ret['subnet']['explicit_route_table_association_id'] = explicit_route_table_assoc
+ ret["subnet"][
+ "explicit_route_table_association_id"
+ ] = explicit_route_table_assoc
return ret
-def describe_subnets(subnet_ids=None, subnet_names=None, vpc_id=None, cidr=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_subnets(
+ subnet_ids=None,
+ subnet_names=None,
+ vpc_id=None,
+ cidr=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a VPC ID or subnet CIDR, returns a list of associated subnets and
their details. Return all subnets if VPC ID or CIDR are not provided.
If a subnet id or CIDR is provided, only its associated subnet details will be
@@ -1081,49 +1360,63 @@ def describe_subnets(subnet_ids=None, subnet_names=None, vpc_id=None, cidr=None,
salt myminion boto_vpc.describe_subnets cidr=10.0.0.0/21
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if vpc_id:
- filter_parameters['filters']['vpcId'] = vpc_id
+ filter_parameters["filters"]["vpcId"] = vpc_id
if cidr:
- filter_parameters['filters']['cidrBlock'] = cidr
+ filter_parameters["filters"]["cidrBlock"] = cidr
if subnet_names:
- filter_parameters['filters']['tag:Name'] = subnet_names
+ filter_parameters["filters"]["tag:Name"] = subnet_names
subnets = conn.get_all_subnets(subnet_ids=subnet_ids, **filter_parameters)
- log.debug('The filters criteria %s matched the following subnets: %s',
- filter_parameters, subnets)
+ log.debug(
+ "The filters criteria %s matched the following subnets: %s",
+ filter_parameters,
+ subnets,
+ )
if not subnets:
- return {'subnets': None}
+ return {"subnets": None}
subnets_list = []
- keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id')
+ keys = ("id", "cidr_block", "availability_zone", "tags", "vpc_id")
for item in subnets:
subnet = {}
for key in keys:
if hasattr(item, key):
subnet[key] = getattr(item, key)
- explicit_route_table_assoc = _get_subnet_explicit_route_table(subnet['id'], subnet['vpc_id'], conn=conn)
+ explicit_route_table_assoc = _get_subnet_explicit_route_table(
+ subnet["id"], subnet["vpc_id"], conn=conn
+ )
if explicit_route_table_assoc:
- subnet['explicit_route_table_association_id'] = explicit_route_table_assoc
+ subnet[
+ "explicit_route_table_association_id"
+ ] = explicit_route_table_assoc
subnets_list.append(subnet)
- return {'subnets': subnets_list}
+ return {"subnets": subnets_list}
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def create_internet_gateway(internet_gateway_name=None, vpc_id=None,
- vpc_name=None, tags=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def create_internet_gateway(
+ internet_gateway_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create an Internet Gateway, optionally attaching it to an existing VPC.
Returns the internet gateway id if the internet gateway was created and
@@ -1138,35 +1431,49 @@ def create_internet_gateway(internet_gateway_name=None, vpc_id=None,
salt myminion boto_vpc.create_internet_gateway \\
internet_gateway_name=myigw vpc_name=myvpc
- '''
+ """
try:
if vpc_id or vpc_name:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
- return {'created': False,
- 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "VPC {0} does not exist.".format(vpc_name or vpc_id)
+ },
+ }
- r = _create_resource('internet_gateway', name=internet_gateway_name,
- tags=tags, region=region, key=key, keyid=keyid,
- profile=profile)
- if r.get('created') and vpc_id:
+ r = _create_resource(
+ "internet_gateway",
+ name=internet_gateway_name,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if r.get("created") and vpc_id:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.attach_internet_gateway(r['id'], vpc_id)
+ conn.attach_internet_gateway(r["id"], vpc_id)
log.info(
- 'Attached internet gateway %s to VPC %s',
- r['id'], vpc_name or vpc_id
+ "Attached internet gateway %s to VPC %s", r["id"], vpc_name or vpc_id
)
return r
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def delete_internet_gateway(internet_gateway_id=None,
- internet_gateway_name=None,
- detach=False, region=None,
- key=None, keyid=None, profile=None):
- '''
+def delete_internet_gateway(
+ internet_gateway_id=None,
+ internet_gateway_name=None,
+ detach=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete an internet gateway (by name or id).
Returns True if the internet gateway was deleted and otherwise False.
@@ -1180,88 +1487,128 @@ def delete_internet_gateway(internet_gateway_id=None,
salt myminion boto_vpc.delete_internet_gateway internet_gateway_id=igw-1a2b3c
salt myminion boto_vpc.delete_internet_gateway internet_gateway_name=myigw
- '''
+ """
try:
if internet_gateway_name:
- internet_gateway_id = _get_resource_id('internet_gateway',
- internet_gateway_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ internet_gateway_id = _get_resource_id(
+ "internet_gateway",
+ internet_gateway_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not internet_gateway_id:
- return {'deleted': False, 'error': {
- 'message': 'internet gateway {0} does not exist.'.format(
- internet_gateway_name)}}
+ return {
+ "deleted": False,
+ "error": {
+ "message": "internet gateway {0} does not exist.".format(
+ internet_gateway_name
+ )
+ },
+ }
if detach:
- igw = _get_resource('internet_gateway',
- resource_id=internet_gateway_id, region=region,
- key=key, keyid=keyid, profile=profile)
+ igw = _get_resource(
+ "internet_gateway",
+ resource_id=internet_gateway_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not igw:
- return {'deleted': False, 'error': {
- 'message': 'internet gateway {0} does not exist.'.format(
- internet_gateway_id)}}
+ return {
+ "deleted": False,
+ "error": {
+ "message": "internet gateway {0} does not exist.".format(
+ internet_gateway_id
+ )
+ },
+ }
if igw.attachments:
- conn = _get_conn(region=region, key=key, keyid=keyid,
- profile=profile)
- conn.detach_internet_gateway(internet_gateway_id,
- igw.attachments[0].vpc_id)
- return _delete_resource('internet_gateway',
- resource_id=internet_gateway_id,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
+ conn.detach_internet_gateway(
+ internet_gateway_id, igw.attachments[0].vpc_id
+ )
+ return _delete_resource(
+ "internet_gateway",
+ resource_id=internet_gateway_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
except BotoServerError as e:
- return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto.get_error"](e)}
-def _find_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None, vpc_id=None, vpc_name=None,
- states=('pending', 'available'),
- region=None, key=None, keyid=None, profile=None):
- '''
+def _find_nat_gateways(
+ nat_gateway_id=None,
+ subnet_id=None,
+ subnet_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ states=("pending", "available"),
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given gateway properties, find and return matching nat gateways
- '''
+ """
if not any((nat_gateway_id, subnet_id, subnet_name, vpc_id, vpc_name)):
- raise SaltInvocationError('At least one of the following must be '
- 'provided: nat_gateway_id, subnet_id, '
- 'subnet_name, vpc_id, or vpc_name.')
- filter_parameters = {'Filter': []}
+ raise SaltInvocationError(
+ "At least one of the following must be "
+ "provided: nat_gateway_id, subnet_id, "
+ "subnet_name, vpc_id, or vpc_name."
+ )
+ filter_parameters = {"Filter": []}
if nat_gateway_id:
- filter_parameters['NatGatewayIds'] = [nat_gateway_id]
+ filter_parameters["NatGatewayIds"] = [nat_gateway_id]
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not subnet_id:
return False
if subnet_id:
- filter_parameters['Filter'].append({'Name': 'subnet-id', 'Values': [subnet_id]})
+ filter_parameters["Filter"].append({"Name": "subnet-id", "Values": [subnet_id]})
if vpc_name:
- vpc_id = _get_resource_id('vpc', vpc_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ vpc_id = _get_resource_id(
+ "vpc", vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not vpc_id:
return False
if vpc_id:
- filter_parameters['Filter'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ filter_parameters["Filter"].append({"Name": "vpc-id", "Values": [vpc_id]})
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
nat_gateways = []
- for ret in __utils__['boto3.paged_call'](conn3.describe_nat_gateways,
- marker_flag='NextToken', marker_arg='NextToken',
- **filter_parameters):
- for gw in ret.get('NatGateways', []):
- if gw.get('State') in states:
+ for ret in __utils__["boto3.paged_call"](
+ conn3.describe_nat_gateways,
+ marker_flag="NextToken",
+ marker_arg="NextToken",
+ **filter_parameters
+ ):
+ for gw in ret.get("NatGateways", []):
+ if gw.get("State") in states:
nat_gateways.append(gw)
- log.debug('The filters criteria %s matched the following nat gateways: %s',
- filter_parameters, nat_gateways)
+ log.debug(
+ "The filters criteria %s matched the following nat gateways: %s",
+ filter_parameters,
+ nat_gateways,
+ )
if nat_gateways:
return nat_gateways
@@ -1269,11 +1616,19 @@ def _find_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None, vp
return False
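For reference, a standalone sketch of the same pagination-plus-state filtering using boto3 directly; the region, VPC id, and accepted states are placeholder assumptions:

    import boto3

    ec2 = boto3.client("ec2", region_name="us-east-1")  # placeholder region
    paginator = ec2.get_paginator("describe_nat_gateways")
    matches = []
    # EC2's DescribeNatGateways takes a singular 'Filter' key, as above.
    pages = paginator.paginate(
        Filter=[{"Name": "vpc-id", "Values": ["vpc-6b1fe402"]}]
    )
    for page in pages:
        for gw in page.get("NatGateways", []):
            if gw.get("State") in ("pending", "available"):
                matches.append(gw)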
-def nat_gateway_exists(nat_gateway_id=None, subnet_id=None, subnet_name=None,
- vpc_id=None, vpc_name=None,
- states=('pending', 'available'),
- region=None, key=None, keyid=None, profile=None):
- '''
+def nat_gateway_exists(
+ nat_gateway_id=None,
+ subnet_id=None,
+ subnet_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ states=("pending", "available"),
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Checks if a nat gateway exists.
This function requires boto3 to be installed.
@@ -1287,23 +1642,37 @@ def nat_gateway_exists(nat_gateway_id=None, subnet_id=None, subnet_name=None,
salt myminion boto_vpc.nat_gateway_exists nat_gateway_id='nat-03b02643b43216fe7'
salt myminion boto_vpc.nat_gateway_exists subnet_id='subnet-5b05942d'
- '''
+ """
- return bool(_find_nat_gateways(nat_gateway_id=nat_gateway_id,
- subnet_id=subnet_id,
- subnet_name=subnet_name,
- vpc_id=vpc_id,
- vpc_name=vpc_name,
- states=states,
- region=region, key=key, keyid=keyid,
- profile=profile))
+ return bool(
+ _find_nat_gateways(
+ nat_gateway_id=nat_gateway_id,
+ subnet_id=subnet_id,
+ subnet_name=subnet_name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ states=states,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ )
-def describe_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None,
- vpc_id=None, vpc_name=None,
- states=('pending', 'available'),
- region=None, key=None, keyid=None, profile=None):
- '''
+def describe_nat_gateways(
+ nat_gateway_id=None,
+ subnet_id=None,
+ subnet_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ states=("pending", "available"),
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Return a description of nat gateways matching the selection criteria
This function requires boto3 to be installed.
@@ -1315,22 +1684,32 @@ def describe_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None,
salt myminion boto_vpc.describe_nat_gateways nat_gateway_id='nat-03b02643b43216fe7'
salt myminion boto_vpc.describe_nat_gateways subnet_id='subnet-5b05942d'
- '''
+ """
- return _find_nat_gateways(nat_gateway_id=nat_gateway_id,
- subnet_id=subnet_id,
- subnet_name=subnet_name,
- vpc_id=vpc_id,
- vpc_name=vpc_name,
- states=states,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ return _find_nat_gateways(
+ nat_gateway_id=nat_gateway_id,
+ subnet_id=subnet_id,
+ subnet_name=subnet_name,
+ vpc_id=vpc_id,
+ vpc_name=vpc_name,
+ states=states,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def create_nat_gateway(subnet_id=None,
- subnet_name=None, allocation_id=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_nat_gateway(
+ subnet_id=None,
+ subnet_name=None,
+ allocation_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Create a NAT Gateway within an existing subnet. If allocation_id is
specified, the elastic IP address it references is associated with the
gateway. Otherwise, a new allocation_id is created and used.
@@ -1348,43 +1727,69 @@ def create_nat_gateway(subnet_id=None,
salt myminion boto_vpc.create_nat_gateway subnet_name=mysubnet
- '''
+ """
try:
if all((subnet_id, subnet_name)):
- raise SaltInvocationError('Only one of subnet_name or subnet_id may be '
- 'provided.')
+ raise SaltInvocationError(
+ "Only one of subnet_name or subnet_id may be " "provided."
+ )
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet",
+ subnet_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not subnet_id:
- return {'created': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "Subnet {0} does not exist.".format(subnet_name)
+ },
+ }
else:
- if not _get_resource('subnet', resource_id=subnet_id,
- region=region, key=key, keyid=keyid, profile=profile):
- return {'created': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_id)}}
+ if not _get_resource(
+ "subnet",
+ resource_id=subnet_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ):
+ return {
+ "created": False,
+ "error": {
+ "message": "Subnet {0} does not exist.".format(subnet_id)
+ },
+ }
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if not allocation_id:
- address = conn3.allocate_address(Domain='vpc')
- allocation_id = address.get('AllocationId')
+ address = conn3.allocate_address(Domain="vpc")
+ allocation_id = address.get("AllocationId")
# Have to go to boto3 to create NAT gateway
r = conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id)
- return {'created': True, 'id': r.get('NatGateway', {}).get('NatGatewayId')}
+ return {"created": True, "id": r.get("NatGateway", {}).get("NatGatewayId")}
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def delete_nat_gateway(nat_gateway_id,
- release_eips=False, region=None,
- key=None, keyid=None, profile=None,
- wait_for_delete=False, wait_for_delete_retries=5):
- '''
+def delete_nat_gateway(
+ nat_gateway_id,
+ release_eips=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ wait_for_delete=False,
+ wait_for_delete_retries=5,
+):
+ """
Delete a nat gateway (by id).
Returns True if the nat gateway was deleted and otherwise False.
@@ -1428,38 +1833,49 @@ def delete_nat_gateway(nat_gateway_id,
salt myminion boto_vpc.delete_nat_gateway nat_gateway_id=nat-03b02643b43216fe7
- '''
+ """
try:
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])
if gwinfo:
- gwinfo = gwinfo.get('NatGateways', [None])[0]
+ gwinfo = gwinfo.get("NatGateways", [None])[0]
conn3.delete_nat_gateway(NatGatewayId=nat_gateway_id)
# wait for deleting nat gateway to finish prior to attempt to release elastic ips
if wait_for_delete:
for retry in range(wait_for_delete_retries, 0, -1):
- if gwinfo and gwinfo['State'] not in ['deleted', 'failed']:
- time.sleep((2 ** (wait_for_delete_retries - retry)) + (random.randint(0, 1000) / 1000.0))
+ if gwinfo and gwinfo["State"] not in ["deleted", "failed"]:
+ time.sleep(
+ (2 ** (wait_for_delete_retries - retry))
+ + (random.randint(0, 1000) / 1000.0)
+ )
gwinfo = conn3.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])
if gwinfo:
- gwinfo = gwinfo.get('NatGateways', [None])[0]
+ gwinfo = gwinfo.get("NatGateways", [None])[0]
continue
break
if release_eips and gwinfo:
- for addr in gwinfo.get('NatGatewayAddresses'):
- conn3.release_address(AllocationId=addr.get('AllocationId'))
- return {'deleted': True}
+ for addr in gwinfo.get("NatGatewayAddresses"):
+ conn3.release_address(AllocationId=addr.get("AllocationId"))
+ return {"deleted": True}
except BotoServerError as e:
- return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto.get_error"](e)}
-def create_customer_gateway(vpn_connection_type, ip_address, bgp_asn,
- customer_gateway_name=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_customer_gateway(
+ vpn_connection_type,
+ ip_address,
+ bgp_asn,
+ customer_gateway_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a valid VPN connection type, a static IP address and a customer
gateway’s Border Gateway Protocol (BGP) Autonomous System Number,
create a customer gateway.
@@ -1473,18 +1889,31 @@ def create_customer_gateway(vpn_connection_type, ip_address, bgp_asn,
salt myminion boto_vpc.create_customer_gateway 'ipsec.1', '12.1.2.3', 65534
- '''
+ """
- return _create_resource('customer_gateway', customer_gateway_name,
- type=vpn_connection_type,
- ip_address=ip_address, bgp_asn=bgp_asn,
- tags=tags, region=region, key=key,
- keyid=keyid, profile=profile)
+ return _create_resource(
+ "customer_gateway",
+ customer_gateway_name,
+ type=vpn_connection_type,
+ ip_address=ip_address,
+ bgp_asn=bgp_asn,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def delete_customer_gateway(customer_gateway_id=None, customer_gateway_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_customer_gateway(
+ customer_gateway_id=None,
+ customer_gateway_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a customer gateway ID or name, delete the customer gateway.
Returns True if the customer gateway was deleted and returns False if the customer gateway was not deleted.
@@ -1498,18 +1927,28 @@ def delete_customer_gateway(customer_gateway_id=None, customer_gateway_name=None
salt myminion boto_vpc.delete_customer_gateway 'cgw-b6a247df'
- '''
+ """
- return _delete_resource(resource='customer_gateway',
- name=customer_gateway_name,
- resource_id=customer_gateway_id,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ return _delete_resource(
+ resource="customer_gateway",
+ name=customer_gateway_name,
+ resource_id=customer_gateway_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def customer_gateway_exists(customer_gateway_id=None, customer_gateway_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def customer_gateway_exists(
+ customer_gateway_id=None,
+ customer_gateway_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a customer gateway ID, check if the customer gateway ID exists.
Returns True if the customer gateway ID exists; returns False otherwise.
@@ -1521,18 +1960,35 @@ def customer_gateway_exists(customer_gateway_id=None, customer_gateway_name=None
salt myminion boto_vpc.customer_gateway_exists cgw-b6a247df
salt myminion boto_vpc.customer_gateway_exists customer_gateway_name=mycgw
- '''
+ """
- return resource_exists('customer_gateway', name=customer_gateway_name,
- resource_id=customer_gateway_id,
- region=region, key=key, keyid=keyid, profile=profile)
+ return resource_exists(
+ "customer_gateway",
+ name=customer_gateway_name,
+ resource_id=customer_gateway_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def create_dhcp_options(domain_name=None, domain_name_servers=None, ntp_servers=None,
- netbios_name_servers=None, netbios_node_type=None,
- dhcp_options_name=None, tags=None, vpc_id=None, vpc_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_dhcp_options(
+ domain_name=None,
+ domain_name_servers=None,
+ ntp_servers=None,
+ netbios_name_servers=None,
+ netbios_node_type=None,
+ dhcp_options_name=None,
+ tags=None,
+ vpc_id=None,
+ vpc_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given valid DHCP options, create a DHCP options record, optionally associating it with
an existing VPC.
@@ -1550,36 +2006,50 @@ def create_dhcp_options(domain_name=None, domain_name_servers=None, ntp_servers=
netbios_name_servers='[10.0.0.1]' netbios_node_type=1 \\
vpc_name='myvpc'
- '''
+ """
try:
if vpc_id or vpc_name:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
- return {'created': False,
- 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "VPC {0} does not exist.".format(vpc_name or vpc_id)
+ },
+ }
- r = _create_resource('dhcp_options', name=dhcp_options_name, domain_name=domain_name,
- domain_name_servers=domain_name_servers,
- ntp_servers=ntp_servers, netbios_name_servers=netbios_name_servers,
- netbios_node_type=netbios_node_type,
- region=region, key=key, keyid=keyid,
- profile=profile)
- if r.get('created') and vpc_id:
+ r = _create_resource(
+ "dhcp_options",
+ name=dhcp_options_name,
+ domain_name=domain_name,
+ domain_name_servers=domain_name_servers,
+ ntp_servers=ntp_servers,
+ netbios_name_servers=netbios_name_servers,
+ netbios_node_type=netbios_node_type,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
+ if r.get("created") and vpc_id:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- conn.associate_dhcp_options(r['id'], vpc_id)
- log.info(
- 'Associated options %s to VPC %s',
- r['id'], vpc_name or vpc_id
- )
+ conn.associate_dhcp_options(r["id"], vpc_id)
+ log.info("Associated options %s to VPC %s", r["id"], vpc_name or vpc_id)
return r
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def get_dhcp_options(
+ dhcp_options_name=None,
+ dhcp_options_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Return a dict with the current values of the requested DHCP options set
CLI Example:
@@ -1589,36 +2059,54 @@ def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None,
salt myminion boto_vpc.get_dhcp_options 'myfunnydhcpoptionsname'
.. versionadded:: 2016.3.0
- '''
+ """
if not any((dhcp_options_name, dhcp_options_id)):
- raise SaltInvocationError('At least one of the following must be specified: '
- 'dhcp_options_name, dhcp_options_id.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: "
+ "dhcp_options_name, dhcp_options_id."
+ )
if not dhcp_options_id and dhcp_options_name:
- dhcp_options_id = _get_resource_id('dhcp_options', dhcp_options_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ dhcp_options_id = _get_resource_id(
+ "dhcp_options",
+ dhcp_options_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not dhcp_options_id:
- return {'dhcp_options': {}}
+ return {"dhcp_options": {}}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = conn.get_all_dhcp_options(dhcp_options_ids=[dhcp_options_id])
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
if not r:
- return {'dhcp_options': None}
+ return {"dhcp_options": None}
- keys = ('domain_name', 'domain_name_servers', 'ntp_servers',
- 'netbios_name_servers', 'netbios_node_type')
+ keys = (
+ "domain_name",
+ "domain_name_servers",
+ "ntp_servers",
+ "netbios_name_servers",
+ "netbios_node_type",
+ )
- return {'dhcp_options': dict((k, r[0].options.get(k)) for k in keys)}
+ return {"dhcp_options": dict((k, r[0].options.get(k)) for k in keys)}
-def delete_dhcp_options(dhcp_options_id=None, dhcp_options_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_dhcp_options(
+ dhcp_options_id=None,
+ dhcp_options_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete dhcp options by id or name.
.. versionadded:: 2015.8.0
@@ -1629,18 +2117,29 @@ def delete_dhcp_options(dhcp_options_id=None, dhcp_options_name=None,
salt myminion boto_vpc.delete_dhcp_options 'dopt-b6a247df'
- '''
+ """
- return _delete_resource(resource='dhcp_options',
- name=dhcp_options_name,
- resource_id=dhcp_options_id,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ return _delete_resource(
+ resource="dhcp_options",
+ name=dhcp_options_name,
+ resource_id=dhcp_options_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def associate_dhcp_options_to_vpc(
+ dhcp_options_id,
+ vpc_id=None,
+ vpc_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given valid DHCP options id and a valid VPC id, associate the DHCP options record with the VPC.
Returns True if the DHCP options record was associated and returns False if it was not.
@@ -1651,29 +2150,50 @@ def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None,
salt myminion boto_vpc.associate_dhcp_options_to_vpc 'dhcp-a0bl34pp' 'vpc-6b1fe402'
- '''
+ """
try:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
- return {'associated': False,
- 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
+ return {
+ "associated": False,
+ "error": {
+ "message": "VPC {0} does not exist.".format(vpc_name or vpc_id)
+ },
+ }
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.associate_dhcp_options(dhcp_options_id, vpc_id):
- log.info('DHCP options with id %s were associated with VPC %s',
- dhcp_options_id, vpc_id)
- return {'associated': True}
+ log.info(
+ "DHCP options with id %s were associated with VPC %s",
+ dhcp_options_id,
+ vpc_id,
+ )
+ return {"associated": True}
else:
- log.warning('DHCP options with id %s were not associated with VPC %s',
- dhcp_options_id, vpc_id)
- return {'associated': False, 'error': {'message': 'DHCP options could not be associated.'}}
+ log.warning(
+ "DHCP options with id %s were not associated with VPC %s",
+ dhcp_options_id,
+ vpc_id,
+ )
+ return {
+ "associated": False,
+ "error": {"message": "DHCP options could not be associated."},
+ }
except BotoServerError as e:
- return {'associated': False, 'error': __utils__['boto.get_error'](e)}
+ return {"associated": False, "error": __utils__["boto.get_error"](e)}
-def dhcp_options_exists(dhcp_options_id=None, name=None, dhcp_options_name=None,
- tags=None, region=None, key=None, keyid=None, profile=None):
- '''
+def dhcp_options_exists(
+ dhcp_options_id=None,
+ name=None,
+ dhcp_options_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Check if a dhcp option exists.
Returns True if the dhcp option exists; returns False otherwise.
@@ -1684,23 +2204,40 @@ def dhcp_options_exists(dhcp_options_id=None, name=None, dhcp_options_name=None,
salt myminion boto_vpc.dhcp_options_exists dhcp_options_id='dhcp-a0bl34pp'
- '''
+ """
if name:
- log.warning('boto_vpc.dhcp_options_exists: name parameter is deprecated '
- 'use dhcp_options_name instead.')
+ log.warning(
+ "boto_vpc.dhcp_options_exists: name parameter is deprecated "
+ "use dhcp_options_name instead."
+ )
dhcp_options_name = name
- return resource_exists('dhcp_options', name=dhcp_options_name,
- resource_id=dhcp_options_id, tags=tags,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ return resource_exists(
+ "dhcp_options",
+ name=dhcp_options_name,
+ resource_id=dhcp_options_id,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def create_network_acl(vpc_id=None, vpc_name=None, network_acl_name=None,
- subnet_id=None, subnet_name=None, tags=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_network_acl(
+ vpc_id=None,
+ vpc_name=None,
+ network_acl_name=None,
+ subnet_id=None,
+ subnet_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a vpc_id, creates a network acl.
Returns the network acl id if successful, otherwise returns False.
@@ -1714,52 +2251,78 @@ def create_network_acl(vpc_id=None, vpc_name=None, network_acl_name=None,
salt myminion boto_vpc.create_network_acl 'vpc-6b1fe402'
- '''
+ """
_id = vpc_name or vpc_id
try:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
if not vpc_id:
- return {'created': False,
- 'error': {'message': 'VPC {0} does not exist.'.format(_id)}}
+ return {
+ "created": False,
+ "error": {"message": "VPC {0} does not exist.".format(_id)},
+ }
if all((subnet_id, subnet_name)):
- raise SaltInvocationError('Only one of subnet_name or subnet_id may be '
- 'provided.')
+ raise SaltInvocationError(
+ "Only one of subnet_name or subnet_id may be " "provided."
+ )
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not subnet_id:
- return {'created': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}}
+ return {
+ "created": False,
+ "error": {"message": "Subnet {0} does not exist.".format(subnet_name)},
+ }
elif subnet_id:
- if not _get_resource('subnet', resource_id=subnet_id,
- region=region, key=key, keyid=keyid, profile=profile):
- return {'created': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_id)}}
+ if not _get_resource(
+ "subnet",
+ resource_id=subnet_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ ):
+ return {
+ "created": False,
+ "error": {"message": "Subnet {0} does not exist.".format(subnet_id)},
+ }
- r = _create_resource('network_acl', name=network_acl_name, vpc_id=vpc_id,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ r = _create_resource(
+ "network_acl",
+ name=network_acl_name,
+ vpc_id=vpc_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
- if r.get('created') and subnet_id:
+ if r.get("created") and subnet_id:
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- association_id = conn.associate_network_acl(r['id'], subnet_id)
+ association_id = conn.associate_network_acl(r["id"], subnet_id)
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
- r['association_id'] = association_id
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
+ r["association_id"] = association_id
return r
-def delete_network_acl(network_acl_id=None, network_acl_name=None, disassociate=False,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_network_acl(
+ network_acl_id=None,
+ network_acl_name=None,
+ disassociate=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Delete a network acl based on the network_acl_id or network_acl_name provided.
CLI Examples:
@@ -1774,10 +2337,17 @@ def delete_network_acl(network_acl_id=None, network_acl_name=None, disassociate=
salt myminion boto_vpc.delete_network_acl network_acl_name='myacl' \\
disassociate=true
- '''
+ """
if disassociate:
- network_acl = _get_resource('network_acl', name=network_acl_name, region=region, key=key, keyid=keyid, profile=profile)
+ network_acl = _get_resource(
+ "network_acl",
+ name=network_acl_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if network_acl and network_acl.associations:
subnet_id = network_acl.associations[0].subnet_id
try:
@@ -1786,17 +2356,28 @@ def delete_network_acl(network_acl_id=None, network_acl_name=None, disassociate=
except BotoServerError:
pass
- return _delete_resource(resource='network_acl',
- name=network_acl_name,
- resource_id=network_acl_id,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ return _delete_resource(
+ resource="network_acl",
+ name=network_acl_name,
+ resource_id=network_acl_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def network_acl_exists(network_acl_id=None, name=None, network_acl_name=None,
- tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def network_acl_exists(
+ network_acl_id=None,
+ name=None,
+ network_acl_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Checks if a network acl exists.
Returns True if the network acl exists or returns False if it doesn't exist.
@@ -1806,24 +2387,38 @@ def network_acl_exists(network_acl_id=None, name=None, network_acl_name=None,
.. code-block:: bash
salt myminion boto_vpc.network_acl_exists network_acl_id='acl-5fb85d36'
- '''
+ """
if name:
- log.warning('boto_vpc.network_acl_exists: name parameter is deprecated '
- 'use network_acl_name instead.')
+ log.warning(
+ "boto_vpc.network_acl_exists: name parameter is deprecated "
+ "use network_acl_name instead."
+ )
network_acl_name = name
- return resource_exists('network_acl', name=network_acl_name,
- resource_id=network_acl_id, tags=tags,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ return resource_exists(
+ "network_acl",
+ name=network_acl_name,
+ resource_id=network_acl_id,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def associate_network_acl_to_subnet(network_acl_id=None, subnet_id=None,
- network_acl_name=None,
- subnet_name=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def associate_network_acl_to_subnet(
+ network_acl_id=None,
+ subnet_id=None,
+ network_acl_name=None,
+ subnet_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a network acl and subnet ids or names, associate a network acl to a subnet.
CLI Example:
@@ -1838,41 +2433,71 @@ def associate_network_acl_to_subnet(network_acl_id=None, subnet_id=None,
salt myminion boto_vpc.associate_network_acl_to_subnet \\
network_acl_id='myacl' subnet_id='mysubnet'
- '''
+ """
if network_acl_name:
- network_acl_id = _get_resource_id('network_acl', network_acl_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ network_acl_id = _get_resource_id(
+ "network_acl",
+ network_acl_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not network_acl_id:
- return {'associated': False,
- 'error': {'message': 'Network ACL {0} does not exist.'.format(network_acl_name)}}
+ return {
+ "associated": False,
+ "error": {
+ "message": "Network ACL {0} does not exist.".format(
+ network_acl_name
+ )
+ },
+ }
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not subnet_id:
- return {'associated': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}}
+ return {
+ "associated": False,
+ "error": {"message": "Subnet {0} does not exist.".format(subnet_name)},
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.associate_network_acl(network_acl_id, subnet_id)
if association_id:
- log.info('Network ACL with id %s was associated with subnet %s',
- network_acl_id, subnet_id)
+ log.info(
+ "Network ACL with id %s was associated with subnet %s",
+ network_acl_id,
+ subnet_id,
+ )
- return {'associated': True, 'id': association_id}
+ return {"associated": True, "id": association_id}
else:
- log.warning('Network ACL with id %s was not associated with subnet %s',
- network_acl_id, subnet_id)
- return {'associated': False, 'error': {'message': 'ACL could not be assocaited.'}}
+ log.warning(
+ "Network ACL with id %s was not associated with subnet %s",
+ network_acl_id,
+ subnet_id,
+ )
+ return {
+ "associated": False,
+ "error": {"message": "ACL could not be assocaited."},
+ }
except BotoServerError as e:
- return {'associated': False, 'error': __utils__['boto.get_error'](e)}
+ return {"associated": False, "error": __utils__["boto.get_error"](e)}
-def disassociate_network_acl(subnet_id=None, vpc_id=None, subnet_name=None, vpc_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def disassociate_network_acl(
+ subnet_id=None,
+ vpc_id=None,
+ subnet_name=None,
+ vpc_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a subnet ID, disassociates a network acl.
CLI Example:
@@ -1881,62 +2506,97 @@ def disassociate_network_acl(subnet_id=None, vpc_id=None, subnet_name=None, vpc_
salt myminion boto_vpc.disassociate_network_acl 'subnet-6a1fe403'
- '''
+ """
if not _exactly_one((subnet_name, subnet_id)):
- raise SaltInvocationError('One (but not both) of subnet_id or subnet_name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of subnet_id or subnet_name " "must be provided."
+ )
if all((vpc_name, vpc_id)):
- raise SaltInvocationError('Only one of vpc_id or vpc_name '
- 'may be provided.')
+ raise SaltInvocationError("Only one of vpc_id or vpc_name " "may be provided.")
try:
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet",
+ subnet_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not subnet_id:
- return {'disassociated': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}}
+ return {
+ "disassociated": False,
+ "error": {
+ "message": "Subnet {0} does not exist.".format(subnet_name)
+ },
+ }
if vpc_name or vpc_id:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.disassociate_network_acl(subnet_id, vpc_id=vpc_id)
- return {'disassociated': True, 'association_id': association_id}
+ return {"disassociated": True, "association_id": association_id}
except BotoServerError as e:
- return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
+ return {"disassociated": False, "error": __utils__["boto.get_error"](e)}
-def _create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=None,
- rule_action=None, cidr_block=None, egress=None,
- network_acl_name=None, icmp_code=None, icmp_type=None,
- port_range_from=None, port_range_to=None, replace=False,
- region=None, key=None, keyid=None, profile=None):
+def _create_network_acl_entry(
+ network_acl_id=None,
+ rule_number=None,
+ protocol=None,
+ rule_action=None,
+ cidr_block=None,
+ egress=None,
+ network_acl_name=None,
+ icmp_code=None,
+ icmp_type=None,
+ port_range_from=None,
+ port_range_to=None,
+ replace=False,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
if replace:
- rkey = 'replaced'
+ rkey = "replaced"
else:
- rkey = 'created'
+ rkey = "created"
if not _exactly_one((network_acl_name, network_acl_id)):
- raise SaltInvocationError('One (but not both) of network_acl_id or '
- 'network_acl_name must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of network_acl_id or "
+ "network_acl_name must be provided."
+ )
- for v in ('rule_number', 'protocol', 'rule_action', 'cidr_block'):
+ for v in ("rule_number", "protocol", "rule_action", "cidr_block"):
if locals()[v] is None:
- raise SaltInvocationError('{0} is required.'.format(v))
+ raise SaltInvocationError("{0} is required.".format(v))
if network_acl_name:
- network_acl_id = _get_resource_id('network_acl', network_acl_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ network_acl_id = _get_resource_id(
+ "network_acl",
+ network_acl_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not network_acl_id:
- return {rkey: False,
- 'error': {'message': 'Network ACL {0} does not exist.'.format(network_acl_name or network_acl_id)}}
+ return {
+ rkey: False,
+ "error": {
+ "message": "Network ACL {0} does not exist.".format(
+ network_acl_name or network_acl_id
+ )
+ },
+ }
if isinstance(protocol, six.string_types):
- if protocol == 'all':
+ if protocol == "all":
protocol = -1
else:
try:
@@ -1949,25 +2609,45 @@ def _create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=No
f = conn.replace_network_acl_entry
else:
f = conn.create_network_acl_entry
- created = f(network_acl_id, rule_number, protocol, rule_action,
- cidr_block, egress=egress, icmp_code=icmp_code,
- icmp_type=icmp_type, port_range_from=port_range_from,
- port_range_to=port_range_to)
+ created = f(
+ network_acl_id,
+ rule_number,
+ protocol,
+ rule_action,
+ cidr_block,
+ egress=egress,
+ icmp_code=icmp_code,
+ icmp_type=icmp_type,
+ port_range_from=port_range_from,
+ port_range_to=port_range_to,
+ )
if created:
- log.info('Network ACL entry was %s', rkey)
+ log.info("Network ACL entry was %s", rkey)
else:
- log.warning('Network ACL entry was not %s', rkey)
+ log.warning("Network ACL entry was not %s", rkey)
return {rkey: created}
except BotoServerError as e:
- return {rkey: False, 'error': __utils__['boto.get_error'](e)}
+ return {rkey: False, "error": __utils__["boto.get_error"](e)}
-def create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=None,
- rule_action=None, cidr_block=None, egress=None,
- network_acl_name=None, icmp_code=None, icmp_type=None,
- port_range_from=None, port_range_to=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def create_network_acl_entry(
+ network_acl_id=None,
+ rule_number=None,
+ protocol=None,
+ rule_action=None,
+ cidr_block=None,
+ egress=None,
+ network_acl_name=None,
+ icmp_code=None,
+ icmp_type=None,
+ port_range_from=None,
+ port_range_to=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a network acl entry.
CLI Example:
@@ -1977,18 +2657,30 @@ def create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=Non
salt myminion boto_vpc.create_network_acl_entry 'acl-5fb85d36' '32767' \\
'all' 'deny' '0.0.0.0/0' egress=true
- '''
+ """
kwargs = locals()
return _create_network_acl_entry(**kwargs)
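The kwargs = locals() line relies on locals() returning exactly the function's parameters when called before any other names are bound. A self-contained sketch of the forwarding idiom (the function names here are illustrative):

    def _impl(a=None, b=None, replace=False):
        return (a, b, replace)

    def public(a=None, b=None):
        # Captured before any assignments, locals() == {'a': ..., 'b': ...}
        kwargs = locals()
        return _impl(replace=True, **kwargs)

    assert public(1, 2) == (1, 2, True)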
-def replace_network_acl_entry(network_acl_id=None, rule_number=None, protocol=None,
- rule_action=None, cidr_block=None, egress=None,
- network_acl_name=None, icmp_code=None, icmp_type=None,
- port_range_from=None, port_range_to=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def replace_network_acl_entry(
+ network_acl_id=None,
+ rule_number=None,
+ protocol=None,
+ rule_action=None,
+ cidr_block=None,
+ egress=None,
+ network_acl_name=None,
+ icmp_code=None,
+ icmp_type=None,
+ port_range_from=None,
+ port_range_to=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Replaces a network acl entry.
@@ -1999,16 +2691,23 @@ def replace_network_acl_entry(network_acl_id=None, rule_number=None, protocol=No
salt myminion boto_vpc.replace_network_acl_entry 'acl-5fb85d36' '32767' \\
'all' 'deny' '0.0.0.0/0' egress=true
- '''
+ """
kwargs = locals()
return _create_network_acl_entry(replace=True, **kwargs)
-def delete_network_acl_entry(network_acl_id=None, rule_number=None, egress=None,
- network_acl_name=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def delete_network_acl_entry(
+ network_acl_id=None,
+ rule_number=None,
+ egress=None,
+ network_acl_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Deletes a network acl entry.
CLI Example:
@@ -2017,37 +2716,60 @@ def delete_network_acl_entry(network_acl_id=None, rule_number=None, egress=None,
salt myminion boto_vpc.delete_network_acl_entry 'acl-5fb85d36' '32767'
- '''
+ """
if not _exactly_one((network_acl_name, network_acl_id)):
- raise SaltInvocationError('One (but not both) of network_acl_id or '
- 'network_acl_name must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of network_acl_id or "
+ "network_acl_name must be provided."
+ )
- for v in ('rule_number', 'egress'):
+ for v in ("rule_number", "egress"):
if locals()[v] is None:
- raise SaltInvocationError('{0} is required.'.format(v))
+ raise SaltInvocationError("{0} is required.".format(v))
if network_acl_name:
- network_acl_id = _get_resource_id('network_acl', network_acl_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ network_acl_id = _get_resource_id(
+ "network_acl",
+ network_acl_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not network_acl_id:
- return {'deleted': False,
- 'error': {'message': 'Network ACL {0} does not exist.'.format(network_acl_name or network_acl_id)}}
+ return {
+ "deleted": False,
+ "error": {
+ "message": "Network ACL {0} does not exist.".format(
+ network_acl_name or network_acl_id
+ )
+ },
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- deleted = conn.delete_network_acl_entry(network_acl_id, rule_number, egress=egress)
+ deleted = conn.delete_network_acl_entry(
+ network_acl_id, rule_number, egress=egress
+ )
if deleted:
- log.info('Network ACL entry was deleted')
+ log.info("Network ACL entry was deleted")
else:
- log.warning('Network ACL was not deleted')
- return {'deleted': deleted}
+ log.warning("Network ACL was not deleted")
+ return {"deleted": deleted}
except BotoServerError as e:
- return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
+ return {"deleted": False, "error": __utils__["boto.get_error"](e)}
-def create_route_table(vpc_id=None, vpc_name=None, route_table_name=None,
- tags=None, region=None, key=None, keyid=None, profile=None):
- '''
+def create_route_table(
+ vpc_id=None,
+ vpc_name=None,
+ route_table_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Creates a route table.
.. versionchanged:: 2015.8.0
@@ -2061,19 +2783,35 @@ def create_route_table(vpc_id=None, vpc_name=None, route_table_name=None,
route_table_name='myroutetable'
salt myminion boto_vpc.create_route_table vpc_name='myvpc' \\
route_table_name='myroutetable'
- '''
+ """
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
- return {'created': False, 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
+ return {
+ "created": False,
+ "error": {"message": "VPC {0} does not exist.".format(vpc_name or vpc_id)},
+ }
- return _create_resource('route_table', route_table_name, tags=tags,
- vpc_id=vpc_id, region=region, key=key,
- keyid=keyid, profile=profile)
+ return _create_resource(
+ "route_table",
+ route_table_name,
+ tags=tags,
+ vpc_id=vpc_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def delete_route_table(route_table_id=None, route_table_name=None,
- region=None, key=None, keyid=None, profile=None):
- '''
+def delete_route_table(
+ route_table_id=None,
+ route_table_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Deletes a route table.
CLI Examples:
@@ -2083,15 +2821,29 @@ def delete_route_table(route_table_id=None, route_table_name=None,
salt myminion boto_vpc.delete_route_table route_table_id='rtb-1f382e7d'
salt myminion boto_vpc.delete_route_table route_table_name='myroutetable'
- '''
- return _delete_resource(resource='route_table', name=route_table_name,
- resource_id=route_table_id, region=region, key=key,
- keyid=keyid, profile=profile)
+ """
+ return _delete_resource(
+ resource="route_table",
+ name=route_table_name,
+ resource_id=route_table_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def route_table_exists(route_table_id=None, name=None, route_table_name=None,
- tags=None, region=None, key=None, keyid=None, profile=None):
- '''
+def route_table_exists(
+ route_table_id=None,
+ name=None,
+ route_table_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Checks if a route table exists.
CLI Example:
@@ -2100,23 +2852,42 @@ def route_table_exists(route_table_id=None, name=None, route_table_name=None,
salt myminion boto_vpc.route_table_exists route_table_id='rtb-1f382e7d'
- '''
+ """
if name:
- log.warning('boto_vpc.route_table_exists: name parameter is deprecated '
- 'use route_table_name instead.')
+ log.warning(
+ "boto_vpc.route_table_exists: name parameter is deprecated "
+ "use route_table_name instead."
+ )
route_table_name = name
- return resource_exists('route_table', name=route_table_name,
- resource_id=route_table_id, tags=tags,
- region=region, key=key, keyid=keyid,
- profile=profile)
+ return resource_exists(
+ "route_table",
+ name=route_table_name,
+ resource_id=route_table_id,
+ tags=tags,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
-def route_exists(destination_cidr_block, route_table_name=None, route_table_id=None,
- gateway_id=None, instance_id=None, interface_id=None, tags=None,
- region=None, key=None, keyid=None, profile=None, vpc_peering_connection_id=None):
- '''
+def route_exists(
+ destination_cidr_block,
+ route_table_name=None,
+ route_table_id=None,
+ gateway_id=None,
+ instance_id=None,
+ interface_id=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_peering_connection_id=None,
+):
+ """
Checks if a route exists.
.. versionadded:: 2015.8.0
@@ -2127,65 +2898,77 @@ def route_exists(destination_cidr_block, route_table_name=None, route_table_id=N
salt myminion boto_vpc.route_exists destination_cidr_block='10.0.0.0/20' gateway_id='local' route_table_name='test'
- '''
+ """
if not any((route_table_name, route_table_id)):
- raise SaltInvocationError('At least one of the following must be specified: route table name or route table id.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: route table name or route table id."
+ )
if not any((gateway_id, instance_id, interface_id, vpc_peering_connection_id)):
- raise SaltInvocationError('At least one of the following must be specified: gateway id, instance id, '
- 'interface id or VPC peering connection id.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: gateway id, instance id, "
+ "interface id or VPC peering connection id."
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if route_table_id:
- filter_parameters['route_table_ids'] = [route_table_id]
+ filter_parameters["route_table_ids"] = [route_table_id]
if route_table_name:
- filter_parameters['filters']['tag:Name'] = route_table_name
+ filter_parameters["filters"]["tag:Name"] = route_table_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
route_tables = conn.get_all_route_tables(**filter_parameters)
if len(route_tables) != 1:
- raise SaltInvocationError('Found more than one route table.')
+ raise SaltInvocationError("Found more than one route table.")
- route_check = {'destination_cidr_block': destination_cidr_block,
- 'gateway_id': gateway_id,
- 'instance_id': instance_id,
- 'interface_id': interface_id,
- 'vpc_peering_connection_id': vpc_peering_connection_id
- }
+ route_check = {
+ "destination_cidr_block": destination_cidr_block,
+ "gateway_id": gateway_id,
+ "instance_id": instance_id,
+ "interface_id": interface_id,
+ "vpc_peering_connection_id": vpc_peering_connection_id,
+ }
for route_match in route_tables[0].routes:
- route_dict = {'destination_cidr_block': route_match.destination_cidr_block,
- 'gateway_id': route_match.gateway_id,
- 'instance_id': route_match.instance_id,
- 'interface_id': route_match.interface_id,
- 'vpc_peering_connection_id': vpc_peering_connection_id
- }
+ route_dict = {
+ "destination_cidr_block": route_match.destination_cidr_block,
+ "gateway_id": route_match.gateway_id,
+ "instance_id": route_match.instance_id,
+ "interface_id": route_match.interface_id,
+ "vpc_peering_connection_id": vpc_peering_connection_id,
+ }
route_comp = set(route_dict.items()) ^ set(route_check.items())
if len(route_comp) == 0:
- log.info('Route %s exists.', destination_cidr_block)
- return {'exists': True}
+ log.info("Route %s exists.", destination_cidr_block)
+ return {"exists": True}
- log.warning('Route %s does not exist.', destination_cidr_block)
- return {'exists': False}
+ log.warning("Route %s does not exist.", destination_cidr_block)
+ return {"exists": False}
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def associate_route_table(route_table_id=None, subnet_id=None,
- route_table_name=None, subnet_name=None,
- region=None, key=None, keyid=None,
- profile=None):
- '''
+def associate_route_table(
+ route_table_id=None,
+ subnet_id=None,
+ route_table_name=None,
+ subnet_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given a route table and subnet name or id, associates the route table with the subnet.
CLI Example:
@@ -2199,42 +2982,60 @@ def associate_route_table(route_table_id=None, subnet_id=None,
salt myminion boto_vpc.associate_route_table route_table_name='myrtb' \\
subnet_name='mysubnet'
- '''
+ """
if all((subnet_id, subnet_name)):
- raise SaltInvocationError('Only one of subnet_name or subnet_id may be '
- 'provided.')
+ raise SaltInvocationError(
+ "Only one of subnet_name or subnet_id may be " "provided."
+ )
if subnet_name:
- subnet_id = _get_resource_id('subnet', subnet_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ subnet_id = _get_resource_id(
+ "subnet", subnet_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not subnet_id:
- return {'associated': False,
- 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}}
+ return {
+ "associated": False,
+ "error": {"message": "Subnet {0} does not exist.".format(subnet_name)},
+ }
if all((route_table_id, route_table_name)):
- raise SaltInvocationError('Only one of route_table_name or route_table_id may be '
- 'provided.')
+ raise SaltInvocationError(
+ "Only one of route_table_name or route_table_id may be " "provided."
+ )
if route_table_name:
- route_table_id = _get_resource_id('route_table', route_table_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ route_table_id = _get_resource_id(
+ "route_table",
+ route_table_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not route_table_id:
- return {'associated': False,
- 'error': {'message': 'Route table {0} does not exist.'.format(route_table_name)}}
+ return {
+ "associated": False,
+ "error": {
+ "message": "Route table {0} does not exist.".format(
+ route_table_name
+ )
+ },
+ }
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.associate_route_table(route_table_id, subnet_id)
- log.info('Route table %s was associated with subnet %s',
- route_table_id, subnet_id)
- return {'association_id': association_id}
+ log.info(
+ "Route table %s was associated with subnet %s", route_table_id, subnet_id
+ )
+ return {"association_id": association_id}
except BotoServerError as e:
- return {'associated': False, 'error': __utils__['boto.get_error'](e)}
+ return {"associated": False, "error": __utils__["boto.get_error"](e)}
-def disassociate_route_table(association_id, region=None, key=None, keyid=None, profile=None):
- '''
+def disassociate_route_table(
+ association_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Disassociates a route table.
association_id
@@ -2246,22 +3047,30 @@ def disassociate_route_table(association_id, region=None, key=None, keyid=None,
salt myminion boto_vpc.disassociate_route_table 'rtbassoc-d8ccddba'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.disassociate_route_table(association_id):
- log.info('Route table with association id %s has been disassociated.', association_id)
- return {'disassociated': True}
+ log.info(
+ "Route table with association id %s has been disassociated.",
+ association_id,
+ )
+ return {"disassociated": True}
else:
- log.warning('Route table with association id %s has not been disassociated.', association_id)
- return {'disassociated': False}
+ log.warning(
+ "Route table with association id %s has not been disassociated.",
+ association_id,
+ )
+ return {"disassociated": False}
except BotoServerError as e:
- return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
+ return {"disassociated": False, "error": __utils__["boto.get_error"](e)}
-def replace_route_table_association(association_id, route_table_id, region=None, key=None, keyid=None, profile=None):
- '''
+def replace_route_table_association(
+ association_id, route_table_id, region=None, key=None, keyid=None, profile=None
+):
+ """
Replaces a route table association.
CLI Example:
@@ -2270,29 +3079,42 @@ def replace_route_table_association(association_id, route_table_id, region=None,
salt myminion boto_vpc.replace_route_table_association 'rtbassoc-d8ccddba' 'rtb-1f382e7d'
- '''
+ """
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- association_id = conn.replace_route_table_association_with_assoc(association_id, route_table_id)
- log.info('Route table %s was reassociated with association id %s',
- route_table_id, association_id)
- return {'replaced': True, 'association_id': association_id}
+ association_id = conn.replace_route_table_association_with_assoc(
+ association_id, route_table_id
+ )
+ log.info(
+ "Route table %s was reassociated with association id %s",
+ route_table_id,
+ association_id,
+ )
+ return {"replaced": True, "association_id": association_id}
except BotoServerError as e:
- return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
+ return {"replaced": False, "error": __utils__["boto.get_error"](e)}
-def create_route(route_table_id=None, destination_cidr_block=None,
- route_table_name=None, gateway_id=None,
- internet_gateway_name=None,
- instance_id=None, interface_id=None,
- vpc_peering_connection_id=None, vpc_peering_connection_name=None,
- region=None, key=None, keyid=None, profile=None,
- nat_gateway_id=None,
- nat_gateway_subnet_name=None,
- nat_gateway_subnet_id=None,
- ):
- '''
+def create_route(
+ route_table_id=None,
+ destination_cidr_block=None,
+ route_table_name=None,
+ gateway_id=None,
+ internet_gateway_name=None,
+ instance_id=None,
+ interface_id=None,
+ vpc_peering_connection_id=None,
+ vpc_peering_connection_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ nat_gateway_id=None,
+ nat_gateway_subnet_name=None,
+ nat_gateway_subnet_id=None,
+):
+ """
Creates a route.
If a nat gateway is specified, boto3 must be installed.
@@ -2303,86 +3125,172 @@ def create_route(route_table_id=None, destination_cidr_block=None,
salt myminion boto_vpc.create_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
- '''
+ """
if not _exactly_one((route_table_name, route_table_id)):
- raise SaltInvocationError('One (but not both) of route_table_id or route_table_name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of route_table_id or route_table_name "
+ "must be provided."
+ )
- if not _exactly_one((gateway_id, internet_gateway_name, instance_id, interface_id, vpc_peering_connection_id,
- nat_gateway_id, nat_gateway_subnet_id, nat_gateway_subnet_name, vpc_peering_connection_name)):
- raise SaltInvocationError('Only one of gateway_id, internet_gateway_name, instance_id, '
- 'interface_id, vpc_peering_connection_id, nat_gateway_id, '
- 'nat_gateway_subnet_id, nat_gateway_subnet_name or vpc_peering_connection_name may be provided.')
+ if not _exactly_one(
+ (
+ gateway_id,
+ internet_gateway_name,
+ instance_id,
+ interface_id,
+ vpc_peering_connection_id,
+ nat_gateway_id,
+ nat_gateway_subnet_id,
+ nat_gateway_subnet_name,
+ vpc_peering_connection_name,
+ )
+ ):
+ raise SaltInvocationError(
+ "Only one of gateway_id, internet_gateway_name, instance_id, "
+ "interface_id, vpc_peering_connection_id, nat_gateway_id, "
+ "nat_gateway_subnet_id, nat_gateway_subnet_name or vpc_peering_connection_name may be provided."
+ )
if destination_cidr_block is None:
- raise SaltInvocationError('destination_cidr_block is required.')
+ raise SaltInvocationError("destination_cidr_block is required.")
try:
if route_table_name:
- route_table_id = _get_resource_id('route_table', route_table_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ route_table_id = _get_resource_id(
+ "route_table",
+ route_table_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not route_table_id:
- return {'created': False,
- 'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "route table {0} does not exist.".format(
+ route_table_name
+ )
+ },
+ }
if internet_gateway_name:
- gateway_id = _get_resource_id('internet_gateway', internet_gateway_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ gateway_id = _get_resource_id(
+ "internet_gateway",
+ internet_gateway_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not gateway_id:
- return {'created': False,
- 'error': {'message': 'internet gateway {0} does not exist.'.format(internet_gateway_name)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "internet gateway {0} does not exist.".format(
+ internet_gateway_name
+ )
+ },
+ }
if vpc_peering_connection_name:
- vpc_peering_connection_id = _get_resource_id('vpc_peering_connection', vpc_peering_connection_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ vpc_peering_connection_id = _get_resource_id(
+ "vpc_peering_connection",
+ vpc_peering_connection_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not vpc_peering_connection_id:
- return {'created': False,
- 'error': {'message': 'VPC peering connection {0} does not exist.'.format(vpc_peering_connection_name)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "VPC peering connection {0} does not exist.".format(
+ vpc_peering_connection_name
+ )
+ },
+ }
if nat_gateway_subnet_name:
- gws = describe_nat_gateways(subnet_name=nat_gateway_subnet_name,
- region=region, key=key, keyid=keyid, profile=profile)
+ gws = describe_nat_gateways(
+ subnet_name=nat_gateway_subnet_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not gws:
- return {'created': False,
- 'error': {'message': 'nat gateway for {0} does not exist.'.format(nat_gateway_subnet_name)}}
- nat_gateway_id = gws[0]['NatGatewayId']
+ return {
+ "created": False,
+ "error": {
+ "message": "nat gateway for {0} does not exist.".format(
+ nat_gateway_subnet_name
+ )
+ },
+ }
+ nat_gateway_id = gws[0]["NatGatewayId"]
if nat_gateway_subnet_id:
- gws = describe_nat_gateways(subnet_id=nat_gateway_subnet_id,
- region=region, key=key, keyid=keyid, profile=profile)
+ gws = describe_nat_gateways(
+ subnet_id=nat_gateway_subnet_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not gws:
- return {'created': False,
- 'error': {'message': 'nat gateway for {0} does not exist.'.format(nat_gateway_subnet_id)}}
- nat_gateway_id = gws[0]['NatGatewayId']
+ return {
+ "created": False,
+ "error": {
+ "message": "nat gateway for {0} does not exist.".format(
+ nat_gateway_subnet_id
+ )
+ },
+ }
+ nat_gateway_id = gws[0]["NatGatewayId"]
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
if not nat_gateway_id:
- return _create_resource('route', route_table_id=route_table_id,
- destination_cidr_block=destination_cidr_block,
- gateway_id=gateway_id, instance_id=instance_id,
- interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id,
- region=region, key=key, keyid=keyid, profile=profile)
+ return _create_resource(
+ "route",
+ route_table_id=route_table_id,
+ destination_cidr_block=destination_cidr_block,
+ gateway_id=gateway_id,
+ instance_id=instance_id,
+ interface_id=interface_id,
+ vpc_peering_connection_id=vpc_peering_connection_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
# For NAT gateway routes, boto3 is required
try:
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
- ret = conn3.create_route(RouteTableId=route_table_id,
- DestinationCidrBlock=destination_cidr_block,
- NatGatewayId=nat_gateway_id)
- return {'created': True, 'id': ret.get('NatGatewayId')}
+ ret = conn3.create_route(
+ RouteTableId=route_table_id,
+ DestinationCidrBlock=destination_cidr_block,
+ NatGatewayId=nat_gateway_id,
+ )
+ return {"created": True, "id": ret.get("NatGatewayId")}
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
-def delete_route(route_table_id=None, destination_cidr_block=None,
- route_table_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def delete_route(
+ route_table_id=None,
+ destination_cidr_block=None,
+ route_table_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Deletes a route.
CLI Example:
@@ -2391,38 +3299,64 @@ def delete_route(route_table_id=None, destination_cidr_block=None,
salt myminion boto_vpc.delete_route 'rtb-1f382e7d' '10.0.0.0/16'
- '''
+ """
if not _exactly_one((route_table_name, route_table_id)):
- raise SaltInvocationError('One (but not both) of route_table_id or route_table_name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of route_table_id or route_table_name "
+ "must be provided."
+ )
if destination_cidr_block is None:
- raise SaltInvocationError('destination_cidr_block is required.')
+ raise SaltInvocationError("destination_cidr_block is required.")
try:
if route_table_name:
- route_table_id = _get_resource_id('route_table', route_table_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ route_table_id = _get_resource_id(
+ "route_table",
+ route_table_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not route_table_id:
- return {'created': False,
- 'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
+ return {
+ "created": False,
+ "error": {
+ "message": "route table {0} does not exist.".format(
+ route_table_name
+ )
+ },
+ }
except BotoServerError as e:
- return {'created': False, 'error': __utils__['boto.get_error'](e)}
+ return {"created": False, "error": __utils__["boto.get_error"](e)}
- return _delete_resource(resource='route', resource_id=route_table_id,
- destination_cidr_block=destination_cidr_block,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ return _delete_resource(
+ resource="route",
+ resource_id=route_table_id,
+ destination_cidr_block=destination_cidr_block,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
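
# Illustrative call into delete_route from other Salt code; 'prod-rt' and
# 'myprofile' are made-up values, and the success shape is assumed from the
# _delete_resource naming convention.
result = __salt__["boto_vpc.delete_route"](
    route_table_name="prod-rt",
    destination_cidr_block="10.0.0.0/16",
    profile="myprofile",
)
# expected: {"deleted": True} on success, or a dict carrying an "error" key
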
-def replace_route(route_table_id=None, destination_cidr_block=None,
- route_table_name=None, gateway_id=None,
- instance_id=None, interface_id=None,
- region=None, key=None, keyid=None, profile=None,
- vpc_peering_connection_id=None):
- '''
+def replace_route(
+ route_table_id=None,
+ destination_cidr_block=None,
+ route_table_name=None,
+ gateway_id=None,
+ instance_id=None,
+ interface_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ vpc_peering_connection_id=None,
+):
+ """
Replaces a route.
CLI Example:
@@ -2431,47 +3365,73 @@ def replace_route(route_table_id=None, destination_cidr_block=None,
salt myminion boto_vpc.replace_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
- '''
+ """
if not _exactly_one((route_table_name, route_table_id)):
- raise SaltInvocationError('One (but not both) of route_table_id or route_table_name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of route_table_id or route_table_name "
+ "must be provided."
+ )
if destination_cidr_block is None:
- raise SaltInvocationError('destination_cidr_block is required.')
+ raise SaltInvocationError("destination_cidr_block is required.")
try:
if route_table_name:
- route_table_id = _get_resource_id('route_table', route_table_name,
- region=region, key=key,
- keyid=keyid, profile=profile)
+ route_table_id = _get_resource_id(
+ "route_table",
+ route_table_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not route_table_id:
- return {'replaced': False,
- 'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
+ return {
+ "replaced": False,
+ "error": {
+ "message": "route table {0} does not exist.".format(
+ route_table_name
+ )
+ },
+ }
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- if conn.replace_route(route_table_id, destination_cidr_block,
- gateway_id=gateway_id, instance_id=instance_id,
- interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id):
+ if conn.replace_route(
+ route_table_id,
+ destination_cidr_block,
+ gateway_id=gateway_id,
+ instance_id=instance_id,
+ interface_id=interface_id,
+ vpc_peering_connection_id=vpc_peering_connection_id,
+ ):
log.info(
- 'Route with cidr block %s on route table %s was replaced',
- route_table_id, destination_cidr_block
+ "Route with cidr block %s on route table %s was replaced",
+ route_table_id,
+ destination_cidr_block,
)
- return {'replaced': True}
+ return {"replaced": True}
else:
log.warning(
- 'Route with cidr block %s on route table %s was not replaced',
- route_table_id, destination_cidr_block
+ "Route with cidr block %s on route table %s was not replaced",
+ route_table_id,
+ destination_cidr_block,
)
- return {'replaced': False}
+ return {"replaced": False}
except BotoServerError as e:
- return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
+ return {"replaced": False, "error": __utils__["boto.get_error"](e)}
-def describe_route_table(route_table_id=None, route_table_name=None,
- tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def describe_route_table(
+ route_table_id=None,
+ route_table_name=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given route table properties, return route table details if matching table(s) exist.
.. versionadded:: 2015.8.0
@@ -2482,29 +3442,31 @@ def describe_route_table(route_table_id=None, route_table_name=None,
salt myminion boto_vpc.describe_route_table route_table_id='rtb-1f382e7d'
- '''
+ """
salt.utils.versions.warn_until(
- 'Magnesium',
- 'The \'describe_route_table\' method has been deprecated and '
- 'replaced by \'describe_route_tables\'.'
+ "Magnesium",
+ "The 'describe_route_table' method has been deprecated and "
+ "replaced by 'describe_route_tables'.",
)
if not any((route_table_id, route_table_name, tags)):
- raise SaltInvocationError('At least one of the following must be specified: '
- 'route table id, route table name, or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: "
+ "route table id, route table name, or tags."
+ )
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'filters': {}}
+ filter_parameters = {"filters": {}}
if route_table_id:
- filter_parameters['route_table_ids'] = route_table_id
+ filter_parameters["route_table_ids"] = route_table_id
if route_table_name:
- filter_parameters['filters']['tag:Name'] = route_table_name
+ filter_parameters["filters"]["tag:Name"] = route_table_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ filter_parameters["filters"]["tag:{0}".format(tag_name)] = tag_value
route_tables = conn.get_all_route_tables(**filter_parameters)
@@ -2512,28 +3474,40 @@ def describe_route_table(route_table_id=None, route_table_name=None,
return {}
route_table = {}
- keys = ['id', 'vpc_id', 'tags', 'routes', 'associations']
- route_keys = ['destination_cidr_block', 'gateway_id', 'instance_id', 'interface_id', 'vpc_peering_connection_id']
- assoc_keys = ['id', 'main', 'route_table_id', 'subnet_id']
+ keys = ["id", "vpc_id", "tags", "routes", "associations"]
+ route_keys = [
+ "destination_cidr_block",
+ "gateway_id",
+ "instance_id",
+ "interface_id",
+ "vpc_peering_connection_id",
+ ]
+ assoc_keys = ["id", "main", "route_table_id", "subnet_id"]
for item in route_tables:
for key in keys:
if hasattr(item, key):
route_table[key] = getattr(item, key)
- if key == 'routes':
+ if key == "routes":
route_table[key] = _key_iter(key, route_keys, item)
- if key == 'associations':
+ if key == "associations":
route_table[key] = _key_iter(key, assoc_keys, item)
return route_table
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def describe_route_tables(route_table_id=None, route_table_name=None,
- vpc_id=None,
- tags=None, region=None, key=None, keyid=None,
- profile=None):
- '''
+def describe_route_tables(
+ route_table_id=None,
+ route_table_name=None,
+ vpc_id=None,
+ tags=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Given route table properties, return details of all matching route tables.
This function requires boto3 to be installed.
@@ -2546,86 +3520,108 @@ def describe_route_tables(route_table_id=None, route_table_name=None,
salt myminion boto_vpc.describe_route_tables vpc_id='vpc-a6a9efc3'
- '''
+ """
if not any((route_table_id, route_table_name, tags, vpc_id)):
- raise SaltInvocationError('At least one of the following must be specified: '
- 'route table id, route table name, vpc_id, or tags.')
+ raise SaltInvocationError(
+ "At least one of the following must be specified: "
+ "route table id, route table name, vpc_id, or tags."
+ )
try:
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
- filter_parameters = {'Filters': []}
+ filter_parameters = {"Filters": []}
if route_table_id:
- filter_parameters['RouteTableIds'] = [route_table_id]
+ filter_parameters["RouteTableIds"] = [route_table_id]
if vpc_id:
- filter_parameters['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ filter_parameters["Filters"].append({"Name": "vpc-id", "Values": [vpc_id]})
if route_table_name:
- filter_parameters['Filters'].append({'Name': 'tag:Name', 'Values': [route_table_name]})
+ filter_parameters["Filters"].append(
+ {"Name": "tag:Name", "Values": [route_table_name]}
+ )
if tags:
for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['Filters'].append({'Name': 'tag:{0}'.format(tag_name),
- 'Values': [tag_value]})
+ filter_parameters["Filters"].append(
+ {"Name": "tag:{0}".format(tag_name), "Values": [tag_value]}
+ )
- route_tables = conn3.describe_route_tables(**filter_parameters).get('RouteTables', [])
+ route_tables = conn3.describe_route_tables(**filter_parameters).get(
+ "RouteTables", []
+ )
if not route_tables:
return []
tables = []
- keys = {'id': 'RouteTableId',
- 'vpc_id': 'VpcId',
- 'tags': 'Tags',
- 'routes': 'Routes',
- 'associations': 'Associations'
- }
- route_keys = {'destination_cidr_block': 'DestinationCidrBlock',
- 'gateway_id': 'GatewayId',
- 'instance_id': 'Instance',
- 'interface_id': 'NetworkInterfaceId',
- 'nat_gateway_id': 'NatGatewayId',
- 'vpc_peering_connection_id': 'VpcPeeringConnectionId',
- }
- assoc_keys = {'id': 'RouteTableAssociationId',
- 'main': 'Main',
- 'route_table_id': 'RouteTableId',
- 'SubnetId': 'subnet_id',
- }
+ keys = {
+ "id": "RouteTableId",
+ "vpc_id": "VpcId",
+ "tags": "Tags",
+ "routes": "Routes",
+ "associations": "Associations",
+ }
+ route_keys = {
+ "destination_cidr_block": "DestinationCidrBlock",
+ "gateway_id": "GatewayId",
+ "instance_id": "Instance",
+ "interface_id": "NetworkInterfaceId",
+ "nat_gateway_id": "NatGatewayId",
+ "vpc_peering_connection_id": "VpcPeeringConnectionId",
+ }
+ assoc_keys = {
+ "id": "RouteTableAssociationId",
+ "main": "Main",
+ "route_table_id": "RouteTableId",
+ "SubnetId": "subnet_id",
+ }
for item in route_tables:
route_table = {}
for outkey, inkey in six.iteritems(keys):
if inkey in item:
- if outkey == 'routes':
+ if outkey == "routes":
route_table[outkey] = _key_remap(inkey, route_keys, item)
- elif outkey == 'associations':
+ elif outkey == "associations":
route_table[outkey] = _key_remap(inkey, assoc_keys, item)
- elif outkey == 'tags':
+ elif outkey == "tags":
route_table[outkey] = {}
for tagitem in item.get(inkey, []):
- route_table[outkey][tagitem.get('Key')] = tagitem.get('Value')
+ route_table[outkey][tagitem.get("Key")] = tagitem.get(
+ "Value"
+ )
else:
route_table[outkey] = item.get(inkey)
tables.append(route_table)
return tables
except BotoServerError as e:
- return {'error': __utils__['boto.get_error'](e)}
+ return {"error": __utils__["boto.get_error"](e)}
-def _create_dhcp_options(conn, domain_name=None, domain_name_servers=None, ntp_servers=None, netbios_name_servers=None,
- netbios_node_type=None):
- return conn.create_dhcp_options(domain_name=domain_name, domain_name_servers=domain_name_servers,
- ntp_servers=ntp_servers, netbios_name_servers=netbios_name_servers,
- netbios_node_type=netbios_node_type)
+def _create_dhcp_options(
+ conn,
+ domain_name=None,
+ domain_name_servers=None,
+ ntp_servers=None,
+ netbios_name_servers=None,
+ netbios_node_type=None,
+):
+ return conn.create_dhcp_options(
+ domain_name=domain_name,
+ domain_name_servers=domain_name_servers,
+ ntp_servers=ntp_servers,
+ netbios_name_servers=netbios_name_servers,
+ netbios_node_type=netbios_node_type,
+ )
def _maybe_set_name_tag(name, obj):
if name:
obj.add_tag("Name", name)
- log.debug('%s is now named as %s', obj, name)
+ log.debug("%s is now named as %s", obj, name)
def _maybe_set_tags(tags, obj):
@@ -2637,36 +3633,36 @@ def _maybe_set_tags(tags, obj):
except AttributeError:
for tag, value in tags.items():
obj.add_tag(tag, value)
- log.debug('The following tags: %s were added to %s', ', '.join(tags), obj)
+ log.debug("The following tags: %s were added to %s", ", ".join(tags), obj)
def _maybe_set_dns(conn, vpcid, dns_support, dns_hostnames):
if dns_support:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_support=dns_support)
- log.debug('DNS support was set to: %s on vpc %s', dns_support, vpcid)
+ log.debug("DNS support was set to: %s on vpc %s", dns_support, vpcid)
if dns_hostnames:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_hostnames=dns_hostnames)
- log.debug('DNS hostnames was set to: %s on vpc %s', dns_hostnames, vpcid)
+ log.debug("DNS hostnames was set to: %s on vpc %s", dns_hostnames, vpcid)
def _maybe_name_route_table(conn, vpcid, vpc_name):
- route_tables = conn.get_all_route_tables(filters={'vpc_id': vpcid})
+ route_tables = conn.get_all_route_tables(filters={"vpc_id": vpcid})
if not route_tables:
- log.warning('no default route table found')
+ log.warning("no default route table found")
return
default_table = None
for table in route_tables:
- for association in getattr(table, 'associations', {}):
- if getattr(association, 'main', False):
+ for association in getattr(table, "associations", {}):
+ if getattr(association, "main", False):
default_table = table
break
if not default_table:
- log.warning('no default route table found')
+ log.warning("no default route table found")
return
- name = '{0}-default-table'.format(vpc_name)
+ name = "{0}-default-table".format(vpc_name)
_maybe_set_name_tag(name, default_table)
- log.debug('Default route table name was set to: %s on vpc %s', name, vpcid)
+ log.debug("Default route table name was set to: %s on vpc %s", name, vpcid)
def _key_iter(key, keys, item):
@@ -2691,16 +3687,18 @@ def _key_remap(key, keys, item):
return elements_list
-def _get_subnet_explicit_route_table(subnet_id, vpc_id, conn=None, region=None, key=None, keyid=None, profile=None):
- '''
+def _get_subnet_explicit_route_table(
+ subnet_id, vpc_id, conn=None, region=None, key=None, keyid=None, profile=None
+):
+ """
helper function to find subnet explicit route table associations
.. versionadded:: 2016.11.0
- '''
+ """
if not conn:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn:
- vpc_route_tables = conn.get_all_route_tables(filters={'vpc_id': vpc_id})
+ vpc_route_tables = conn.get_all_route_tables(filters={"vpc_id": vpc_id})
for vpc_route_table in vpc_route_tables:
for rt_association in vpc_route_table.associations:
if rt_association.subnet_id == subnet_id and not rt_association.main:
@@ -2708,11 +3706,20 @@ def _get_subnet_explicit_route_table(subnet_id, vpc_id, conn=None, region=None,
return None
-def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=None,
- peer_vpc_id=None, peer_vpc_name=None, name=None,
- peer_owner_id=None, region=None,
- key=None, keyid=None, profile=None, dry_run=False):
- '''
+def request_vpc_peering_connection(
+ requester_vpc_id=None,
+ requester_vpc_name=None,
+ peer_vpc_id=None,
+ peer_vpc_name=None,
+ name=None,
+ peer_owner_id=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ dry_run=False,
+):
+ """
Request a VPC peering connection between two VPCs.
.. versionadded:: 2016.11.0
@@ -2766,67 +3773,81 @@ def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=Non
# Specify a region
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da region=us-west-2
- '''
- conn = _get_conn3(region=region, key=key, keyid=keyid,
- profile=profile)
+ """
+ conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if name and _vpc_peering_conn_id_for_name(name, conn):
- raise SaltInvocationError('A VPC peering connection with this name already '
- 'exists! Please specify a different name.')
+ raise SaltInvocationError(
+ "A VPC peering connection with this name already "
+ "exists! Please specify a different name."
+ )
if not _exactly_one((requester_vpc_id, requester_vpc_name)):
- raise SaltInvocationError('Exactly one of requester_vpc_id or '
- 'requester_vpc_name is required')
+ raise SaltInvocationError(
+ "Exactly one of requester_vpc_id or " "requester_vpc_name is required"
+ )
if not _exactly_one((peer_vpc_id, peer_vpc_name)):
- raise SaltInvocationError('Exactly one of peer_vpc_id or '
- 'peer_vpc_name is required.')
+ raise SaltInvocationError(
+ "Exactly one of peer_vpc_id or " "peer_vpc_name is required."
+ )
if requester_vpc_name:
- requester_vpc_id = _get_id(vpc_name=requester_vpc_name, region=region, key=key,
- keyid=keyid, profile=profile)
+ requester_vpc_id = _get_id(
+ vpc_name=requester_vpc_name,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile,
+ )
if not requester_vpc_id:
- return {'error': 'Could not resolve VPC name {0} to an ID'.format(requester_vpc_name)}
+ return {
+ "error": "Could not resolve VPC name {0} to an ID".format(
+ requester_vpc_name
+ )
+ }
if peer_vpc_name:
- peer_vpc_id = _get_id(vpc_name=peer_vpc_name, region=region, key=key,
- keyid=keyid, profile=profile)
+ peer_vpc_id = _get_id(
+ vpc_name=peer_vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not peer_vpc_id:
- return {'error': 'Could not resolve VPC name {0} to an ID'.format(peer_vpc_name)}
+ return {
+ "error": "Could not resolve VPC name {0} to an ID".format(peer_vpc_name)
+ }
try:
- log.debug('Trying to request vpc peering connection')
+ log.debug("Trying to request vpc peering connection")
if not peer_owner_id:
vpc_peering = conn.create_vpc_peering_connection(
- VpcId=requester_vpc_id,
- PeerVpcId=peer_vpc_id,
- DryRun=dry_run)
+ VpcId=requester_vpc_id, PeerVpcId=peer_vpc_id, DryRun=dry_run
+ )
else:
vpc_peering = conn.create_vpc_peering_connection(
VpcId=requester_vpc_id,
PeerVpcId=peer_vpc_id,
PeerOwnerId=peer_owner_id,
- DryRun=dry_run)
- peering = vpc_peering.get('VpcPeeringConnection', {})
- peering_conn_id = peering.get('VpcPeeringConnectionId', 'ERROR')
- msg = 'VPC peering {0} requested.'.format(peering_conn_id)
+ DryRun=dry_run,
+ )
+ peering = vpc_peering.get("VpcPeeringConnection", {})
+ peering_conn_id = peering.get("VpcPeeringConnectionId", "ERROR")
+ msg = "VPC peering {0} requested.".format(peering_conn_id)
log.debug(msg)
if name:
- log.debug('Adding name tag to vpc peering connection')
+ log.debug("Adding name tag to vpc peering connection")
conn.create_tags(
- Resources=[peering_conn_id],
- Tags=[{'Key': 'Name', 'Value': name}]
+ Resources=[peering_conn_id], Tags=[{"Key": "Name", "Value": name}]
)
- log.debug('Applied name tag to vpc peering connection')
- msg += ' With name {0}.'.format(name)
+ log.debug("Applied name tag to vpc peering connection")
+ msg += " With name {0}.".format(name)
- return {'msg': msg}
+ return {"msg": msg}
except botocore.exceptions.ClientError as err:
- log.error('Got an error while trying to request vpc peering')
- return {'error': __utils__['boto.get_error'](err)}
+ log.error("Got an error while trying to request vpc peering")
+ return {"error": __utils__["boto.get_error"](err)}
def _get_peering_connection_ids(name, conn):
- '''
+ """
:param name: The name of the VPC peering connection.
:type name: String
:param conn: The boto aws ec2 connection.
@@ -2834,27 +3855,22 @@ def _get_peering_connection_ids(name, conn):
Returns the VPC peering connection ids
given the VPC peering connection name.
- '''
- filters = [{
- 'Name': 'tag:Name',
- 'Values': [name],
- }, {
- 'Name': 'status-code',
- 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING],
- }]
+ """
+ filters = [
+ {"Name": "tag:Name", "Values": [name]},
+ {"Name": "status-code", "Values": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]},
+ ]
- peerings = conn.describe_vpc_peering_connections(
- Filters=filters).get('VpcPeeringConnections',
- [])
- return [x['VpcPeeringConnectionId'] for x in peerings]
+ peerings = conn.describe_vpc_peering_connections(Filters=filters).get(
+ "VpcPeeringConnections", []
+ )
+ return [x["VpcPeeringConnectionId"] for x in peerings]
-def describe_vpc_peering_connection(name,
- region=None,
- key=None,
- keyid=None,
- profile=None):
- '''
+def describe_vpc_peering_connection(
+ name, region=None, key=None, keyid=None, profile=None
+):
+ """
Returns any VPC peering connection id(s) for the given VPC
peering connection name.
@@ -2879,23 +3895,15 @@ def describe_vpc_peering_connection(name,
# Specify a region
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
- '''
- conn = _get_conn3(region=region, key=key, keyid=keyid,
- profile=profile)
- return {
- 'VPC-Peerings': _get_peering_connection_ids(name, conn)
- }
+ """
+ conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
+ return {"VPC-Peerings": _get_peering_connection_ids(name, conn)}
def accept_vpc_peering_connection( # pylint: disable=too-many-arguments
- conn_id='',
- name='',
- region=None,
- key=None,
- keyid=None,
- profile=None,
- dry_run=False):
- '''
+ conn_id="", name="", region=None, key=None, keyid=None, profile=None, dry_run=False
+):
+ """
Accept a VPC peering connection request between two VPCs.
.. versionadded:: 2016.11.0
@@ -2922,58 +3930,70 @@ def accept_vpc_peering_connection( # pylint: disable=too-many-arguments
# specify an id
salt myminion boto_vpc.accept_vpc_peering_connection conn_id=pcx-8a8939e3
- '''
+ """
if not _exactly_one((conn_id, name)):
- raise SaltInvocationError('One (but not both) of '
- 'vpc_peering_connection_id or name '
- 'must be provided.')
+ raise SaltInvocationError(
+ "One (but not both) of "
+ "vpc_peering_connection_id or name "
+ "must be provided."
+ )
- conn = _get_conn3(region=region, key=key, keyid=keyid,
- profile=profile)
+ conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if name:
conn_id = _vpc_peering_conn_id_for_name(name, conn)
if not conn_id:
- raise SaltInvocationError('No ID found for this '
- 'VPC peering connection! ({0}) '
- 'Please make sure this VPC peering '
- 'connection exists '
- 'or invoke this function with '
- 'a VPC peering connection '
- 'ID'.format(name))
+ raise SaltInvocationError(
+ "No ID found for this "
+ "VPC peering connection! ({0}) "
+ "Please make sure this VPC peering "
+ "connection exists "
+ "or invoke this function with "
+ "a VPC peering connection "
+ "ID".format(name)
+ )
try:
- log.debug('Trying to accept vpc peering connection')
+ log.debug("Trying to accept vpc peering connection")
conn.accept_vpc_peering_connection(
- DryRun=dry_run,
- VpcPeeringConnectionId=conn_id)
- return {'msg': 'VPC peering connection accepted.'}
+ DryRun=dry_run, VpcPeeringConnectionId=conn_id
+ )
+ return {"msg": "VPC peering connection accepted."}
except botocore.exceptions.ClientError as err:
- log.error('Got an error while trying to accept vpc peering')
- return {'error': __utils__['boto.get_error'](err)}
+ log.error("Got an error while trying to accept vpc peering")
+ return {"error": __utils__["boto.get_error"](err)}
def _vpc_peering_conn_id_for_name(name, conn):
- '''
+ """
Get the ID associated with this name
- '''
- log.debug('Retrieving VPC peering connection id')
+ """
+ log.debug("Retrieving VPC peering connection id")
ids = _get_peering_connection_ids(name, conn)
if not ids:
ids = [None] # Let callers handle the case where we have no id
elif len(ids) > 1:
- raise SaltInvocationError('Found multiple VPC peering connections '
- 'with the same name!! '
- 'Please make sure you have only '
- 'one VPC peering connection named {0} '
- 'or invoke this function with a VPC '
- 'peering connection ID'.format(name))
+ raise SaltInvocationError(
+ "Found multiple VPC peering connections "
+ "with the same name!! "
+ "Please make sure you have only "
+ "one VPC peering connection named {0} "
+ "or invoke this function with a VPC "
+ "peering connection ID".format(name)
+ )
return ids[0]
-def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
- key=None, keyid=None, profile=None, dry_run=False):
- '''
+def delete_vpc_peering_connection(
+ conn_id=None,
+ conn_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+ dry_run=False,
+):
+ """
Delete a VPC peering connection.
.. versionadded:: 2016.11.0
@@ -3011,30 +4031,36 @@ def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
# specify an id
salt myminion boto_vpc.delete_vpc_peering_connection conn_id=pcx-8a8939e3
- '''
+ """
if not _exactly_one((conn_id, conn_name)):
- raise SaltInvocationError('Exactly one of conn_id or '
- 'conn_name must be provided.')
+ raise SaltInvocationError(
+ "Exactly one of conn_id or " "conn_name must be provided."
+ )
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if conn_name:
conn_id = _vpc_peering_conn_id_for_name(conn_name, conn)
if not conn_id:
- raise SaltInvocationError("Couldn't resolve VPC peering connection "
- "{0} to an ID".format(conn_name))
+ raise SaltInvocationError(
+ "Couldn't resolve VPC peering connection "
+ "{0} to an ID".format(conn_name)
+ )
try:
- log.debug('Trying to delete vpc peering connection')
- conn.delete_vpc_peering_connection(DryRun=dry_run, VpcPeeringConnectionId=conn_id)
- return {'msg': 'VPC peering connection deleted.'}
+ log.debug("Trying to delete vpc peering connection")
+ conn.delete_vpc_peering_connection(
+ DryRun=dry_run, VpcPeeringConnectionId=conn_id
+ )
+ return {"msg": "VPC peering connection deleted."}
except botocore.exceptions.ClientError as err:
- e = __utils__['boto.get_error'](err)
- log.error('Failed to delete VPC peering %s: %s', conn_name or conn_id, e)
- return {'error': e}
+ e = __utils__["boto.get_error"](err)
+ log.error("Failed to delete VPC peering %s: %s", conn_name or conn_id, e)
+ return {"error": e}
-def is_peering_connection_pending(conn_id=None, conn_name=None, region=None,
- key=None, keyid=None, profile=None):
- '''
+def is_peering_connection_pending(
+ conn_id=None, conn_name=None, region=None, key=None, keyid=None, profile=None
+):
+ """
Check if a VPC peering connection is in the pending state.
.. versionadded:: 2016.11.0
@@ -3068,35 +4094,55 @@ def is_peering_connection_pending(conn_id=None, conn_name=None, region=None,
# specify an id
salt myminion boto_vpc.is_peering_connection_pending conn_id=pcx-8a8939e3
- '''
+ """
if not _exactly_one((conn_id, conn_name)):
- raise SaltInvocationError('Exactly one of conn_id or conn_name must be provided.')
+ raise SaltInvocationError(
+ "Exactly one of conn_id or conn_name must be provided."
+ )
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
if conn_id:
- vpcs = conn.describe_vpc_peering_connections(VpcPeeringConnectionIds=[conn_id]).get('VpcPeeringConnections', [])
+ vpcs = conn.describe_vpc_peering_connections(
+ VpcPeeringConnectionIds=[conn_id]
+ ).get("VpcPeeringConnections", [])
else:
- filters = [{'Name': 'tag:Name', 'Values': [conn_name]},
- {'Name': 'status-code', 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]}]
- vpcs = conn.describe_vpc_peering_connections(Filters=filters).get('VpcPeeringConnections', [])
+ filters = [
+ {"Name": "tag:Name", "Values": [conn_name]},
+ {
+ "Name": "status-code",
+ "Values": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING],
+ },
+ ]
+ vpcs = conn.describe_vpc_peering_connections(Filters=filters).get(
+ "VpcPeeringConnections", []
+ )
if not vpcs:
return False
elif len(vpcs) > 1:
- raise SaltInvocationError('Found more than one ID for the VPC peering '
- 'connection ({0}). Please call this function '
- 'with an ID instead.'.format(conn_id or conn_name))
+ raise SaltInvocationError(
+ "Found more than one ID for the VPC peering "
+ "connection ({0}). Please call this function "
+ "with an ID instead.".format(conn_id or conn_name)
+ )
else:
- status = vpcs[0]['Status']['Code']
+ status = vpcs[0]["Status"]["Code"]
return status == PENDING_ACCEPTANCE
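
# The status-code constants used in the filters above are defined near the
# top of this module; they are assumed to mirror EC2's documented peering
# status codes:
#   ACTIVE = "active"
#   PENDING_ACCEPTANCE = "pending-acceptance"
#   PROVISIONING = "provisioning"
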
-def peering_connection_pending_from_vpc(conn_id=None, conn_name=None, vpc_id=None,
- vpc_name=None, region=None, key=None,
- keyid=None, profile=None):
- '''
+def peering_connection_pending_from_vpc(
+ conn_id=None,
+ conn_name=None,
+ vpc_id=None,
+ vpc_name=None,
+ region=None,
+ key=None,
+ keyid=None,
+ profile=None,
+):
+ """
Check if a VPC peering connection is in the pending state, and requested from the given VPC.
.. versionadded:: 2016.11.0
@@ -3132,36 +4178,46 @@ def peering_connection_pending_from_vpc(conn_id=None, conn_name=None, vpc_id=Non
salt myminion boto_vpc.peering_connection_pending_from_vpc conn_name=salt-vpc vpc_name=salt-vpc
- '''
+ """
if not _exactly_one((conn_id, conn_name)):
- raise SaltInvocationError('Exactly one of conn_id or conn_name must be provided.')
+ raise SaltInvocationError(
+ "Exactly one of conn_id or conn_name must be provided."
+ )
if not _exactly_one((vpc_id, vpc_name)):
- raise SaltInvocationError('Exactly one of vpc_id or vpc_name must be provided.')
+ raise SaltInvocationError("Exactly one of vpc_id or vpc_name must be provided.")
if vpc_name:
- vpc_id = check_vpc(vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile)
+ vpc_id = check_vpc(
+ vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile
+ )
if not vpc_id:
- log.warning('Could not resolve VPC name %s to an ID', vpc_name)
+ log.warning("Could not resolve VPC name %s to an ID", vpc_name)
return False
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
- filters = [{'Name': 'requester-vpc-info.vpc-id', 'Values': [vpc_id]},
- {'Name': 'status-code', 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]}]
+ filters = [
+ {"Name": "requester-vpc-info.vpc-id", "Values": [vpc_id]},
+ {"Name": "status-code", "Values": [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]},
+ ]
if conn_id:
- filters += [{'Name': 'vpc-peering-connection-id', 'Values': [conn_id]}]
+ filters += [{"Name": "vpc-peering-connection-id", "Values": [conn_id]}]
else:
- filters += [{'Name': 'tag:Name', 'Values': [conn_name]}]
+ filters += [{"Name": "tag:Name", "Values": [conn_name]}]
- vpcs = conn.describe_vpc_peering_connections(Filters=filters).get('VpcPeeringConnections', [])
+ vpcs = conn.describe_vpc_peering_connections(Filters=filters).get(
+ "VpcPeeringConnections", []
+ )
if not vpcs:
return False
elif len(vpcs) > 1:
- raise SaltInvocationError('Found more than one ID for the VPC peering '
- 'connection ({0}). Please call this function '
- 'with an ID instead.'.format(conn_id or conn_name))
+ raise SaltInvocationError(
+ "Found more than one ID for the VPC peering "
+ "connection ({0}). Please call this function "
+ "with an ID instead.".format(conn_id or conn_name)
+ )
else:
- status = vpcs[0]['Status']['Code']
+ status = vpcs[0]["Status"]["Code"]
return bool(status == PENDING_ACCEPTANCE)
diff --git a/salt/modules/bower.py b/salt/modules/bower.py
index 7fbba8d4f7c..1ef0e958b1d 100644
--- a/salt/modules/bower.py
+++ b/salt/modules/bower.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage and query Bower packages
===============================
@@ -7,7 +7,7 @@ This module manages the installed packages using Bower.
Note that npm, git and bower must be installed for this module to be
available.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -20,64 +20,60 @@ import salt.utils.path
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
-
log = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
-__func_alias__ = {
- 'list_': 'list'
-}
+__func_alias__ = {"list_": "list"}
def __virtual__():
- '''
+ """
Only work when Bower is installed
- '''
- if salt.utils.path.which('bower') is None:
- return (False, 'The bower module could not be loaded: bower command not found')
+ """
+ if salt.utils.path.which("bower") is None:
+ return (False, "The bower module could not be loaded: bower command not found")
return True
def _check_valid_version():
- '''
+ """
Check the version of Bower to ensure this module will work. Currently
bower must be at least version 1.3.
- '''
+ """
# pylint: disable=no-member
- bower_version = _LooseVersion(
- __salt__['cmd.run']('bower --version'))
- valid_version = _LooseVersion('1.3')
+ bower_version = _LooseVersion(__salt__["cmd.run"]("bower --version"))
+ valid_version = _LooseVersion("1.3")
# pylint: enable=no-member
if bower_version < valid_version:
raise CommandExecutionError(
- '\'bower\' is not recent enough({0} < {1}). '
- 'Please Upgrade.'.format(
- bower_version, valid_version
- )
+ "'bower' is not recent enough({0} < {1}). "
+ "Please Upgrade.".format(bower_version, valid_version)
)
def _construct_bower_command(bower_command):
- '''
+ """
Create bower command line string
- '''
+ """
if not bower_command:
- raise CommandExecutionError(
- 'bower_command, e.g. install, must be specified')
+ raise CommandExecutionError("bower_command, e.g. install, must be specified")
- cmd = ['bower'] + shlex.split(bower_command)
- cmd.extend(['--config.analytics', 'false',
- '--config.interactive', 'false',
- '--allow-root', '--json'])
+ cmd = ["bower"] + shlex.split(bower_command)
+ cmd.extend(
+ [
+ "--config.analytics",
+ "false",
+ "--config.interactive",
+ "false",
+ "--allow-root",
+ "--json",
+ ]
+ )
return cmd
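
# Quick sanity check of the command construction above:
cmd = _construct_bower_command("install underscore")
assert cmd == [
    "bower", "install", "underscore",
    "--config.analytics", "false",
    "--config.interactive", "false",
    "--allow-root", "--json",
]
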
-def install(pkg,
- dir,
- pkgs=None,
- runas=None,
- env=None):
- '''
+def install(pkg, dir, pkgs=None, runas=None, env=None):
+ """
Install a Bower package.
If no package is specified, the dependencies (from bower.json) of the
@@ -109,32 +105,30 @@ def install(pkg,
salt '*' bower.install jquery#2.0 /path/to/project
- '''
+ """
_check_valid_version()
- cmd = _construct_bower_command('install')
+ cmd = _construct_bower_command("install")
if pkg:
cmd.append(pkg)
elif pkgs:
cmd.extend(pkgs)
- result = __salt__['cmd.run_all'](cmd,
- cwd=dir,
- runas=runas,
- env=env,
- python_shell=False)
+ result = __salt__["cmd.run_all"](
+ cmd, cwd=dir, runas=runas, env=env, python_shell=False
+ )
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
# If package is already installed, Bower will emit empty dict to STDOUT
- stdout = salt.utils.json.loads(result['stdout'])
+ stdout = salt.utils.json.loads(result["stdout"])
return stdout != {}
def uninstall(pkg, dir, runas=None, env=None):
- '''
+ """
Uninstall a Bower package.
pkg
@@ -158,28 +152,26 @@ def uninstall(pkg, dir, runas=None, env=None):
salt '*' bower.uninstall underscore /path/to/project
- '''
+ """
_check_valid_version()
- cmd = _construct_bower_command('uninstall')
+ cmd = _construct_bower_command("uninstall")
cmd.append(pkg)
- result = __salt__['cmd.run_all'](cmd,
- cwd=dir,
- runas=runas,
- env=env,
- python_shell=False)
+ result = __salt__["cmd.run_all"](
+ cmd, cwd=dir, runas=runas, env=env, python_shell=False
+ )
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
# If package is not installed, Bower will emit empty dict to STDOUT
- stdout = salt.utils.json.loads(result['stdout'])
+ stdout = salt.utils.json.loads(result["stdout"])
return stdout != {}
def list_(dir, runas=None, env=None):
- '''
+ """
List installed Bower packages.
dir
@@ -199,26 +191,24 @@ def list_(dir, runas=None, env=None):
salt '*' bower.list /path/to/project
- '''
+ """
_check_valid_version()
- cmd = _construct_bower_command('list')
- cmd.append('--offline')
+ cmd = _construct_bower_command("list")
+ cmd.append("--offline")
- result = __salt__['cmd.run_all'](cmd,
- cwd=dir,
- runas=runas,
- env=env,
- python_shell=False)
+ result = __salt__["cmd.run_all"](
+ cmd, cwd=dir, runas=runas, env=env, python_shell=False
+ )
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
- return salt.utils.json.loads(result['stdout'])['dependencies']
+ return salt.utils.json.loads(result["stdout"])["dependencies"]
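
# Illustrative (trimmed, made-up) JSON emitted by `bower list --json` and the
# slice that list_() returns:
import salt.utils.json

stdout = '{"endpoint": {"name": "myproj"}, "dependencies": {"jquery": {}}}'
assert salt.utils.json.loads(stdout)["dependencies"] == {"jquery": {}}
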
def prune(dir, runas=None, env=None):
- '''
+ """
.. versionadded:: 2017.7.0
Remove extraneous local Bower packages, i.e. those not referenced in bower.json
@@ -240,19 +230,17 @@ def prune(dir, runas=None, env=None):
salt '*' bower.prune /path/to/project
- '''
+ """
_check_valid_version()
- cmd = _construct_bower_command('prune')
+ cmd = _construct_bower_command("prune")
- result = __salt__['cmd.run_all'](cmd,
- cwd=dir,
- runas=runas,
- env=env,
- python_shell=False)
+ result = __salt__["cmd.run_all"](
+ cmd, cwd=dir, runas=runas, env=env, python_shell=False
+ )
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
# Bower returns an empty dictionary if nothing was pruned
- return salt.utils.json.loads(result['stdout'])
+ return salt.utils.json.loads(result["stdout"])
diff --git a/salt/modules/bridge.py b/salt/modules/bridge.py
index 79794b05568..a8859edce43 100644
--- a/salt/modules/bridge.py
+++ b/salt/modules/bridge.py
@@ -1,65 +1,66 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module for gathering and managing bridging information
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-import sys
import re
+import sys
+
import salt.utils.path
-
-__func_alias__ = {
- 'list_': 'list'
-}
+__func_alias__ = {"list_": "list"}
# Other BSD-like derivatives that use ifconfig may work too
-SUPPORTED_BSD_LIKE = ['FreeBSD', 'NetBSD', 'OpenBSD']
+SUPPORTED_BSD_LIKE = ["FreeBSD", "NetBSD", "OpenBSD"]
def __virtual__():
- '''
+ """
Confirm this module is supported by the OS and the system has
required tools
- '''
+ """
supported_os_tool = {
- 'FreeBSD': 'ifconfig',
- 'Linux': 'brctl',
- 'NetBSD': 'brconfig',
- 'OpenBSD': 'ifconfig'
+ "FreeBSD": "ifconfig",
+ "Linux": "brctl",
+ "NetBSD": "brconfig",
+ "OpenBSD": "ifconfig",
}
- cur_os = __grains__['kernel']
+ cur_os = __grains__["kernel"]
for _os in supported_os_tool:
if cur_os == _os and salt.utils.path.which(supported_os_tool[cur_os]):
return True
- return (False, 'The bridge execution module failed to load: requires one of the following tool/os'
- ' combinations: ifconfig on FreeBSD/OpenBSD, brctl on Linux or brconfig on NetBSD.')
+ return (
+ False,
+ "The bridge execution module failed to load: requires one of the following tool/os"
+ " combinations: ifconfig on FreeBSD/OpenBSD, brctl on Linux or brconfig on NetBSD.",
+ )
def _tool_path(ostool):
- '''
+ """
Internal, returns tools path
- '''
+ """
return salt.utils.path.which(ostool)
def _linux_brshow(br=None):
- '''
+ """
Internal, returns bridges and enslaved interfaces (GNU/Linux - brctl)
- '''
- brctl = _tool_path('brctl')
+ """
+ brctl = _tool_path("brctl")
if br:
- cmd = '{0} show {1}'.format(brctl, br)
+ cmd = "{0} show {1}".format(brctl, br)
else:
- cmd = '{0} show'.format(brctl)
+ cmd = "{0} show".format(brctl)
brs = {}
- for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
+ for line in __salt__["cmd.run"](cmd, python_shell=False).splitlines():
# get rid of first line
- if line.startswith('bridge name'):
+ if line.startswith("bridge name"):
continue
# get rid of ^\n's
vals = line.split()
@@ -74,14 +75,14 @@ def _linux_brshow(br=None):
brname = vals[0]
brs[brname] = {
- 'id': vals[1],
- 'stp': vals[2],
+ "id": vals[1],
+ "stp": vals[2],
}
if len(vals) > 3:
- brs[brname]['interfaces'] = [vals[3]]
+ brs[brname]["interfaces"] = [vals[3]]
if len(vals) == 1 and brname:
- brs[brname]['interfaces'].append(vals[0])
+ brs[brname]["interfaces"].append(vals[0])
if br:
try:
@@ -92,82 +93,80 @@ def _linux_brshow(br=None):
def _linux_bradd(br):
- '''
+ """
Internal, creates the bridge
- '''
- brctl = _tool_path('brctl')
- return __salt__['cmd.run']('{0} addbr {1}'.format(brctl, br),
- python_shell=False)
+ """
+ brctl = _tool_path("brctl")
+ return __salt__["cmd.run"]("{0} addbr {1}".format(brctl, br), python_shell=False)
def _linux_brdel(br):
- '''
+ """
Internal, deletes the bridge
- '''
- brctl = _tool_path('brctl')
- return __salt__['cmd.run']('{0} delbr {1}'.format(brctl, br),
- python_shell=False)
+ """
+ brctl = _tool_path("brctl")
+ return __salt__["cmd.run"]("{0} delbr {1}".format(brctl, br), python_shell=False)
def _linux_addif(br, iface):
- '''
+ """
Internal, adds an interface to a bridge
- '''
- brctl = _tool_path('brctl')
- return __salt__['cmd.run']('{0} addif {1} {2}'.format(brctl, br, iface),
- python_shell=False)
+ """
+ brctl = _tool_path("brctl")
+ return __salt__["cmd.run"](
+ "{0} addif {1} {2}".format(brctl, br, iface), python_shell=False
+ )
def _linux_delif(br, iface):
- '''
+ """
Internal, removes an interface from a bridge
- '''
- brctl = _tool_path('brctl')
- return __salt__['cmd.run']('{0} delif {1} {2}'.format(brctl, br, iface),
- python_shell=False)
+ """
+ brctl = _tool_path("brctl")
+ return __salt__["cmd.run"](
+ "{0} delif {1} {2}".format(brctl, br, iface), python_shell=False
+ )
def _linux_stp(br, state):
- '''
+ """
Internal, sets STP state
- '''
- brctl = _tool_path('brctl')
- return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state),
- python_shell=False)
+ """
+ brctl = _tool_path("brctl")
+ return __salt__["cmd.run"](
+ "{0} stp {1} {2}".format(brctl, br, state), python_shell=False
+ )
def _bsd_brshow(br=None):
- '''
+ """
Internal, returns bridges and member interfaces (BSD-like: ifconfig)
- '''
- if __grains__['kernel'] == 'NetBSD':
+ """
+ if __grains__["kernel"] == "NetBSD":
return _netbsd_brshow(br)
- ifconfig = _tool_path('ifconfig')
+ ifconfig = _tool_path("ifconfig")
ifaces = {}
if br:
ifaces[br] = br
else:
- cmd = '{0} -g bridge'.format(ifconfig)
- for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
+ cmd = "{0} -g bridge".format(ifconfig)
+ for line in __salt__["cmd.run"](cmd, python_shell=False).splitlines():
ifaces[line] = line
brs = {}
for iface in ifaces:
- cmd = '{0} {1}'.format(ifconfig, iface)
- for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
- brs[iface] = {
- 'interfaces': [],
- 'stp': 'no'
- }
+ cmd = "{0} {1}".format(ifconfig, iface)
+ for line in __salt__["cmd.run"](cmd, python_shell=False).splitlines():
+ brs[iface] = {"interfaces": [], "stp": "no"}
line = line.lstrip()
- if line.startswith('member:'):
- brs[iface]['interfaces'].append(line.split(' ')[1])
- if 'STP' in line:
- brs[iface]['stp'] = 'yes'
+ if line.startswith("member:"):
+ brs[iface]["interfaces"].append(line.split(" ")[1])
+ if "STP" in line:
+ brs[iface]["stp"] = "yes"
if br:
return brs[br]
@@ -175,36 +174,33 @@ def _bsd_brshow(br=None):
def _netbsd_brshow(br=None):
- '''
+ """
Internal, returns bridges and enslaved interfaces (NetBSD - brconfig)
- '''
- brconfig = _tool_path('brconfig')
+ """
+ brconfig = _tool_path("brconfig")
if br:
- cmd = '{0} {1}'.format(brconfig, br)
+ cmd = "{0} {1}".format(brconfig, br)
else:
- cmd = '{0} -a'.format(brconfig)
+ cmd = "{0} -a".format(brconfig)
brs = {}
start_int = False
- for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
- if line.startswith('bridge'):
+ for line in __salt__["cmd.run"](cmd, python_shell=False).splitlines():
+ if line.startswith("bridge"):
start_int = False
- brname = line.split(':')[0] # on NetBSD, always ^bridge([0-9]+):
- brs[brname] = {
- 'interfaces': [],
- 'stp': 'no'
- }
- if 'Interfaces:' in line:
+ brname = line.split(":")[0] # on NetBSD, always ^bridge([0-9]+):
+ brs[brname] = {"interfaces": [], "stp": "no"}
+ if "Interfaces:" in line:
start_int = True
continue
if start_int and brname:
- m = re.match(r'\s*([a-z0-9]+)\s.*<.*>', line)
+ m = re.match(r"\s*([a-z0-9]+)\s.*<.*>", line)
if m:
- brs[brname]['interfaces'].append(m.group(1))
- if 'STP' in line:
- brs[brname]['stp'] = 'yes'
+ brs[brname]["interfaces"].append(m.group(1))
+ if "STP" in line:
+ brs[brname]["stp"] = "yes"
if br:
try:
@@ -215,106 +211,118 @@ def _netbsd_brshow(br=None):
def _bsd_bradd(br):
- '''
+ """
Internal, creates the bridge
- '''
- kernel = __grains__['kernel']
- ifconfig = _tool_path('ifconfig')
+ """
+ kernel = __grains__["kernel"]
+ ifconfig = _tool_path("ifconfig")
if not br:
return False
- if __salt__['cmd.retcode']('{0} {1} create up'.format(ifconfig, br),
- python_shell=False) != 0:
+ if (
+ __salt__["cmd.retcode"](
+ "{0} {1} create up".format(ifconfig, br), python_shell=False
+ )
+ != 0
+ ):
return False
# NetBSD needs a second command to bring the bridge up
- if kernel == 'NetBSD':
- brconfig = _tool_path('brconfig')
- if __salt__['cmd.retcode']('{0} {1} up'.format(brconfig, br),
- python_shell=False) != 0:
+ if kernel == "NetBSD":
+ brconfig = _tool_path("brconfig")
+ if (
+ __salt__["cmd.retcode"](
+ "{0} {1} up".format(brconfig, br), python_shell=False
+ )
+ != 0
+ ):
return False
return True
def _bsd_brdel(br):
- '''
+ """
Internal, deletes the bridge
- '''
- ifconfig = _tool_path('ifconfig')
+ """
+ ifconfig = _tool_path("ifconfig")
if not br:
return False
- return __salt__['cmd.run']('{0} {1} destroy'.format(ifconfig, br),
- python_shell=False)
+ return __salt__["cmd.run"](
+ "{0} {1} destroy".format(ifconfig, br), python_shell=False
+ )
def _bsd_addif(br, iface):
- '''
+ """
Internal, adds an interface to a bridge
- '''
- kernel = __grains__['kernel']
- if kernel == 'NetBSD':
- cmd = _tool_path('brconfig')
- brcmd = 'add'
+ """
+ kernel = __grains__["kernel"]
+ if kernel == "NetBSD":
+ cmd = _tool_path("brconfig")
+ brcmd = "add"
else:
- cmd = _tool_path('ifconfig')
- brcmd = 'addem'
+ cmd = _tool_path("ifconfig")
+ brcmd = "addem"
if not br or not iface:
return False
- return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, brcmd, iface),
- python_shell=False)
+ return __salt__["cmd.run"](
+ "{0} {1} {2} {3}".format(cmd, br, brcmd, iface), python_shell=False
+ )
def _bsd_delif(br, iface):
- '''
+ """
Internal, removes an interface from a bridge
- '''
- kernel = __grains__['kernel']
- if kernel == 'NetBSD':
- cmd = _tool_path('brconfig')
- brcmd = 'delete'
+ """
+ kernel = __grains__["kernel"]
+ if kernel == "NetBSD":
+ cmd = _tool_path("brconfig")
+ brcmd = "delete"
else:
- cmd = _tool_path('ifconfig')
- brcmd = 'deletem'
+ cmd = _tool_path("ifconfig")
+ brcmd = "deletem"
if not br or not iface:
return False
- return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, brcmd, iface),
- python_shell=False)
+ return __salt__["cmd.run"](
+ "{0} {1} {2} {3}".format(cmd, br, brcmd, iface), python_shell=False
+ )
def _bsd_stp(br, state, iface):
- '''
+ """
Internal, sets STP state. On BSD-like systems the physical STP
interface must be specified
- '''
- kernel = __grains__['kernel']
- if kernel == 'NetBSD':
- cmd = _tool_path('brconfig')
+ """
+ kernel = __grains__["kernel"]
+ if kernel == "NetBSD":
+ cmd = _tool_path("brconfig")
else:
- cmd = _tool_path('ifconfig')
+ cmd = _tool_path("ifconfig")
if not br or not iface:
return False
- return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, state, iface),
- python_shell=False)
+ return __salt__["cmd.run"](
+ "{0} {1} {2} {3}".format(cmd, br, state, iface), python_shell=False
+ )
def _os_dispatch(func, *args, **kwargs):
- '''
+ """
Internal, dispatches functions by operating system
- '''
- if __grains__['kernel'] in SUPPORTED_BSD_LIKE:
- kernel = 'bsd'
+ """
+ if __grains__["kernel"] in SUPPORTED_BSD_LIKE:
+ kernel = "bsd"
else:
- kernel = __grains__['kernel'].lower()
+ kernel = __grains__["kernel"].lower()
- _os_func = getattr(sys.modules[__name__], '_{0}_{1}'.format(kernel, func))
+ _os_func = getattr(sys.modules[__name__], "_{0}_{1}".format(kernel, func))
if callable(_os_func):
return _os_func(*args, **kwargs)
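
# Minimal standalone illustration of the getattr-based dispatch above; the
# real code derives `kernel` from __grains__, and these names are toy ones.
import sys

def _bsd_hello():
    return "hello from bsd"

def _dispatch(kernel, func):
    impl = getattr(sys.modules[__name__], "_{0}_{1}".format(kernel, func))
    return impl() if callable(impl) else None

assert _dispatch("bsd", "hello") == "hello from bsd"
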
@@ -324,7 +332,7 @@ def _os_dispatch(func, *args, **kwargs):
def show(br=None):
- '''
+ """
Returns bridges along with their enslaved physical interfaces. If
no interface is given, all bridges are shown, else only the specified
bridge values are returned.
@@ -335,12 +343,12 @@ def show(br=None):
salt '*' bridge.show
salt '*' bridge.show br0
- '''
- return _os_dispatch('brshow', br)
+ """
+ return _os_dispatch("brshow", br)
def list_():
- '''
+ """
Returns the machine's list of bridges
CLI Example:
@@ -348,8 +356,8 @@ def list_():
.. code-block:: bash
salt '*' bridge.list
- '''
- brs = _os_dispatch('brshow')
+ """
+ brs = _os_dispatch("brshow")
if not brs:
return None
brlist = []
@@ -360,7 +368,7 @@ def list_():
def interfaces(br=None):
- '''
+ """
Returns interfaces attached to a bridge
CLI Example:
@@ -368,17 +376,17 @@ def interfaces(br=None):
.. code-block:: bash
salt '*' bridge.interfaces br0
- '''
+ """
if not br:
return None
- br_ret = _os_dispatch('brshow', br)
+ br_ret = _os_dispatch("brshow", br)
if br_ret:
- return br_ret['interfaces']
+ return br_ret["interfaces"]
def find_interfaces(*args):
- '''
+ """
Returns the bridge to which the given interfaces are bound
CLI Example:
@@ -386,8 +394,8 @@ def find_interfaces(*args):
.. code-block:: bash
salt '*' bridge.find_interfaces eth0 [eth1...]
- '''
- brs = _os_dispatch('brshow')
+ """
+ brs = _os_dispatch("brshow")
if not brs:
return None
@@ -396,7 +404,7 @@ def find_interfaces(*args):
for iface in args:
for br in brs:
try: # a bridge may not contain interfaces
- if iface in brs[br]['interfaces']:
+ if iface in brs[br]["interfaces"]:
iflist[iface] = br
except Exception: # pylint: disable=broad-except
pass
@@ -405,7 +413,7 @@ def find_interfaces(*args):
def add(br=None):
- '''
+ """
Creates a bridge
CLI Example:
@@ -413,12 +421,12 @@ def add(br=None):
.. code-block:: bash
salt '*' bridge.add br0
- '''
- return _os_dispatch('bradd', br)
+ """
+ return _os_dispatch("bradd", br)
def delete(br=None):
- '''
+ """
Deletes a bridge
CLI Example:
@@ -426,12 +434,12 @@ def delete(br=None):
.. code-block:: bash
salt '*' bridge.delete br0
- '''
- return _os_dispatch('brdel', br)
+ """
+ return _os_dispatch("brdel", br)
def addif(br=None, iface=None):
- '''
+ """
Adds an interface to a bridge
CLI Example:
@@ -439,12 +447,12 @@ def addif(br=None, iface=None):
.. code-block:: bash
salt '*' bridge.addif br0 eth0
- '''
- return _os_dispatch('addif', br, iface)
+ """
+ return _os_dispatch("addif", br, iface)
def delif(br=None, iface=None):
- '''
+ """
Removes an interface from a bridge
CLI Example:
@@ -452,12 +460,12 @@ def delif(br=None, iface=None):
.. code-block:: bash
salt '*' bridge.delif br0 eth0
- '''
- return _os_dispatch('delif', br, iface)
+ """
+ return _os_dispatch("delif", br, iface)
-def stp(br=None, state='disable', iface=None):
- '''
+def stp(br=None, state="disable", iface=None):
+ """
Sets Spanning Tree Protocol state for a bridge
CLI Example:
@@ -476,14 +484,14 @@ def stp(br=None, state='disable', iface=None):
salt '*' bridge.stp bridge0 enable fxp0
salt '*' bridge.stp bridge0 disable fxp0
- '''
- kernel = __grains__['kernel']
- if kernel == 'Linux':
- states = {'enable': 'on', 'disable': 'off'}
- return _os_dispatch('stp', br, states[state])
+ """
+ kernel = __grains__["kernel"]
+ if kernel == "Linux":
+ states = {"enable": "on", "disable": "off"}
+ return _os_dispatch("stp", br, states[state])
elif kernel in SUPPORTED_BSD_LIKE:
- states = {'enable': 'stp', 'disable': '-stp'}
- return _os_dispatch('stp', br, states[state], iface)
+ states = {"enable": "stp", "disable": "-stp"}
+ return _os_dispatch("stp", br, states[state], iface)
else:
return False
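
# Concrete commands produced per platform (derived from the helpers above):
#   Linux:  bridge.stp("br0", "enable")             -> `brctl stp br0 on`
#   NetBSD: bridge.stp("bridge0", "enable", "fxp0") -> `brconfig bridge0 stp fxp0`
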
diff --git a/salt/modules/bsd_shadow.py b/salt/modules/bsd_shadow.py
index 79e3cf962f1..db9beecfc8d 100644
--- a/salt/modules/bsd_shadow.py
+++ b/salt/modules/bsd_shadow.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage the password database on BSD systems
.. important::
@@ -7,34 +7,40 @@ Manage the password database on BSD systems
minion, and it is using a different module (or gives an error similar to
*'shadow.info' is not available*), see :ref:`here
`.
-'''
+"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
+
+import salt.utils.files
+import salt.utils.stringutils
+from salt.exceptions import SaltInvocationError
+
+# Import salt libs
+from salt.ext import six
+
try:
import pwd
except ImportError:
pass
-# Import salt libs
-from salt.ext import six
-import salt.utils.files
-import salt.utils.stringutils
-from salt.exceptions import SaltInvocationError
# Define the module's virtual name
-__virtualname__ = 'shadow'
+__virtualname__ = "shadow"
def __virtual__():
- if 'BSD' in __grains__.get('os', ''):
+ if "BSD" in __grains__.get("os", ""):
return __virtualname__
- return (False, 'The bsd_shadow execution module cannot be loaded: '
- 'only available on BSD family systems.')
+ return (
+ False,
+ "The bsd_shadow execution module cannot be loaded: "
+ "only available on BSD family systems.",
+ )
def default_hash():
- '''
+ """
Returns the default hash used for unset passwords
CLI Example:
@@ -42,12 +48,12 @@ def default_hash():
.. code-block:: bash
salt '*' shadow.default_hash
- '''
- return '*' if __grains__['os'].lower() == 'freebsd' else '*************'
+ """
+ return "*" if __grains__["os"].lower() == "freebsd" else "*************"
def info(name):
- '''
+ """
Return information for the specified user
CLI Example:
@@ -55,35 +61,31 @@ def info(name):
.. code-block:: bash
salt '*' shadow.info someuser
- '''
+ """
try:
data = pwd.getpwnam(name)
- ret = {
- 'name': data.pw_name,
- 'passwd': data.pw_passwd}
+ ret = {"name": data.pw_name, "passwd": data.pw_passwd}
except KeyError:
- return {
- 'name': '',
- 'passwd': ''}
+ return {"name": "", "passwd": ""}
if not isinstance(name, six.string_types):
name = six.text_type(name)
- if ':' in name:
- raise SaltInvocationError('Invalid username \'{0}\''.format(name))
+ if ":" in name:
+ raise SaltInvocationError("Invalid username '{0}'".format(name))
- if __salt__['cmd.has_exec']('pw'):
- change, expire = __salt__['cmd.run_stdout'](
- ['pw', 'user', 'show', name],
- python_shell=False).split(':')[5:7]
- elif __grains__['kernel'] in ('NetBSD', 'OpenBSD'):
+ if __salt__["cmd.has_exec"]("pw"):
+ change, expire = __salt__["cmd.run_stdout"](
+ ["pw", "user", "show", name], python_shell=False
+ ).split(":")[5:7]
+ elif __grains__["kernel"] in ("NetBSD", "OpenBSD"):
try:
- with salt.utils.files.fopen('/etc/master.passwd', 'r') as fp_:
+ with salt.utils.files.fopen("/etc/master.passwd", "r") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
- if line.startswith('{0}:'.format(name)):
- key = line.split(':')
+ if line.startswith("{0}:".format(name)):
+ key = line.split(":")
change, expire = key[5:7]
- ret['passwd'] = six.text_type(key[1])
+ ret["passwd"] = six.text_type(key[1])
break
except IOError:
change = expire = None
@@ -91,12 +93,12 @@ def info(name):
change = expire = None
try:
- ret['change'] = int(change)
+ ret["change"] = int(change)
except ValueError:
pass
try:
- ret['expire'] = int(expire)
+ ret["expire"] = int(expire)
except ValueError:
pass
@@ -104,7 +106,7 @@ def info(name):
def set_change(name, change):
- '''
+ """
Sets the time at which the password expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
@@ -116,22 +118,22 @@ def set_change(name, change):
.. code-block:: bash
salt '*' shadow.set_change username 1419980400
- '''
+ """
pre_info = info(name)
- if change == pre_info['change']:
+ if change == pre_info["change"]:
return True
- if __grains__['kernel'] == 'FreeBSD':
- cmd = ['pw', 'user', 'mod', name, '-f', change]
+ if __grains__["kernel"] == "FreeBSD":
+ cmd = ["pw", "user", "mod", name, "-f", change]
else:
- cmd = ['usermod', '-f', change, name]
- __salt__['cmd.run'](cmd, python_shell=False)
+ cmd = ["usermod", "-f", change, name]
+ __salt__["cmd.run"](cmd, python_shell=False)
post_info = info(name)
- if post_info['change'] != pre_info['change']:
- return post_info['change'] == change
+ if post_info["change"] != pre_info["change"]:
+ return post_info["change"] == change
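Reviewer note: the reformat preserves the original return contract of `set_change` (and `set_expire` below), including the implicit None path:

# Sketch of the preserved return contract:
#   field already == requested value  -> True  (early return)
#   command changed the field         -> post_info comparison (True/False)
#   command left the field unchanged  -> falls off the end, returns None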
def set_expire(name, expire):
- '''
+ """
Sets the time at which the account expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
@@ -143,22 +145,22 @@ def set_expire(name, expire):
.. code-block:: bash
salt '*' shadow.set_expire username 1419980400
- '''
+ """
pre_info = info(name)
- if expire == pre_info['expire']:
+ if expire == pre_info["expire"]:
return True
- if __grains__['kernel'] == 'FreeBSD':
- cmd = ['pw', 'user', 'mod', name, '-e', expire]
+ if __grains__["kernel"] == "FreeBSD":
+ cmd = ["pw", "user", "mod", name, "-e", expire]
else:
- cmd = ['usermod', '-e', expire, name]
- __salt__['cmd.run'](cmd, python_shell=False)
+ cmd = ["usermod", "-e", expire, name]
+ __salt__["cmd.run"](cmd, python_shell=False)
post_info = info(name)
- if post_info['expire'] != pre_info['expire']:
- return post_info['expire'] == expire
+ if post_info["expire"] != pre_info["expire"]:
+ return post_info["expire"] == expire
def del_password(name):
- '''
+ """
.. versionadded:: 2015.8.2
Delete the password for the named user
@@ -168,15 +170,15 @@ def del_password(name):
.. code-block:: bash
salt '*' shadow.del_password username
- '''
- cmd = 'pw user mod {0} -w none'.format(name)
- __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')
+ """
+ cmd = "pw user mod {0} -w none".format(name)
+ __salt__["cmd.run"](cmd, python_shell=False, output_loglevel="quiet")
uinfo = info(name)
- return not uinfo['passwd']
+ return not uinfo["passwd"]
def set_password(name, password):
- '''
+ """
Set the password for a named user. The password must be a properly defined
hash. The password hash can be generated with this command:
@@ -202,15 +204,12 @@ def set_password(name, password):
.. code-block:: bash
salt '*' shadow.set_password someuser '$1$UYCIxa628.9qXjpQCjM4a..'
- '''
- if __grains__.get('os', '') == 'FreeBSD':
- cmd = ['pw', 'user', 'mod', name, '-H', '0']
+ """
+ if __grains__.get("os", "") == "FreeBSD":
+ cmd = ["pw", "user", "mod", name, "-H", "0"]
stdin = password
else:
- cmd = ['usermod', '-p', password, name]
+ cmd = ["usermod", "-p", password, name]
stdin = None
- __salt__['cmd.run'](cmd,
- stdin=stdin,
- output_loglevel='quiet',
- python_shell=False)
- return info(name)['passwd'] == password
+ __salt__["cmd.run"](cmd, stdin=stdin, output_loglevel="quiet", python_shell=False)
+ return info(name)["passwd"] == password
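For anyone verifying the NetBSD/OpenBSD branch of `info()` above: it reads `/etc/master.passwd` and slices fields 5 and 6 out of a colon-separated record. A minimal standalone sketch of that parsing (field layout assumed from master.passwd(5)):

def parse_master_passwd_line(line, name):
    # master.passwd fields:
    # name:passwd:uid:gid:class:change:expire:gecos:home:shell
    if not line.startswith(name + ":"):
        return None
    fields = line.split(":")
    return {
        "name": fields[0],
        "passwd": fields[1],
        "change": int(fields[5]) if fields[5].isdigit() else None,
        "expire": int(fields[6]) if fields[6].isdigit() else None,
    }

# parse_master_passwd_line("root:*:0:0:daemon:0:0:Charlie &:/root:/bin/csh", "root")
# -> {'name': 'root', 'passwd': '*', 'change': 0, 'expire': 0}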
diff --git a/salt/modules/btrfs.py b/salt/modules/btrfs.py
index e0ac08c19a5..bde9cb64d9c 100644
--- a/salt/modules/btrfs.py
+++ b/salt/modules/btrfs.py
@@ -14,12 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
+"""
Module for managing BTRFS file systems.
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import itertools
import os
import re
@@ -35,14 +36,14 @@ from salt.ext import six
def __virtual__():
- '''
+ """
Only work on POSIX-like systems
- '''
- return not salt.utils.platform.is_windows() and __grains__.get('kernel') == 'Linux'
+ """
+ return not salt.utils.platform.is_windows() and __grains__.get("kernel") == "Linux"
def version():
- '''
+ """
Return BTRFS version.
CLI Example:
@@ -50,40 +51,40 @@ def version():
.. code-block:: bash
salt '*' btrfs.version
- '''
- out = __salt__['cmd.run_all']("btrfs --version")
- if out.get('stderr'):
- raise CommandExecutionError(out['stderr'])
- return {'version': out['stdout'].split(" ", 1)[-1]}
+ """
+ out = __salt__["cmd.run_all"]("btrfs --version")
+ if out.get("stderr"):
+ raise CommandExecutionError(out["stderr"])
+ return {"version": out["stdout"].split(" ", 1)[-1]}
def _parse_btrfs_info(data):
- '''
+ """
Parse BTRFS device info data.
- '''
+ """
ret = {}
for line in [line for line in data.split("\n") if line][:-1]:
if line.startswith("Label:"):
line = re.sub(r"Label:\s+", "", line)
label, uuid_ = [tkn.strip() for tkn in line.split("uuid:")]
- ret['label'] = label != 'none' and label or None
- ret['uuid'] = uuid_
+ ret["label"] = label != "none" and label or None
+ ret["uuid"] = uuid_
continue
if line.startswith("\tdevid"):
dev_data = re.split(r"\s+", line.strip())
dev_id = dev_data[-1]
ret[dev_id] = {
- 'device_id': dev_data[1],
- 'size': dev_data[3],
- 'used': dev_data[5],
- }
+ "device_id": dev_data[1],
+ "size": dev_data[3],
+ "used": dev_data[5],
+ }
return ret
def info(device):
- '''
+ """
Get BTRFS filesystem information.
CLI Example:
@@ -91,15 +92,15 @@ def info(device):
.. code-block:: bash
salt '*' btrfs.info /dev/sda1
- '''
- out = __salt__['cmd.run_all']("btrfs filesystem show {0}".format(device))
+ """
+ out = __salt__["cmd.run_all"]("btrfs filesystem show {0}".format(device))
salt.utils.fsutils._verify_run(out)
- return _parse_btrfs_info(out['stdout'])
+ return _parse_btrfs_info(out["stdout"])
def devices():
- '''
+ """
Get known BTRFS formatted devices on the system.
CLI Example:
@@ -107,28 +108,30 @@ def devices():
.. code-block:: bash
salt '*' btrfs.devices
- '''
- out = __salt__['cmd.run_all']("blkid -o export")
+ """
+ out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
- return salt.utils.fsutils._blkid_output(out['stdout'], fs_type='btrfs')
+ return salt.utils.fsutils._blkid_output(out["stdout"], fs_type="btrfs")
def _defragment_mountpoint(mountpoint):
- '''
+ """
Defragment only one BTRFS mountpoint.
- '''
- out = __salt__['cmd.run_all']("btrfs filesystem defragment -f {0}".format(mountpoint))
+ """
+ out = __salt__["cmd.run_all"](
+ "btrfs filesystem defragment -f {0}".format(mountpoint)
+ )
return {
- 'mount_point': mountpoint,
- 'passed': not out['stderr'],
- 'log': out['stderr'] or False,
- 'range': False,
+ "mount_point": mountpoint,
+ "passed": not out["stderr"],
+ "log": out["stderr"] or False,
+ "range": False,
}
def defragment(path):
- '''
+ """
Defragment mounted BTRFS filesystem.
In order to defragment a filesystem, the device should be properly mounted and writable.
@@ -141,30 +144,36 @@ def defragment(path):
salt '*' btrfs.defragment /dev/sda1
salt '*' btrfs.defragment /path/on/filesystem
- '''
+ """
is_device = salt.utils.fsutils._is_device(path)
mounts = salt.utils.fsutils._get_mounts("btrfs")
if is_device and not mounts.get(path):
- raise CommandExecutionError("Device \"{0}\" is not mounted".format(path))
+ raise CommandExecutionError('Device "{0}" is not mounted'.format(path))
result = []
if is_device:
for mount_point in mounts[path]:
- result.append(_defragment_mountpoint(mount_point['mount_point']))
+ result.append(_defragment_mountpoint(mount_point["mount_point"]))
else:
is_mountpoint = False
for mountpoints in six.itervalues(mounts):
for mpnt in mountpoints:
- if path == mpnt['mount_point']:
+ if path == mpnt["mount_point"]:
is_mountpoint = True
break
d_res = _defragment_mountpoint(path)
- if not is_mountpoint and not d_res['passed'] and "range ioctl not supported" in d_res['log']:
- d_res['log'] = "Range ioctl defragmentation is not supported in this kernel."
+ if (
+ not is_mountpoint
+ and not d_res["passed"]
+ and "range ioctl not supported" in d_res["log"]
+ ):
+ d_res[
+ "log"
+ ] = "Range ioctl defragmentation is not supported in this kernel."
if not is_mountpoint:
- d_res['mount_point'] = False
- d_res['range'] = os.path.exists(path) and path or False
+ d_res["mount_point"] = False
+ d_res["range"] = os.path.exists(path) and path or False
result.append(d_res)
@@ -172,7 +181,7 @@ def defragment(path):
def features():
- '''
+ """
List currently available BTRFS features.
CLI Example:
@@ -180,12 +189,14 @@ def features():
.. code-block:: bash
salt '*' btrfs.mkfs_features
- '''
- out = __salt__['cmd.run_all']("mkfs.btrfs -O list-all")
+ """
+ out = __salt__["cmd.run_all"]("mkfs.btrfs -O list-all")
salt.utils.fsutils._verify_run(out)
ret = {}
- for line in [re.sub(r"\s+", " ", line) for line in out['stderr'].split("\n") if " - " in line]:
+ for line in [
+ re.sub(r"\s+", " ", line) for line in out["stderr"].split("\n") if " - " in line
+ ]:
option, description = line.split(" - ", 1)
ret[option] = description
@@ -193,15 +204,19 @@ def features():
def _usage_overall(raw):
- '''
+ """
Parse usage/overall.
- '''
+ """
data = {}
for line in raw.split("\n")[1:]:
- keyset = [item.strip() for item in re.sub(r"\s+", " ", line).split(":", 1) if item.strip()]
+ keyset = [
+ item.strip()
+ for item in re.sub(r"\s+", " ", line).split(":", 1)
+ if item.strip()
+ ]
if len(keyset) == 2:
key = re.sub(r"[()]", "", keyset[0]).replace(" ", "_").lower()
- if key in ['free_estimated', 'global_reserve']: # An extra field
+ if key in ["free_estimated", "global_reserve"]: # An extra field
subk = keyset[1].split("(")
data[key] = subk[0].strip()
subk = subk[1].replace(")", "").split(": ")
@@ -213,10 +228,10 @@ def _usage_overall(raw):
def _usage_specific(raw):
- '''
+ """
Parse usage/specific.
- '''
- get_key = lambda val: dict([tuple(val.split(":")), ])
+ """
+ get_key = lambda val: dict([tuple(val.split(":"))])
raw = raw.split("\n")
section, size, used = raw[0].split(" ")
section = section.replace(",", "_").replace(":", "").lower()
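The reformatted `get_key` lambda above turns one `key:value` fragment into a single-entry dict; for example:

get_key = lambda val: dict([tuple(val.split(":"))])
get_key("Device size:10.00GiB")  # -> {'Device size': '10.00GiB'}
# A value containing a second ':' would produce a 3-tuple and raise ValueError.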
@@ -234,9 +249,9 @@ def _usage_specific(raw):
def _usage_unallocated(raw):
- '''
+ """
Parse usage/unallocated.
- '''
+ """
ret = {}
for line in raw.split("\n")[1:]:
keyset = re.sub(r"\s+", " ", line.strip()).split(" ")
@@ -247,7 +262,7 @@ def _usage_unallocated(raw):
def usage(path):
- '''
+ """
Show in which disk the chunks are allocated.
CLI Example:
@@ -255,16 +270,16 @@ def usage(path):
.. code-block:: bash
salt '*' btrfs.usage /your/mountpoint
- '''
- out = __salt__['cmd.run_all']("btrfs filesystem usage {0}".format(path))
+ """
+ out = __salt__["cmd.run_all"]("btrfs filesystem usage {0}".format(path))
salt.utils.fsutils._verify_run(out)
ret = {}
- for section in out['stdout'].split("\n\n"):
+ for section in out["stdout"].split("\n\n"):
if section.startswith("Overall:\n"):
- ret['overall'] = _usage_overall(section)
+ ret["overall"] = _usage_overall(section)
elif section.startswith("Unallocated:\n"):
- ret['unallocated'] = _usage_unallocated(section)
+ ret["unallocated"] = _usage_unallocated(section)
else:
ret.update(_usage_specific(section))
@@ -272,7 +287,7 @@ def usage(path):
def mkfs(*devices, **kwargs):
- '''
+ """
Create a file system on the specified device. By default it forcibly wipes any existing filesystem.
General options:
@@ -303,14 +318,16 @@ def mkfs(*devices, **kwargs):
salt '*' btrfs.mkfs /dev/sda1
salt '*' btrfs.mkfs /dev/sda1 noforce=True
- '''
+ """
if not devices:
raise CommandExecutionError("No devices specified")
mounts = salt.utils.fsutils._get_mounts("btrfs")
for device in devices:
if mounts.get(device):
- raise CommandExecutionError("Device \"{0}\" should not be mounted".format(device))
+ raise CommandExecutionError(
+ 'Device "{0}" should not be mounted'.format(device)
+ )
cmd = ["mkfs.btrfs"]
@@ -327,16 +344,26 @@ def mkfs(*devices, **kwargs):
if mto:
cmd.append("-m {0}".format(mto))
- for key, option in [("-l", "leafsize"), ("-L", "label"), ("-O", "fts"),
- ("-A", "allocsize"), ("-b", "bytecount"), ("-n", "nodesize"),
- ("-s", "sectorsize")]:
- if option == 'label' and option in kwargs:
- kwargs['label'] = "'{0}'".format(kwargs["label"])
+ for key, option in [
+ ("-l", "leafsize"),
+ ("-L", "label"),
+ ("-O", "fts"),
+ ("-A", "allocsize"),
+ ("-b", "bytecount"),
+ ("-n", "nodesize"),
+ ("-s", "sectorsize"),
+ ]:
+ if option == "label" and option in kwargs:
+ kwargs["label"] = "'{0}'".format(kwargs["label"])
if kwargs.get(option):
cmd.append("{0} {1}".format(key, kwargs.get(option)))
if kwargs.get("uuid"):
- cmd.append("-U {0}".format(kwargs.get("uuid") is True and uuid.uuid1() or kwargs.get("uuid")))
+ cmd.append(
+ "-U {0}".format(
+ kwargs.get("uuid") is True and uuid.uuid1() or kwargs.get("uuid")
+ )
+ )
if kwargs.get("nodiscard"):
cmd.append("-K")
@@ -345,17 +372,17 @@ def mkfs(*devices, **kwargs):
cmd.extend(devices)
- out = __salt__['cmd.run_all'](' '.join(cmd))
+ out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
- ret = {'log': out['stdout']}
- ret.update(__salt__['btrfs.info'](devices[0]))
+ ret = {"log": out["stdout"]}
+ ret.update(__salt__["btrfs.info"](devices[0]))
return ret
def resize(mountpoint, size):
- '''
+ """
Resize filesystem.
General options:
@@ -369,49 +396,66 @@ def resize(mountpoint, size):
salt '*' btrfs.resize /mountpoint size=+1g
salt '*' btrfs.resize /dev/sda1 size=max
- '''
+ """
- if size == 'max':
+ if size == "max":
if not salt.utils.fsutils._is_device(mountpoint):
- raise CommandExecutionError("Mountpoint \"{0}\" should be a valid device".format(mountpoint))
+ raise CommandExecutionError(
+ 'Mountpoint "{0}" should be a valid device'.format(mountpoint)
+ )
if not salt.utils.fsutils._get_mounts("btrfs").get(mountpoint):
- raise CommandExecutionError("Device \"{0}\" should be mounted".format(mountpoint))
- elif len(size) < 3 or size[0] not in '-+' \
- or size[-1] not in 'kKmMgGtTpPeE' or re.sub(r"\d", "", size[1:][:-1]):
- raise CommandExecutionError("Unknown size: \"{0}\". Expected: [+/-][kKmMgGtTpPeE]|max".format(size))
+ raise CommandExecutionError(
+ 'Device "{0}" should be mounted'.format(mountpoint)
+ )
+ elif (
+ len(size) < 3
+ or size[0] not in "-+"
+ or size[-1] not in "kKmMgGtTpPeE"
+ or re.sub(r"\d", "", size[1:][:-1])
+ ):
+ raise CommandExecutionError(
+ 'Unknown size: "{0}". Expected: [+/-][kKmMgGtTpPeE]|max'.format(
+ size
+ )
+ )
- out = __salt__['cmd.run_all']('btrfs filesystem resize {0} {1}'.format(size, mountpoint))
+ out = __salt__["cmd.run_all"](
+ "btrfs filesystem resize {0} {1}".format(size, mountpoint)
+ )
salt.utils.fsutils._verify_run(out)
- ret = {'log': out['stdout']}
- ret.update(__salt__['btrfs.info'](mountpoint))
+ ret = {"log": out["stdout"]}
+ ret.update(__salt__["btrfs.info"](mountpoint))
return ret
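The reworked `resize` validation accepts `max` or a signed size such as `+1g`; an equivalent standalone check (a sketch mirroring the length/sign/unit/digits conditions above):

import re

def valid_resize_size(size):
    # "max", or [+/-]<digits><one of kKmMgGtTpPeE>
    return size == "max" or bool(re.match(r"^[-+]\d+[kKmMgGtTpPeE]$", size))

# valid_resize_size("+1g") -> True
# valid_resize_size("2x")  -> False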
def _fsck_ext(device):
- '''
+ """
Check an ext2/ext3/ext4 file system.
This is a forced check to determine whether a filesystem is clean.
NOTE: This function may need to be moved into the extfs module as a standard method in the future.
- '''
+ """
msgs = {
- 0: 'No errors',
- 1: 'Filesystem errors corrected',
- 2: 'System should be rebooted',
- 4: 'Filesystem errors left uncorrected',
- 8: 'Operational error',
- 16: 'Usage or syntax error',
- 32: 'Fsck canceled by user request',
- 128: 'Shared-library error',
+ 0: "No errors",
+ 1: "Filesystem errors corrected",
+ 2: "System should be rebooted",
+ 4: "Filesystem errors left uncorrected",
+ 8: "Operational error",
+ 16: "Usage or syntax error",
+ 32: "Fsck canceled by user request",
+ 128: "Shared-library error",
}
- return msgs.get(__salt__['cmd.run_all']("fsck -f -n {0}".format(device))['retcode'], 'Unknown error')
+ return msgs.get(
+ __salt__["cmd.run_all"]("fsck -f -n {0}".format(device))["retcode"],
+ "Unknown error",
+ )
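Note on `_fsck_ext`: fsck(8) exit statuses are a bit mask, so a combined status such as 5 (1|4) falls through the dict lookup above to "Unknown error". A sketch that decodes each set bit instead (for comparison only, not the module's behaviour):

FSCK_BITS = {
    1: "Filesystem errors corrected",
    2: "System should be rebooted",
    4: "Filesystem errors left uncorrected",
    8: "Operational error",
    16: "Usage or syntax error",
    32: "Fsck canceled by user request",
    128: "Shared-library error",
}

def decode_fsck(retcode):
    if retcode == 0:
        return ["No errors"]
    return [msg for bit, msg in FSCK_BITS.items() if retcode & bit]

# decode_fsck(5) -> ['Filesystem errors corrected', 'Filesystem errors left uncorrected']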
def convert(device, permanent=False, keeplf=False):
- '''
+ """
Convert ext2/3/4 to BTRFS. Device should be mounted.
Filesystem can be converted temporarily so the further processing and rollback is possible,
@@ -430,22 +474,29 @@ def convert(device, permanent=False, keeplf=False):
salt '*' btrfs.convert /dev/sda1
salt '*' btrfs.convert /dev/sda1 permanent=True
- '''
+ """
- out = __salt__['cmd.run_all']("blkid -o export")
+ out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
- devices = salt.utils.fsutils._blkid_output(out['stdout'])
+ devices = salt.utils.fsutils._blkid_output(out["stdout"])
if not devices.get(device):
- raise CommandExecutionError("The device \"{0}\" was is not found.".format(device))
+ raise CommandExecutionError('The device "{0}" was not found.'.format(device))
- if not devices[device]["type"] in ['ext2', 'ext3', 'ext4']:
- raise CommandExecutionError("The device \"{0}\" is a \"{1}\" file system.".format(
- device, devices[device]["type"]))
+ if not devices[device]["type"] in ["ext2", "ext3", "ext4"]:
+ raise CommandExecutionError(
+ 'The device "{0}" is a "{1}" file system.'.format(
+ device, devices[device]["type"]
+ )
+ )
- mountpoint = salt.utils.fsutils._get_mounts(devices[device]["type"]).get(
- device, [{'mount_point': None}])[0].get('mount_point')
- if mountpoint == '/':
- raise CommandExecutionError("""One does not simply converts a root filesystem!
+ mountpoint = (
+ salt.utils.fsutils._get_mounts(devices[device]["type"])
+ .get(device, [{"mount_point": None}])[0]
+ .get("mount_point")
+ )
+ if mountpoint == "/":
+ raise CommandExecutionError(
+ """One does not simply converts a root filesystem!
Converting an extended root filesystem to BTRFS is a careful
and lengthy process that includes, among other steps, the following
@@ -457,94 +508,107 @@ requirements:
For further details, please refer to your OS vendor
documentation regarding this topic.
-""")
+"""
+ )
- salt.utils.fsutils._verify_run(__salt__['cmd.run_all']("umount {0}".format(device)))
+ salt.utils.fsutils._verify_run(__salt__["cmd.run_all"]("umount {0}".format(device)))
ret = {
- 'before': {
- 'fsck_status': _fsck_ext(device),
- 'mount_point': mountpoint,
- 'type': devices[device]["type"],
+ "before": {
+ "fsck_status": _fsck_ext(device),
+ "mount_point": mountpoint,
+ "type": devices[device]["type"],
}
}
- salt.utils.fsutils._verify_run(__salt__['cmd.run_all']("btrfs-convert {0}".format(device)))
- salt.utils.fsutils._verify_run(__salt__['cmd.run_all']("mount {0} {1}".format(device, mountpoint)))
+ salt.utils.fsutils._verify_run(
+ __salt__["cmd.run_all"]("btrfs-convert {0}".format(device))
+ )
+ salt.utils.fsutils._verify_run(
+ __salt__["cmd.run_all"]("mount {0} {1}".format(device, mountpoint))
+ )
# Refresh devices
- out = __salt__['cmd.run_all']("blkid -o export")
+ out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
- devices = salt.utils.fsutils._blkid_output(out['stdout'])
+ devices = salt.utils.fsutils._blkid_output(out["stdout"])
- ret['after'] = {
- 'fsck_status': "N/A", # ToDO
- 'mount_point': mountpoint,
- 'type': devices[device]["type"],
+ ret["after"] = {
+ "fsck_status": "N/A", # ToDO
+ "mount_point": mountpoint,
+ "type": devices[device]["type"],
}
# Post-migration procedures
image_path = "{0}/ext2_saved".format(mountpoint)
- orig_fstype = ret['before']['type']
+ orig_fstype = ret["before"]["type"]
if not os.path.exists(image_path):
raise CommandExecutionError(
- "BTRFS migration went wrong: the image \"{0}\" not found!".format(image_path))
+ 'BTRFS migration went wrong: the image "{0}" was not found!'.format(image_path)
+ )
if not permanent:
- ret['after']['{0}_image'.format(orig_fstype)] = image_path
- ret['after']['{0}_image_info'.format(orig_fstype)] = os.popen(
- "file {0}/image".format(image_path)).read().strip()
+ ret["after"]["{0}_image".format(orig_fstype)] = image_path
+ ret["after"]["{0}_image_info".format(orig_fstype)] = (
+ os.popen("file {0}/image".format(image_path)).read().strip()
+ )
else:
- ret['after']['{0}_image'.format(orig_fstype)] = 'removed'
- ret['after']['{0}_image_info'.format(orig_fstype)] = 'N/A'
+ ret["after"]["{0}_image".format(orig_fstype)] = "removed"
+ ret["after"]["{0}_image_info".format(orig_fstype)] = "N/A"
- salt.utils.fsutils._verify_run(__salt__['cmd.run_all']("btrfs subvolume delete {0}".format(image_path)))
- out = __salt__['cmd.run_all']("btrfs filesystem balance {0}".format(mountpoint))
+ salt.utils.fsutils._verify_run(
+ __salt__["cmd.run_all"]("btrfs subvolume delete {0}".format(image_path))
+ )
+ out = __salt__["cmd.run_all"]("btrfs filesystem balance {0}".format(mountpoint))
salt.utils.fsutils._verify_run(out)
- ret['after']['balance_log'] = out['stdout']
+ ret["after"]["balance_log"] = out["stdout"]
lost_found = "{0}/lost+found".format(mountpoint)
if os.path.exists(lost_found) and not keeplf:
- salt.utils.fsutils._verify_run(__salt__['cmd.run_all']("rm -rf {0}".format(lost_found)))
+ salt.utils.fsutils._verify_run(
+ __salt__["cmd.run_all"]("rm -rf {0}".format(lost_found))
+ )
return ret
def _restripe(mountpoint, direction, *devices, **kwargs):
- '''
+ """
Restripe BTRFS: add or remove devices from the particular mounted filesystem.
- '''
+ """
fs_log = []
if salt.utils.fsutils._is_device(mountpoint):
raise CommandExecutionError(
- "Mountpount expected, while device \"{0}\" specified".format(mountpoint))
+ 'Mountpoint expected, while device "{0}" specified'.format(mountpoint)
+ )
mounted = False
for device, mntpoints in six.iteritems(salt.utils.fsutils._get_mounts("btrfs")):
for mntdata in mntpoints:
- if mntdata['mount_point'] == mountpoint:
+ if mntdata["mount_point"] == mountpoint:
mounted = True
break
if not mounted:
raise CommandExecutionError(
- "No BTRFS device mounted on \"{0}\" mountpoint".format(mountpoint))
+ 'No BTRFS device mounted on "{0}" mountpoint'.format(mountpoint)
+ )
if not devices:
raise CommandExecutionError("No devices specified.")
- available_devices = __salt__['btrfs.devices']()
+ available_devices = __salt__["btrfs.devices"]()
for device in devices:
if device not in six.iterkeys(available_devices):
- raise CommandExecutionError("Device \"{0}\" is not recognized".format(device))
+ raise CommandExecutionError('Device "{0}" is not recognized'.format(device))
- cmd = ['btrfs device {0}'.format(direction)]
+ cmd = ["btrfs device {0}".format(direction)]
for device in devices:
cmd.append(device)
- if direction == 'add':
+ if direction == "add":
if kwargs.get("nodiscard"):
cmd.append("-K")
if kwargs.get("force"):
@@ -552,36 +616,40 @@ def _restripe(mountpoint, direction, *devices, **kwargs):
cmd.append(mountpoint)
- out = __salt__['cmd.run_all'](' '.join(cmd))
+ out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
- if out['stdout']:
- fs_log.append(out['stdout'])
+ if out["stdout"]:
+ fs_log.append(out["stdout"])
- if direction == 'add':
+ if direction == "add":
out = None
data_conversion = kwargs.get("dc")
meta_conversion = kwargs.get("mc")
if data_conversion and meta_conversion:
- out = __salt__['cmd.run_all'](
+ out = __salt__["cmd.run_all"](
"btrfs balance start -dconvert={0} -mconvert={1} {2}".format(
- data_conversion, meta_conversion, mountpoint))
+ data_conversion, meta_conversion, mountpoint
+ )
+ )
else:
- out = __salt__['cmd.run_all']("btrfs filesystem balance {0}".format(mountpoint))
+ out = __salt__["cmd.run_all"](
+ "btrfs filesystem balance {0}".format(mountpoint)
+ )
salt.utils.fsutils._verify_run(out)
- if out['stdout']:
- fs_log.append(out['stdout'])
+ if out["stdout"]:
+ fs_log.append(out["stdout"])
# Summarize the result
ret = {}
if fs_log:
- ret.update({'log': '\n'.join(fs_log)})
- ret.update(__salt__['btrfs.info'](mountpoint))
+ ret.update({"log": "\n".join(fs_log)})
+ ret.update(__salt__["btrfs.info"](mountpoint))
return ret
def add(mountpoint, *devices, **kwargs):
- '''
+ """
Add devices to a BTRFS filesystem.
General options:
@@ -594,12 +662,12 @@ def add(mountpoint, *devices, **kwargs):
.. code-block:: bash
salt '*' btrfs.add /mountpoint /dev/sda1 /dev/sda2
- '''
- return _restripe(mountpoint, 'add', *devices, **kwargs)
+ """
+ return _restripe(mountpoint, "add", *devices, **kwargs)
def delete(mountpoint, *devices, **kwargs):
- '''
+ """
Remove devices from a BTRFS filesystem.
CLI Example:
@@ -607,14 +675,14 @@ def delete(mountpoint, *devices, **kwargs):
.. code-block:: bash
salt '*' btrfs.delete /mountpoint /dev/sda1 /dev/sda2
- '''
- return _restripe(mountpoint, 'delete', *devices, **kwargs)
+ """
+ return _restripe(mountpoint, "delete", *devices, **kwargs)
def _parse_proplist(data):
- '''
+ """
Parse properties list.
- '''
+ """
out = {}
for line in data.split("\n"):
line = re.split(r"\s+", line, 1)
@@ -625,7 +693,7 @@ def _parse_proplist(data):
def properties(obj, type=None, set=None):
- '''
+ """
List properties for a given btrfs object. The object can be the path of a BTRFS device,
a mount point, or any directory/file inside the BTRFS filesystem.
@@ -641,42 +709,56 @@ def properties(obj, type=None, set=None):
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
- '''
- if type and type not in ['s', 'subvol', 'f', 'filesystem', 'i', 'inode', 'd', 'device']:
- raise CommandExecutionError("Unknown property type: \"{0}\" specified".format(type))
+ """
+ if type and type not in [
+ "s",
+ "subvol",
+ "f",
+ "filesystem",
+ "i",
+ "inode",
+ "d",
+ "device",
+ ]:
+ raise CommandExecutionError(
+ 'Unknown property type: "{0}" specified'.format(type)
+ )
- cmd = ['btrfs']
- cmd.append('property')
- cmd.append(set and 'set' or 'list')
+ cmd = ["btrfs"]
+ cmd.append("property")
+ cmd.append(set and "set" or "list")
if type:
- cmd.append('-t{0}'.format(type))
+ cmd.append("-t{0}".format(type))
cmd.append(obj)
if set:
try:
- for key, value in [[item.strip() for item in keyset.split("=")]
- for keyset in set.split(",")]:
+ for key, value in [
+ [item.strip() for item in keyset.split("=")]
+ for keyset in set.split(",")
+ ]:
cmd.append(key)
cmd.append(value)
except Exception as ex: # pylint: disable=broad-except
raise CommandExecutionError(ex)
- out = __salt__['cmd.run_all'](' '.join(cmd))
+ out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
if not set:
ret = {}
- for prop, descr in six.iteritems(_parse_proplist(out['stdout'])):
- ret[prop] = {'description': descr}
- value = __salt__['cmd.run_all'](
- "btrfs property get {0} {1}".format(obj, prop))['stdout']
- ret[prop]['value'] = value and value.split("=")[-1] or "N/A"
+ for prop, descr in six.iteritems(_parse_proplist(out["stdout"])):
+ ret[prop] = {"description": descr}
+ value = __salt__["cmd.run_all"](
+ "btrfs property get {0} {1}".format(obj, prop)
+ )["stdout"]
+ ret[prop]["value"] = value and value.split("=")[-1] or "N/A"
return ret
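A sketch of how the `set=` string is tokenized by the loop earlier in this function (values assumed):

s = "ro=false,compression=lzo"
pairs = [[item.strip() for item in keyset.split("=")] for keyset in s.split(",")]
# -> [['ro', 'false'], ['compression', 'lzo']]
# A fragment without '=' (e.g. a comma inside a quoted label) unpacks to a
# single-element list, raising ValueError, which the broad except re-raises
# as CommandExecutionError.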
def subvolume_exists(path):
- '''
+ """
Check if a subvolume is present in the filesystem.
path
@@ -688,13 +770,13 @@ def subvolume_exists(path):
salt '*' btrfs.subvolume_exists /mnt/var
- '''
- cmd = ['btrfs', 'subvolume', 'show', path]
- return __salt__['cmd.retcode'](cmd, ignore_retcode=True) == 0
+ """
+ cmd = ["btrfs", "subvolume", "show", path]
+ return __salt__["cmd.retcode"](cmd, ignore_retcode=True) == 0
def subvolume_create(name, dest=None, qgroupids=None):
- '''
+ """
Create subvolume `name` in `dest`.
Return True if the subvolume is created, False if the subvolume is
@@ -719,9 +801,9 @@ def subvolume_create(name, dest=None, qgroupids=None):
salt '*' btrfs.subvolume_create var dest=/mnt
salt '*' btrfs.subvolume_create var qgroupids='[200]'
- '''
+ """
if qgroupids and type(qgroupids) is not list:
- raise CommandExecutionError('Qgroupids parameter must be a list')
+ raise CommandExecutionError("Qgroupids parameter must be a list")
if dest:
name = os.path.join(dest, name)
@@ -730,19 +812,19 @@ def subvolume_create(name, dest=None, qgroupids=None):
if subvolume_exists(name):
return False
- cmd = ['btrfs', 'subvolume', 'create']
+ cmd = ["btrfs", "subvolume", "create"]
if type(qgroupids) is list:
- cmd.append('-i')
+ cmd.append("-i")
cmd.extend(qgroupids)
cmd.append(name)
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_delete(name=None, names=None, commit=None):
- '''
+ """
Delete the subvolume(s) from the filesystem
The user can remove one single subvolume (name) or multiple of
@@ -772,35 +854,36 @@ def subvolume_delete(name=None, names=None, commit=None):
salt '*' btrfs.subvolume_delete /var/volumes/tmp
salt '*' btrfs.subvolume_delete /var/volumes/tmp commit=after
- '''
+ """
if not name and not (names and type(names) is list):
- raise CommandExecutionError('Provide a value for the name parameter')
+ raise CommandExecutionError("Provide a value for the name parameter")
- if commit and commit not in ('after', 'each'):
- raise CommandExecutionError('Value for commit not recognized')
+ if commit and commit not in ("after", "each"):
+ raise CommandExecutionError("Value for commit not recognized")
# Filter the names and take the ones that are still there
- names = [n for n in itertools.chain([name], names or [])
- if n and subvolume_exists(n)]
+ names = [
+ n for n in itertools.chain([name], names or []) if n and subvolume_exists(n)
+ ]
# If the subvolumes are gone, we are done
if not names:
return False
- cmd = ['btrfs', 'subvolume', 'delete']
- if commit == 'after':
- cmd.append('--commit-after')
- elif commit == 'each':
- cmd.append('--commit-each')
+ cmd = ["btrfs", "subvolume", "delete"]
+ if commit == "after":
+ cmd.append("--commit-after")
+ elif commit == "each":
+ cmd.append("--commit-each")
cmd.extend(names)
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_find_new(name, last_gen):
- '''
+ """
List the recently modified files in a subvolume
name
@@ -815,25 +898,25 @@ def subvolume_find_new(name, last_gen):
salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
- '''
- cmd = ['btrfs', 'subvolume', 'find-new', name, last_gen]
+ """
+ cmd = ["btrfs", "subvolume", "find-new", name, last_gen]
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
- lines = res['stdout'].splitlines()
+ lines = res["stdout"].splitlines()
# Filenames are at the end of each inode line
- files = [l.split()[-1] for l in lines if l.startswith('inode')]
+ files = [l.split()[-1] for l in lines if l.startswith("inode")]
# The last transid is in the last line
transid = lines[-1].split()[-1]
return {
- 'files': files,
- 'transid': transid,
+ "files": files,
+ "transid": transid,
}
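The `subvolume_find_new` parsing keys off lines beginning with `inode` and takes the last whitespace-separated token as the filename; the transid comes from the final line. An illustration with assumed sample output:

sample = (
    "inode 257 file offset 0 len 4096 disk start 0 offset 0 gen 30 flags NONE foo.txt\n"
    "inode 258 file offset 0 len 120 disk start 0 offset 0 gen 30 flags NONE notes/todo\n"
    "transid marker was 1024"
)
lines = sample.splitlines()
files = [l.split()[-1] for l in lines if l.startswith("inode")]
transid = lines[-1].split()[-1]
# files -> ['foo.txt', 'notes/todo'], transid -> '1024'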
def subvolume_get_default(path):
- '''
+ """
Get the default subvolume of the filesystem path
path
@@ -845,13 +928,13 @@ def subvolume_get_default(path):
salt '*' btrfs.subvolume_get_default /var/volumes/tmp
- '''
- cmd = ['btrfs', 'subvolume', 'get-default', path]
+ """
+ cmd = ["btrfs", "subvolume", "get-default", path]
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
- line = res['stdout'].strip()
+ line = res["stdout"].strip()
# The ID is the second parameter, and the name the last one, or
# '(FS_TREE)'
#
@@ -864,13 +947,13 @@ def subvolume_get_default(path):
id_ = line.split()[1]
name = line.split()[-1]
return {
- 'id': id_,
- 'name': name,
+ "id": id_,
+ "name": name,
}
def _pop(line, key, use_rest):
- '''
+ """
Helper for the line parser.
If key is a prefix of line, will remove it from the line and will
@@ -879,25 +962,36 @@ def _pop(line, key, use_rest):
If use_rest is True, the value will be the rest of the line.
Return a tuple with the value and the rest of the line.
- '''
+ """
value = None
if line.startswith(key):
- line = line[len(key):].strip()
+ line = line[len(key) :].strip()
if use_rest:
value = line
- line = ''
+ line = ""
else:
- value, line = line.split(' ', 1)
+ value, line = line.split(" ", 1)
return value, line.strip()
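A self-contained restatement of `_pop`, plus how `subvolume_list` below consumes a line with it (sample line assumed):

def pop_field(line, key, use_rest=False):
    # mirrors _pop above
    value = None
    if line.startswith(key):
        line = line[len(key):].strip()
        if use_rest:
            value, line = line, ""
        else:
            value, line = line.split(" ", 1)
    return value, line.strip()

line = "ID 257 gen 8 top level 5 path var/tmp"
value, line = pop_field(line, "ID")                   # ('257', 'gen 8 top level 5 path var/tmp')
value, line = pop_field(line, "gen")                  # ('8', 'top level 5 path var/tmp')
value, line = pop_field(line, "top level")            # ('5', 'path var/tmp')
value, line = pop_field(line, "path", use_rest=True)  # ('var/tmp', '')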
-def subvolume_list(path, parent_id=False, absolute=False,
- ogeneration=False, generation=False,
- subvolumes=False, uuid=False, parent_uuid=False,
- sent_subvolume_uuid=False, snapshots=False,
- readonly=False, deleted=False, generation_cmp=None,
- ogeneration_cmp=None, sort=None):
- '''
+def subvolume_list(
+ path,
+ parent_id=False,
+ absolute=False,
+ ogeneration=False,
+ generation=False,
+ subvolumes=False,
+ uuid=False,
+ parent_uuid=False,
+ sent_subvolume_uuid=False,
+ snapshots=False,
+ readonly=False,
+ deleted=False,
+ generation_cmp=None,
+ ogeneration_cmp=None,
+ sort=None,
+):
+ """
List the subvolumes present in the filesystem.
path
@@ -966,45 +1060,49 @@ def subvolume_list(path, parent_id=False, absolute=False,
salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'
- '''
+ """
if sort and type(sort) is not list:
- raise CommandExecutionError('Sort parameter must be a list')
+ raise CommandExecutionError("Sort parameter must be a list")
valid_sorts = [
- ''.join((order, attrib)) for order, attrib in itertools.product(
- ('-', '', '+'), ('rootid', 'gen', 'ogen', 'path'))
+ "".join((order, attrib))
+ for order, attrib in itertools.product(
+ ("-", "", "+"), ("rootid", "gen", "ogen", "path")
+ )
]
if sort and not all(s in valid_sorts for s in sort):
- raise CommandExecutionError('Value for sort not recognized')
+ raise CommandExecutionError("Value for sort not recognized")
- cmd = ['btrfs', 'subvolume', 'list']
+ cmd = ["btrfs", "subvolume", "list"]
- params = ((parent_id, '-p'),
- (absolute, '-a'),
- (ogeneration, '-c'),
- (generation, '-g'),
- (subvolumes, '-o'),
- (uuid, '-u'),
- (parent_uuid, '-q'),
- (sent_subvolume_uuid, '-R'),
- (snapshots, '-s'),
- (readonly, '-r'),
- (deleted, '-d'))
+ params = (
+ (parent_id, "-p"),
+ (absolute, "-a"),
+ (ogeneration, "-c"),
+ (generation, "-g"),
+ (subvolumes, "-o"),
+ (uuid, "-u"),
+ (parent_uuid, "-q"),
+ (sent_subvolume_uuid, "-R"),
+ (snapshots, "-s"),
+ (readonly, "-r"),
+ (deleted, "-d"),
+ )
cmd.extend(p[1] for p in params if p[0])
if generation_cmp:
- cmd.extend(['-G', generation_cmp])
+ cmd.extend(["-G", generation_cmp])
if ogeneration_cmp:
- cmd.extend(['-C', ogeneration_cmp])
+ cmd.extend(["-C", ogeneration_cmp])
# We already validated the content of the list
if sort:
- cmd.append('--sort={}'.format(','.join(sort)))
+ cmd.append("--sort={}".format(",".join(sort)))
cmd.append(path)
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
# Parse the output. ID and gen are always at the beginning, and
@@ -1015,13 +1113,23 @@ def subvolume_list(path, parent_id=False, absolute=False,
# will fail.
#
# This list is in order.
- columns = ('ID', 'gen', 'cgen', 'parent', 'top level', 'otime',
- 'parent_uuid', 'received_uuid', 'uuid', 'path')
+ columns = (
+ "ID",
+ "gen",
+ "cgen",
+ "parent",
+ "top level",
+ "otime",
+ "parent_uuid",
+ "received_uuid",
+ "uuid",
+ "path",
+ )
result = []
- for line in res['stdout'].splitlines():
+ for line in res["stdout"].splitlines():
table = {}
for key in columns:
- value, line = _pop(line, key, key == 'path')
+ value, line = _pop(line, key, key == "path")
if value:
table[key.lower()] = value
# If line is not empty here, we are not able to parse it
@@ -1032,7 +1140,7 @@ def subvolume_list(path, parent_id=False, absolute=False,
def subvolume_set_default(subvolid, path):
- '''
+ """
Set the subvolume as default
subvolid
@@ -1047,16 +1155,16 @@ def subvolume_set_default(subvolid, path):
salt '*' btrfs.subvolume_set_default 257 /var/volumes/tmp
- '''
- cmd = ['btrfs', 'subvolume', 'set-default', subvolid, path]
+ """
+ cmd = ["btrfs", "subvolume", "set-default", subvolid, path]
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_show(path):
- '''
+ """
Show information of a given subvolume
path
@@ -1068,28 +1176,28 @@ def subvolume_show(path):
salt '*' btrfs.subvolume_show /var/volumes/tmp
- '''
- cmd = ['btrfs', 'subvolume', 'show', path]
+ """
+ cmd = ["btrfs", "subvolume", "show", path]
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
result = {}
table = {}
# The real name is the first line, followed by a table of
# colon-separated values.
- stdout = res['stdout'].splitlines()
+ stdout = res["stdout"].splitlines()
key = stdout.pop(0)
result[key.strip()] = table
for line in stdout:
- key, value = line.split(':', 1)
+ key, value = line.split(":", 1)
table[key.lower().strip()] = value.strip()
return result
def subvolume_snapshot(source, dest=None, name=None, read_only=False):
- '''
+ """
Create a snapshot of a source subvolume
source
@@ -1112,13 +1220,13 @@ def subvolume_snapshot(source, dest=None, name=None, read_only=False):
salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup
- '''
+ """
if not dest and not name:
- raise CommandExecutionError('Provide parameter dest, name, or both')
+ raise CommandExecutionError("Provide parameter dest, name, or both")
- cmd = ['btrfs', 'subvolume', 'snapshot']
+ cmd = ["btrfs", "subvolume", "snapshot"]
if read_only:
- cmd.append('-r')
+ cmd.append("-r")
if dest and not name:
cmd.append(dest)
if dest and name:
@@ -1126,13 +1234,13 @@ def subvolume_snapshot(source, dest=None, name=None, read_only=False):
if name:
cmd.append(name)
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_sync(path, subvolids=None, sleep=None):
- '''
+ """
Wait until the given subvolumes are completely removed from the
filesystem after deletion.
@@ -1152,18 +1260,18 @@ def subvolume_sync(path, subvolids=None, sleep=None):
salt '*' btrfs.subvolume_sync /var/volumes/tmp
salt '*' btrfs.subvolume_sync /var/volumes/tmp subvolids='[257]'
- '''
+ """
if subvolids and type(subvolids) is not list:
- raise CommandExecutionError('Subvolids parameter must be a list')
+ raise CommandExecutionError("Subvolids parameter must be a list")
- cmd = ['btrfs', 'subvolume', 'sync']
+ cmd = ["btrfs", "subvolume", "sync"]
if sleep:
- cmd.extend(['-s', sleep])
+ cmd.extend(["-s", sleep])
cmd.append(path)
if subvolids:
cmd.extend(subvolids)
- res = __salt__['cmd.run_all'](cmd)
+ res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
diff --git a/salt/modules/cabal.py b/salt/modules/cabal.py
index 4ca659e4878..cc754df8567 100644
--- a/salt/modules/cabal.py
+++ b/salt/modules/cabal.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
-'''
+"""
Manage and query Cabal packages
===============================
.. versionadded:: 2015.8.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
@@ -16,21 +16,20 @@ from salt.exceptions import CommandExecutionError
logger = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
-__func_alias__ = {
- 'list_': 'list'
-}
+__func_alias__ = {"list_": "list"}
def __virtual__():
- '''
+ """
Only work when cabal-install is installed.
- '''
- return (salt.utils.path.which('cabal') is not None) and \
- (salt.utils.path.which('ghc-pkg') is not None)
+ """
+ return (salt.utils.path.which("cabal") is not None) and (
+ salt.utils.path.which("ghc-pkg") is not None
+ )
def update(user=None, env=None):
- '''
+ """
Updates list of known packages.
user
@@ -47,16 +46,12 @@ def update(user=None, env=None):
salt '*' cabal.update
- '''
- return __salt__['cmd.run_all']('cabal update', runas=user, env=env)
+ """
+ return __salt__["cmd.run_all"]("cabal update", runas=user, env=env)
-def install(pkg=None,
- pkgs=None,
- user=None,
- install_global=False,
- env=None):
- '''
+def install(pkg=None, pkgs=None, user=None, install_global=False, env=None):
+ """
Install a cabal package.
pkg
@@ -83,32 +78,28 @@ def install(pkg=None,
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
- '''
+ """
- cmd = ['cabal install']
+ cmd = ["cabal install"]
if install_global:
- cmd.append('--global')
+ cmd.append("--global")
if pkg:
cmd.append('"{0}"'.format(pkg))
elif pkgs:
cmd.append('"{0}"'.format('" "'.join(pkgs)))
- result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
+ result = __salt__["cmd.run_all"](" ".join(cmd), runas=user, env=env)
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
return result
-def list_(
- pkg=None,
- user=None,
- installed=False,
- env=None):
- '''
+def list_(pkg=None, user=None, installed=False, env=None):
+ """
List packages matching a search string.
pkg
@@ -128,19 +119,19 @@ def list_(
salt '*' cabal.list
salt '*' cabal.list ShellCheck
- '''
- cmd = ['cabal list --simple-output']
+ """
+ cmd = ["cabal list --simple-output"]
if installed:
- cmd.append('--installed')
+ cmd.append("--installed")
if pkg:
cmd.append('"{0}"'.format(pkg))
- result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
+ result = __salt__["cmd.run_all"](" ".join(cmd), runas=user, env=env)
packages = {}
- for line in result['stdout'].splitlines():
+ for line in result["stdout"].splitlines():
data = line.split()
package_name = data[0]
package_version = data[1]
@@ -149,10 +140,8 @@ def list_(
return packages
-def uninstall(pkg,
- user=None,
- env=None):
- '''
+def uninstall(pkg, user=None, env=None):
+ """
Uninstall a cabal package.
pkg
@@ -170,13 +159,13 @@ def uninstall(pkg,
salt '*' cabal.uninstall ShellCheck
- '''
- cmd = ['ghc-pkg unregister']
+ """
+ cmd = ["ghc-pkg unregister"]
cmd.append('"{0}"'.format(pkg))
- result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
+ result = __salt__["cmd.run_all"](" ".join(cmd), runas=user, env=env)
- if result['retcode'] != 0:
- raise CommandExecutionError(result['stderr'])
+ if result["retcode"] != 0:
+ raise CommandExecutionError(result["stderr"])
return result
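`cabal list --simple-output` emits one `name version` pair per line; the accumulation into `packages` is elided from the hunk above, so this sketch assumes versions are collected per package name:

sample = "ShellCheck 0.3.5\nShellCheck 0.4.7\npandoc 2.9.2"
packages = {}
for line in sample.splitlines():
    package_name, package_version = line.split()
    packages.setdefault(package_name, []).append(package_version)
# packages -> {'ShellCheck': ['0.3.5', '0.4.7'], 'pandoc': ['2.9.2']}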
diff --git a/salt/modules/capirca_acl.py b/salt/modules/capirca_acl.py
index 277ea7b141d..f5fc60db327 100644
--- a/salt/modules/capirca_acl.py
+++ b/salt/modules/capirca_acl.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Capirca ACL
===========
@@ -20,36 +20,40 @@ The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
-import re
+import datetime
import inspect
import logging
-import datetime
+import re
-log = logging.getLogger(__file__)
+# Import Salt libs
+import salt.utils.files
# Import third party libs
from salt.ext import six
+
+log = logging.getLogger(__file__)
+
+
try:
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
+
HAS_CAPIRCA = True
except ImportError:
HAS_CAPIRCA = False
-# Import Salt libs
-import salt.utils.files
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
-__virtualname__ = 'capirca'
-__proxyenabled__ = ['*']
+__virtualname__ = "capirca"
+__proxyenabled__ = ["*"]
# allow any proxy type
# ------------------------------------------------------------------------------
@@ -58,13 +62,13 @@ __proxyenabled__ = ['*']
def __virtual__():
- '''
+ """
This module requires at least Capirca to work.
- '''
+ """
if HAS_CAPIRCA:
return __virtualname__
else:
- return (False, 'The capirca module (capirca_acl) cannot be loaded.')
+ return (False, "The capirca module (capirca_acl) cannot be loaded.")
# ------------------------------------------------------------------------------
@@ -78,67 +82,67 @@ def __virtual__():
# we can revisit this later if necessary.
_TERM_FIELDS = {
- 'action': [],
- 'address': [],
- 'address_exclude': [],
- 'comment': [],
- 'counter': None,
- 'expiration': None,
- 'destination_address': [],
- 'destination_address_exclude': [],
- 'destination_port': [],
- 'destination_prefix': [],
- 'forwarding_class': [],
- 'forwarding_class_except': [],
- 'logging': [],
- 'log_name': None,
- 'loss_priority': None,
- 'option': [],
- 'owner': None,
- 'policer': None,
- 'port': [],
- 'precedence': [],
- 'principals': [],
- 'protocol': [],
- 'protocol_except': [],
- 'qos': None,
- 'pan_application': [],
- 'routing_instance': None,
- 'source_address': [],
- 'source_address_exclude': [],
- 'source_port': [],
- 'source_prefix': [],
- 'verbatim': [],
- 'packet_length': None,
- 'fragment_offset': None,
- 'hop_limit': None,
- 'icmp_type': [],
- 'icmp_code': None,
- 'ether_type': [],
- 'traffic_class_count': None,
- 'traffic_type': [],
- 'translated': False,
- 'dscp_set': None,
- 'dscp_match': [],
- 'dscp_except': [],
- 'next_ip': None,
- 'flexible_match_range': [],
- 'source_prefix_except': [],
- 'destination_prefix_except': [],
- 'vpn': None,
- 'source_tag': [],
- 'destination_tag': [],
- 'source_interface': None,
- 'destination_interface': None,
- 'platform': [],
- 'platform_exclude': [],
- 'timeout': None,
- 'flattened': False,
- 'flattened_addr': None,
- 'flattened_saddr': None,
- 'flattened_daddr': None,
- 'priority': None,
- 'ttl': None
+ "action": [],
+ "address": [],
+ "address_exclude": [],
+ "comment": [],
+ "counter": None,
+ "expiration": None,
+ "destination_address": [],
+ "destination_address_exclude": [],
+ "destination_port": [],
+ "destination_prefix": [],
+ "forwarding_class": [],
+ "forwarding_class_except": [],
+ "logging": [],
+ "log_name": None,
+ "loss_priority": None,
+ "option": [],
+ "owner": None,
+ "policer": None,
+ "port": [],
+ "precedence": [],
+ "principals": [],
+ "protocol": [],
+ "protocol_except": [],
+ "qos": None,
+ "pan_application": [],
+ "routing_instance": None,
+ "source_address": [],
+ "source_address_exclude": [],
+ "source_port": [],
+ "source_prefix": [],
+ "verbatim": [],
+ "packet_length": None,
+ "fragment_offset": None,
+ "hop_limit": None,
+ "icmp_type": [],
+ "icmp_code": None,
+ "ether_type": [],
+ "traffic_class_count": None,
+ "traffic_type": [],
+ "translated": False,
+ "dscp_set": None,
+ "dscp_match": [],
+ "dscp_except": [],
+ "next_ip": None,
+ "flexible_match_range": [],
+ "source_prefix_except": [],
+ "destination_prefix_except": [],
+ "vpn": None,
+ "source_tag": [],
+ "destination_tag": [],
+ "source_interface": None,
+ "destination_interface": None,
+ "platform": [],
+ "platform_exclude": [],
+ "timeout": None,
+ "flattened": False,
+ "flattened_addr": None,
+ "flattened_saddr": None,
+ "flattened_daddr": None,
+ "priority": None,
+ "ttl": None,
}
# IP-type fields
@@ -146,15 +150,15 @@ _TERM_FIELDS = {
# but they need to be converted to `nacaddr.IP`
# this pre-processing is done in `_clean_term_opts`
_IP_FILEDS = [
- 'source_address',
- 'source_address_exclude',
- 'destination_address',
- 'address',
- 'address_exclude',
- 'flattened_addr',
- 'flattened_saddr',
- 'flattened_daddr',
- 'next_ip'
+ "source_address",
+ "source_address_exclude",
+ "destination_address",
+ "address",
+ "address_exclude",
+ "flattened_addr",
+ "flattened_saddr",
+ "flattened_daddr",
+ "next_ip",
]
_SERVICES = {}
@@ -170,51 +174,61 @@ if HAS_CAPIRCA:
def _add_object(self, obj):
return
- setattr(_TempTerm, 'AddObject', _add_object)
+ setattr(_TempTerm, "AddObject", _add_object)
dumy_term = _TempTerm(None)
for item in dir(dumy_term):
- if hasattr(item, '__func__') or item.startswith('_') or item != item.lower():
+ if hasattr(item, "__func__") or item.startswith("_") or item != item.lower():
continue
_TERM_FIELDS[item] = getattr(dumy_term, item)
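Reviewer note: the filters in this loop operate on the attribute *name* (a string), so `hasattr(item, "__func__")` is always False; methods are effectively excluded by the all-lowercase test. A standalone sketch of the same discovery pattern:

class Dummy:
    action = []        # collected: lowercase data attribute
    CamelField = None  # skipped: not all-lowercase
    _private = 0       # skipped: leading underscore

obj = Dummy()
fields = {}
for item in dir(obj):
    if item.startswith("_") or item != item.lower():
        continue
    fields[item] = getattr(obj, item)
# fields -> {'action': []}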
class _Policy(capirca.lib.policy.Policy):
- '''
+ """
Extending the Capirca Policy class to allow inserting custom filters.
- '''
+ """
+
def __init__(self):
self.filters = []
- self.filename = ''
+ self.filename = ""
class _Term(capirca.lib.policy.Term):
- '''
+ """
Extending the Capirca Term class to allow setting field valued on the fly.
- '''
+ """
+
def __init__(self):
for field, default in six.iteritems(_TERM_FIELDS):
setattr(self, field, default)
def _import_platform_generator(platform):
- '''
+ """
Given a specific platform (under the Capirca conventions),
return the generator class.
The generator class is identified looking under the module
for a class inheriting the `ACLGenerator` class.
- '''
- log.debug('Using platform: {plat}'.format(plat=platform))
+ """
+ log.debug("Using platform: {plat}".format(plat=platform))
for mod_name, mod_obj in inspect.getmembers(capirca.aclgen):
if mod_name == platform and inspect.ismodule(mod_obj):
- for plat_obj_name, plat_obj in inspect.getmembers(mod_obj): # pylint: disable=unused-variable
- if inspect.isclass(plat_obj) and issubclass(plat_obj, capirca.lib.aclgenerator.ACLGenerator):
- log.debug('Identified Capirca class {cls} for {plat}'.format(
- cls=plat_obj,
- plat=platform))
+ for plat_obj_name, plat_obj in inspect.getmembers(
+ mod_obj
+ ): # pylint: disable=unused-variable
+ if inspect.isclass(plat_obj) and issubclass(
+ plat_obj, capirca.lib.aclgenerator.ACLGenerator
+ ):
+ log.debug(
+ "Identified Capirca class {cls} for {plat}".format(
+ cls=plat_obj, plat=platform
+ )
+ )
return plat_obj
- log.error('Unable to identify any Capirca plaform class for {plat}'.format(plat=platform))
+ log.error(
+ "Unable to identify any Capirca plaform class for {plat}".format(plat=platform)
+ )
def _get_services_mapping():
- '''
+ """
Build a map of services based on the IANA assignment list:
http://www.iana.org/assignments/port-numbers
@@ -227,54 +241,55 @@ def _get_services_mapping():
In the worst case, the user will not be able to specify the
services shortcut and they will need to specify the protocol / port combination
using the source_port / destination_port & protocol fields.
- '''
+ """
if _SERVICES:
return _SERVICES
- services_txt = ''
+ services_txt = ""
try:
- with salt.utils.files.fopen('/etc/services', 'r') as srv_f:
+ with salt.utils.files.fopen("/etc/services", "r") as srv_f:
services_txt = salt.utils.stringutils.to_unicode(srv_f.read())
except IOError as ioe:
- log.error('Unable to read from /etc/services:')
+ log.error("Unable to read from /etc/services:")
log.error(ioe)
return _SERVICES # no mapping possible, sorry
# will return the default mapping
- service_rgx = re.compile(r'^([a-zA-Z0-9-]+)\s+(\d+)\/(tcp|udp)(.*)$')
+ service_rgx = re.compile(r"^([a-zA-Z0-9-]+)\s+(\d+)\/(tcp|udp)(.*)$")
for line in services_txt.splitlines():
service_rgx_s = service_rgx.search(line)
if service_rgx_s and len(service_rgx_s.groups()) == 4:
srv_name, port, protocol, _ = service_rgx_s.groups()
if srv_name not in _SERVICES:
- _SERVICES[srv_name] = {
- 'port': [],
- 'protocol': []
- }
+ _SERVICES[srv_name] = {"port": [], "protocol": []}
try:
- _SERVICES[srv_name]['port'].append(int(port))
+ _SERVICES[srv_name]["port"].append(int(port))
except ValueError as verr:
log.error(verr)
- log.error('Did not read that properly:')
+ log.error("Did not read that properly:")
log.error(line)
- log.error('Please report the above error: {port} does not seem a valid port value!'.format(port=port))
- _SERVICES[srv_name]['protocol'].append(protocol)
+ log.error(
+ "Please report the above error: {port} does not seem a valid port value!".format(
+ port=port
+ )
+ )
+ _SERVICES[srv_name]["protocol"].append(protocol)
return _SERVICES
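The regex above matches `/etc/services` entries such as `ssh 22/tcp`; a quick standalone check (sample lines assumed):

import re

service_rgx = re.compile(r"^([a-zA-Z0-9-]+)\s+(\d+)\/(tcp|udp)(.*)$")
for line in ["ssh              22/tcp", "domain           53/udp", "# comment"]:
    match = service_rgx.search(line)
    if match:
        srv_name, port, protocol, _ = match.groups()
        print(srv_name, port, protocol)  # ssh 22 tcp / domain 53 udp; comments never match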
def _translate_port(port):
- '''
+ """
Look into services and return the port value using the
service name as lookup value.
- '''
+ """
services = _get_services_mapping()
- if port in services and services[port]['port']:
- return services[port]['port'][0]
+ if port in services and services[port]["port"]:
+ return services[port]["port"][0]
return port
def _make_it_list(dict_, field_name, value):
- '''
+ """
Return the object list.
- '''
+ """
prev_value = []
# firstly we'll collect the prev value
if field_name in dict_:
@@ -283,7 +298,7 @@ def _make_it_list(dict_, field_name, value):
return prev_value
elif isinstance(value, (tuple, list)):
# other type of iterables
- if field_name in ('source_port', 'destination_port'):
+ if field_name in ("source_port", "destination_port"):
# port fields are more special
# they can either be a list of integers, either a list of tuples
# list of integers = a list of ports
@@ -309,12 +324,10 @@ def _make_it_list(dict_, field_name, value):
port_start = _translate_port(port_start)
if not isinstance(port_end, int):
port_end = _translate_port(port_end)
- translated_portval.append(
- (port_start, port_end)
- )
+ translated_portval.append((port_start, port_end))
return list(set(prev_value + translated_portval))
return list(set(prev_value + list(value)))
- if field_name in ('source_port', 'destination_port'):
+ if field_name in ("source_port", "destination_port"):
if not isinstance(value, int):
value = _translate_port(value)
return list(set(prev_value + [(value, value)])) # a list of tuples
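Illustration of the port normalization this branch performs (service lookup assumed to resolve "ssh" to 22; ordering not guaranteed because of the set-based dedup):

# _make_it_list({}, "destination_port", "ssh") -> [(22, 22)]
# _make_it_list({}, "destination_port", 8080)  -> [(8080, 8080)]
# (the handling of mixed int/tuple iterables sits in the lines elided
#  between the two hunks above)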
@@ -323,56 +336,62 @@ def _make_it_list(dict_, field_name, value):
def _clean_term_opts(term_opts):
- '''
+ """
Cleanup the term opts:
- strip Null and empty valuee, defaulting their value to their base definition from _TERM_FIELDS
- convert to `nacaddr.IP` fields from `_IP_FILEDS`
- create lists for those fields requiring it
- '''
+ """
clean_opts = {}
_services = _get_services_mapping()
for field, value in six.iteritems(term_opts):
# firstly we'll process special fields like source_service or destination_services
# which will inject values directly in the source or destination port and protocol
- if field == 'source_service' and value:
+ if field == "source_service" and value:
if isinstance(value, six.string_types):
value = _make_it_list(clean_opts, field, value)
- log.debug('Processing special source services:')
+ log.debug("Processing special source services:")
log.debug(value)
for service in value:
if service and service in _services:
# if valid source_service
# take the port and protocol values from the global and inject in the term config
- clean_opts['source_port'] = _make_it_list(clean_opts,
- 'source_port',
- _services[service]['port'])
- clean_opts['protocol'] = _make_it_list(clean_opts,
- 'protocol',
- _services[service]['protocol'])
- log.debug('Built source_port field, after processing special source services:')
- log.debug(clean_opts.get('source_port'))
- log.debug('Built protocol field, after processing special source services:')
- log.debug(clean_opts.get('protocol'))
- elif field == 'destination_service' and value:
+ clean_opts["source_port"] = _make_it_list(
+ clean_opts, "source_port", _services[service]["port"]
+ )
+ clean_opts["protocol"] = _make_it_list(
+ clean_opts, "protocol", _services[service]["protocol"]
+ )
+ log.debug(
+ "Built source_port field, after processing special source services:"
+ )
+ log.debug(clean_opts.get("source_port"))
+ log.debug("Built protocol field, after processing special source services:")
+ log.debug(clean_opts.get("protocol"))
+ elif field == "destination_service" and value:
if isinstance(value, six.string_types):
value = _make_it_list(clean_opts, field, value)
- log.debug('Processing special destination services:')
+ log.debug("Processing special destination services:")
log.debug(value)
for service in value:
if service and service in _services:
# if valid destination_service
# take the port and protocol values from the global and inject in the term config
- clean_opts['destination_port'] = _make_it_list(clean_opts,
- 'destination_port',
- _services[service]['port'])
- clean_opts['protocol'] = _make_it_list(clean_opts,
- 'protocol',
- _services[service]['protocol'])
- log.debug('Built source_port field, after processing special destination services:')
- log.debug(clean_opts.get('destination_service'))
- log.debug('Built protocol field, after processing special destination services:')
- log.debug(clean_opts.get('protocol'))
+ clean_opts["destination_port"] = _make_it_list(
+ clean_opts, "destination_port", _services[service]["port"]
+ )
+ clean_opts["protocol"] = _make_it_list(
+ clean_opts, "protocol", _services[service]["protocol"]
+ )
+ log.debug(
+ "Built source_port field, after processing special destination services:"
+ )
+ log.debug(clean_opts.get("destination_service"))
+ log.debug(
+ "Built protocol field, after processing special destination services:"
+ )
+ log.debug(clean_opts.get("protocol"))
# not a special field, but it has to be a valid one
elif field in _TERM_FIELDS and value and value != _TERM_FIELDS[field]:
# if not a special field type
@@ -393,9 +412,9 @@ def _clean_term_opts(term_opts):
def _lookup_element(lst, key):
- '''
+ """
Find an dictionary in a list of dictionaries, given its main key.
- '''
+ """
if not lst:
return {}
for ele in lst:
@@ -406,22 +425,20 @@ def _lookup_element(lst, key):
return {}
-def _get_pillar_cfg(pillar_key,
- pillarenv=None,
- saltenv=None):
- '''
+def _get_pillar_cfg(pillar_key, pillarenv=None, saltenv=None):
+ """
Retrieve the pillar data from the right environment.
- '''
- pillar_cfg = __salt__['pillar.get'](pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv)
+ """
+ pillar_cfg = __salt__["pillar.get"](
+ pillar_key, pillarenv=pillarenv, saltenv=saltenv
+ )
return pillar_cfg
def _cleanup(lst):
- '''
+ """
Return a list of non-empty dictionaries.
- '''
+ """
clean = []
for ele in lst:
if ele and isinstance(ele, dict):
@@ -430,14 +447,14 @@ def _cleanup(lst):
def _merge_list_of_dict(first, second, prepend=True):
- '''
+ """
Merge lists of dictionaries.
    Each element of the list is a dictionary with one single key,
    which is then used as the unique lookup.
    The first list has higher priority than the second.
    When the two lists overlap, the element keeps its position,
    but its content is taken from the higher-priority list.
- '''
+ """
first = _cleanup(first)
second = _cleanup(second)
if not first and not second:
@@ -472,59 +489,66 @@ def _merge_list_of_dict(first, second, prepend=True):
return merged
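The merge semantics described in the docstring, sketched with illustrative values:

    first = [{"term-1": {"action": "accept"}}, {"term-2": {"protocol": "tcp"}}]
    second = [{"term-2": {"protocol": "udp"}}, {"term-3": {"action": "reject"}}]
    _merge_list_of_dict(first, second)
    # term-2 keeps its position but takes its content from `first` (higher priority);
    # term-3 exists only in `second`, so it is carried over:
    # roughly [{"term-1": ...}, {"term-2": {"protocol": "tcp"}}, {"term-3": ...}]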
-def _get_term_object(filter_name,
- term_name,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None,
- merge_pillar=True,
- **term_fields):
- '''
+def _get_term_object(
+ filter_name,
+ term_name,
+ pillar_key="acl",
+ pillarenv=None,
+ saltenv=None,
+ merge_pillar=True,
+ **term_fields
+):
+ """
Return an instance of the ``_Term`` class given the term options.
- '''
- log.debug('Generating config for term {tname} under filter {fname}'.format(
- tname=term_name,
- fname=filter_name
- ))
+ """
+ log.debug(
+ "Generating config for term {tname} under filter {fname}".format(
+ tname=term_name, fname=filter_name
+ )
+ )
term = _Term()
term.name = term_name
term_opts = {}
if merge_pillar:
- term_opts = get_term_pillar(filter_name,
- term_name,
- pillar_key=pillar_key,
- saltenv=saltenv,
- pillarenv=pillarenv)
- log.debug('Merging with pillar data:')
+ term_opts = get_term_pillar(
+ filter_name,
+ term_name,
+ pillar_key=pillar_key,
+ saltenv=saltenv,
+ pillarenv=pillarenv,
+ )
+ log.debug("Merging with pillar data:")
log.debug(term_opts)
term_opts = _clean_term_opts(term_opts)
- log.debug('Cleaning up pillar data:')
+ log.debug("Cleaning up pillar data:")
log.debug(term_opts)
- log.debug('Received processing opts:')
+ log.debug("Received processing opts:")
log.debug(term_fields)
- log.debug('Cleaning up processing opts:')
+ log.debug("Cleaning up processing opts:")
term_fields = _clean_term_opts(term_fields)
log.debug(term_fields)
- log.debug('Final term opts:')
+ log.debug("Final term opts:")
term_opts.update(term_fields)
log.debug(term_fields)
for field, value in six.iteritems(term_opts):
# setting the field attributes to the term instance of _Term
setattr(term, field, value)
- log.debug('Term config:')
+ log.debug("Term config:")
log.debug(six.text_type(term))
return term
-def _get_policy_object(platform,
- filters=None,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None,
- merge_pillar=True):
- '''
+def _get_policy_object(
+ platform,
+ filters=None,
+ pillar_key="acl",
+ pillarenv=None,
+ saltenv=None,
+ merge_pillar=True,
+):
+ """
Return an instance of the ``_Policy`` class given the filters config.
- '''
+ """
policy = _Policy()
policy_filters = []
if not filters:
@@ -535,11 +559,8 @@ def _get_policy_object(platform,
filter_name = filter_.keys()[0]
filter_config = filter_.values()[0]
header = capirca.lib.policy.Header() # same header everywhere
- target_opts = [
- platform,
- filter_name
- ]
- filter_options = filter_config.pop('options', None)
+ target_opts = [platform, filter_name]
+ filter_options = filter_config.pop("options", None)
if filter_options:
filter_options = _make_it_list({}, filter_name, filter_options)
# make sure the filter options are sent as list
@@ -547,38 +568,39 @@ def _get_policy_object(platform,
target = capirca.lib.policy.Target(target_opts)
header.AddObject(target)
filter_terms = []
- for term_ in filter_config.get('terms', []):
+ for term_ in filter_config.get("terms", []):
if term_ and isinstance(term_, dict):
term_name = term_.keys()[0]
term_fields = term_.values()[0]
- term = _get_term_object(filter_name,
- term_name,
- pillar_key=pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv,
- merge_pillar=merge_pillar,
- **term_fields)
+ term = _get_term_object(
+ filter_name,
+ term_name,
+ pillar_key=pillar_key,
+ pillarenv=pillarenv,
+ saltenv=saltenv,
+ merge_pillar=merge_pillar,
+ **term_fields
+ )
filter_terms.append(term)
- policy_filters.append(
- (header, filter_terms)
- )
+ policy_filters.append((header, filter_terms))
policy.filters = policy_filters
- log.debug('Policy config:')
+ log.debug("Policy config:")
log.debug(six.text_type(policy))
platform_generator = _import_platform_generator(platform)
policy_config = platform_generator(policy, 2)
- log.debug('Generating policy config for {platform}:'.format(
- platform=platform))
+ log.debug("Generating policy config for {platform}:".format(platform=platform))
log.debug(six.text_type(policy_config))
return policy_config
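For reference, the ``filters`` structure consumed above looks roughly like this (hypothetical names):

    filters = [
        {
            "filter-name": {
                "options": ["inet"],  # optional; normalized to a list via _make_it_list
                "terms": [
                    {"term-name": {"source_port": 1234, "action": "accept"}},
                ],
            }
        }
    ]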
-def _revision_tag(text,
- revision_id=None,
- revision_no=None,
- revision_date=True,
- revision_date_format='%Y/%m/%d'):
- '''
+def _revision_tag(
+ text,
+ revision_id=None,
+ revision_no=None,
+ revision_date=True,
+ revision_date_format="%Y/%m/%d",
+):
+ """
Refactor revision tag comments.
    Capirca generates the filter text with the following tag keys:
@@ -589,46 +611,51 @@ def _revision_tag(text,
This function goes through all the config lines and replaces
those tags with the content requested by the user.
If a certain value is not provided, the corresponding tag will be stripped.
- '''
+ """
timestamp = datetime.datetime.now().strftime(revision_date_format)
new_text = []
for line in text.splitlines():
- if '$Id:$' in line:
+ if "$Id:$" in line:
if not revision_id: # if no explicit revision ID required
continue # jump to next line, ignore this one
- line = line.replace('$Id:$', '$Id: {rev_id} $'.format(rev_id=revision_id))
- if '$Revision:$' in line:
+ line = line.replace("$Id:$", "$Id: {rev_id} $".format(rev_id=revision_id))
+ if "$Revision:$" in line:
if not revision_no: # if no explicit revision number required
continue # jump to next line, ignore this one
- line = line.replace('$Revision:$', '$Revision: {rev_no} $'.format(rev_no=revision_no))
- if '$Date:$' in line:
+ line = line.replace(
+ "$Revision:$", "$Revision: {rev_no} $".format(rev_no=revision_no)
+ )
+ if "$Date:$" in line:
if not revision_date:
continue # jump
- line = line.replace('$Date:$', '$Date: {ts} $'.format(ts=timestamp))
+ line = line.replace("$Date:$", "$Date: {ts} $".format(ts=timestamp))
new_text.append(line)
- return '\n'.join(new_text)
+ return "\n".join(new_text)
+
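A minimal sketch of the tag replacement above (the date is whatever ``datetime.now()`` yields):

    text = "! $Id:$\n! $Revision:$\n! $Date:$"
    _revision_tag(text, revision_id="5f3f2e1")
    # -> "! $Id: 5f3f2e1 $\n! $Date: 2020/02/21 $"
    # $Revision:$ is dropped because revision_no was not supplied;
    # $Date:$ is kept because revision_date defaults to True.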
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
-def get_term_config(platform,
- filter_name,
- term_name,
- filter_options=None,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None,
- merge_pillar=True,
- revision_id=None,
- revision_no=None,
- revision_date=True,
- revision_date_format='%Y/%m/%d',
- source_service=None,
- destination_service=None,
- **term_fields):
- '''
+def get_term_config(
+ platform,
+ filter_name,
+ term_name,
+ filter_options=None,
+ pillar_key="acl",
+ pillarenv=None,
+ saltenv=None,
+ merge_pillar=True,
+ revision_id=None,
+ revision_no=None,
+ revision_date=True,
+ revision_date_format="%Y/%m/%d",
+ source_service=None,
+ destination_service=None,
+ **term_fields
+):
+ """
Return the configuration of a single policy term.
platform
@@ -862,50 +889,55 @@ def get_term_config(platform,
remark term-name
permit ip host 1.2.3.4 host 5.6.7.8
exit
- '''
+ """
terms = []
- term = {
- term_name: {
- }
- }
+ term = {term_name: {}}
term[term_name].update(term_fields)
- term[term_name].update({
- 'source_service': _make_it_list({}, 'source_service', source_service),
- 'destination_service': _make_it_list({}, 'destination_service', destination_service),
- })
+ term[term_name].update(
+ {
+ "source_service": _make_it_list({}, "source_service", source_service),
+ "destination_service": _make_it_list(
+ {}, "destination_service", destination_service
+ ),
+ }
+ )
terms.append(term)
if not filter_options:
filter_options = []
- return get_filter_config(platform,
- filter_name,
- filter_options=filter_options,
- terms=terms,
- pillar_key=pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv,
- merge_pillar=merge_pillar,
- only_lower_merge=True,
- revision_id=revision_id,
- revision_no=revision_no,
- revision_date=revision_date,
- revision_date_format=revision_date_format)
+ return get_filter_config(
+ platform,
+ filter_name,
+ filter_options=filter_options,
+ terms=terms,
+ pillar_key=pillar_key,
+ pillarenv=pillarenv,
+ saltenv=saltenv,
+ merge_pillar=merge_pillar,
+ only_lower_merge=True,
+ revision_id=revision_id,
+ revision_no=revision_no,
+ revision_date=revision_date,
+ revision_date_format=revision_date_format,
+ )
-def get_filter_config(platform,
- filter_name,
- filter_options=None,
- terms=None,
- prepend=True,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None,
- merge_pillar=True,
- only_lower_merge=False,
- revision_id=None,
- revision_no=None,
- revision_date=True,
- revision_date_format='%Y/%m/%d'):
- '''
+def get_filter_config(
+ platform,
+ filter_name,
+ filter_options=None,
+ terms=None,
+ prepend=True,
+ pillar_key="acl",
+ pillarenv=None,
+ saltenv=None,
+ merge_pillar=True,
+ only_lower_merge=False,
+ revision_id=None,
+ revision_no=None,
+ revision_date=True,
+ revision_date_format="%Y/%m/%d",
+):
+ """
Return the configuration of a policy filter.
platform
@@ -999,57 +1031,65 @@ def get_filter_config(platform,
- [5678, 5680]
protocol: tcp
action: accept
- '''
+ """
if not filter_options:
filter_options = []
if not terms:
terms = []
if merge_pillar and not only_lower_merge:
- acl_pillar_cfg = _get_pillar_cfg(pillar_key,
- saltenv=saltenv,
- pillarenv=pillarenv)
+ acl_pillar_cfg = _get_pillar_cfg(
+ pillar_key, saltenv=saltenv, pillarenv=pillarenv
+ )
filter_pillar_cfg = _lookup_element(acl_pillar_cfg, filter_name)
- filter_options = filter_options or filter_pillar_cfg.pop('options', None)
+ filter_options = filter_options or filter_pillar_cfg.pop("options", None)
if filter_pillar_cfg:
# Only when it was able to find the filter in the ACL config
- pillar_terms = filter_pillar_cfg.get('terms', []) # No problem if empty in the pillar
+ pillar_terms = filter_pillar_cfg.get(
+ "terms", []
+ ) # No problem if empty in the pillar
terms = _merge_list_of_dict(terms, pillar_terms, prepend=prepend)
# merge the passed variable with the pillar data
# any filter term not defined here, will be appended from the pillar
# new terms won't be removed
filters = []
- filters.append({
- filter_name: {
- 'options': _make_it_list({}, filter_name, filter_options),
- 'terms': terms
+ filters.append(
+ {
+ filter_name: {
+ "options": _make_it_list({}, filter_name, filter_options),
+ "terms": terms,
+ }
}
- })
- return get_policy_config(platform,
- filters=filters,
- pillar_key=pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv,
- merge_pillar=merge_pillar,
- only_lower_merge=True,
- revision_id=revision_id,
- revision_no=revision_no,
- revision_date=revision_date,
- revision_date_format=revision_date_format)
+ )
+ return get_policy_config(
+ platform,
+ filters=filters,
+ pillar_key=pillar_key,
+ pillarenv=pillarenv,
+ saltenv=saltenv,
+ merge_pillar=merge_pillar,
+ only_lower_merge=True,
+ revision_id=revision_id,
+ revision_no=revision_no,
+ revision_date=revision_date,
+ revision_date_format=revision_date_format,
+ )
-def get_policy_config(platform,
- filters=None,
- prepend=True,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None,
- merge_pillar=True,
- only_lower_merge=False,
- revision_id=None,
- revision_no=None,
- revision_date=True,
- revision_date_format='%Y/%m/%d'):
- '''
+def get_policy_config(
+ platform,
+ filters=None,
+ prepend=True,
+ pillar_key="acl",
+ pillarenv=None,
+ saltenv=None,
+ merge_pillar=True,
+ only_lower_merge=False,
+ revision_id=None,
+ revision_no=None,
+ revision_date=True,
+ revision_date_format="%Y/%m/%d",
+):
+ """
Return the configuration of the whole policy.
platform
@@ -1181,38 +1221,39 @@ def get_policy_config(platform,
- tcp
- udp
action: reject
- '''
+ """
if not filters:
filters = []
if merge_pillar and not only_lower_merge:
# the pillar key for the policy config is the `pillar_key` itself
- policy_pillar_cfg = _get_pillar_cfg(pillar_key,
- saltenv=saltenv,
- pillarenv=pillarenv)
+ policy_pillar_cfg = _get_pillar_cfg(
+ pillar_key, saltenv=saltenv, pillarenv=pillarenv
+ )
    # now, let's merge everything with the pillar data
# again, this will not remove any extra filters/terms
# but it will merge with the pillar data
# if this behaviour is not wanted, the user can set `merge_pillar` as `False`
filters = _merge_list_of_dict(filters, policy_pillar_cfg, prepend=prepend)
- policy_object = _get_policy_object(platform,
- filters=filters,
- pillar_key=pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv,
- merge_pillar=merge_pillar)
+ policy_object = _get_policy_object(
+ platform,
+ filters=filters,
+ pillar_key=pillar_key,
+ pillarenv=pillarenv,
+ saltenv=saltenv,
+ merge_pillar=merge_pillar,
+ )
policy_text = six.text_type(policy_object)
- return _revision_tag(policy_text,
- revision_id=revision_id,
- revision_no=revision_no,
- revision_date=revision_date,
- revision_date_format=revision_date_format)
+ return _revision_tag(
+ policy_text,
+ revision_id=revision_id,
+ revision_no=revision_no,
+ revision_date=revision_date,
+ revision_date_format=revision_date_format,
+ )
-def get_filter_pillar(filter_name,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None):
- '''
+def get_filter_pillar(filter_name, pillar_key="acl", pillarenv=None, saltenv=None):
+ """
Helper that can be used inside a state SLS,
in order to get the filter configuration given its name.
@@ -1229,19 +1270,15 @@ def get_filter_pillar(filter_name,
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
- '''
- pillar_cfg = _get_pillar_cfg(pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv)
+ """
+ pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)
return _lookup_element(pillar_cfg, filter_name)
-def get_term_pillar(filter_name,
- term_name,
- pillar_key='acl',
- pillarenv=None,
- saltenv=None):
- '''
+def get_term_pillar(
+ filter_name, term_name, pillar_key="acl", pillarenv=None, saltenv=None
+):
+ """
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
@@ -1262,11 +1299,10 @@ def get_term_pillar(filter_name,
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
- '''
- filter_pillar_cfg = get_filter_pillar(filter_name,
- pillar_key=pillar_key,
- pillarenv=pillarenv,
- saltenv=saltenv)
- term_pillar_cfg = filter_pillar_cfg.get('terms', [])
+ """
+ filter_pillar_cfg = get_filter_pillar(
+ filter_name, pillar_key=pillar_key, pillarenv=pillarenv, saltenv=saltenv
+ )
+ term_pillar_cfg = filter_pillar_cfg.get("terms", [])
term_opts = _lookup_element(term_pillar_cfg, term_name)
return term_opts
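The pillar layout these two helpers expect would look roughly like this (hypothetical ``acl`` pillar):

    # acl:
    #   - my-filter:
    #       options:
    #         - inet
    #       terms:
    #         - my-term:
    #             source_port: 1234
    #             action: accept
    #
    # get_filter_pillar("my-filter") returns the dict under "my-filter";
    # get_term_pillar("my-filter", "my-term") returns the dict under "my-term".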
diff --git a/salt/modules/cassandra.py b/salt/modules/cassandra.py
index d1b5be18c2b..d03f92e4061 100644
--- a/salt/modules/cassandra.py
+++ b/salt/modules/cassandra.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Cassandra NoSQL Database Module
:depends: - pycassa Cassandra Python adapter
@@ -10,58 +10,67 @@ Cassandra NoSQL Database Module
cassandra.nodetool: /usr/local/bin/nodetool
cassandra.host: localhost
cassandra.thrift_port: 9160
-'''
-from __future__ import absolute_import, unicode_literals, print_function
+"""
+from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
-log = logging.getLogger(__name__)
# Import salt libs
import salt.utils.path
from salt.ext import six
+log = logging.getLogger(__name__)
+
+
HAS_PYCASSA = False
try:
from pycassa.system_manager import SystemManager
+
HAS_PYCASSA = True
except ImportError:
pass
def __virtual__():
- '''
+ """
Only load if pycassa is available and the system is configured
- '''
+ """
if not HAS_PYCASSA:
- return (False, 'The cassandra execution module cannot be loaded: pycassa not installed.')
+ return (
+ False,
+ "The cassandra execution module cannot be loaded: pycassa not installed.",
+ )
- if HAS_PYCASSA and salt.utils.path.which('nodetool'):
- return 'cassandra'
- return (False, 'The cassandra execution module cannot be loaded: nodetool not found.')
+ if HAS_PYCASSA and salt.utils.path.which("nodetool"):
+ return "cassandra"
+ return (
+ False,
+ "The cassandra execution module cannot be loaded: nodetool not found.",
+ )
def _nodetool(cmd):
- '''
+ """
Internal cassandra nodetool wrapper. Some functions are not
available via pycassa so we must rely on nodetool.
- '''
- nodetool = __salt__['config.option']('cassandra.nodetool')
- host = __salt__['config.option']('cassandra.host')
- return __salt__['cmd.run_stdout']('{0} -h {1} {2}'.format(nodetool, host, cmd))
+ """
+ nodetool = __salt__["config.option"]("cassandra.nodetool")
+ host = __salt__["config.option"]("cassandra.host")
+ return __salt__["cmd.run_stdout"]("{0} -h {1} {2}".format(nodetool, host, cmd))
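With the configuration shown in the module docstring, the wrapper above shells out as follows (illustrative):

    _nodetool("ring")  # runs: /usr/local/bin/nodetool -h localhost ring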
def _sys_mgr():
- '''
+ """
Return a pycassa system manager connection object
- '''
- thrift_port = six.text_type(__salt__['config.option']('cassandra.THRIFT_PORT'))
- host = __salt__['config.option']('cassandra.host')
- return SystemManager('{0}:{1}'.format(host, thrift_port))
+ """
+ thrift_port = six.text_type(__salt__["config.option"]("cassandra.THRIFT_PORT"))
+ host = __salt__["config.option"]("cassandra.host")
+ return SystemManager("{0}:{1}".format(host, thrift_port))
def compactionstats():
- '''
+ """
Return compactionstats info
CLI Example:
@@ -69,12 +78,12 @@ def compactionstats():
.. code-block:: bash
salt '*' cassandra.compactionstats
- '''
- return _nodetool('compactionstats')
+ """
+ return _nodetool("compactionstats")
def version():
- '''
+ """
Return the cassandra version
CLI Example:
@@ -82,12 +91,12 @@ def version():
.. code-block:: bash
salt '*' cassandra.version
- '''
- return _nodetool('version')
+ """
+ return _nodetool("version")
def netstats():
- '''
+ """
Return netstats info
CLI Example:
@@ -95,12 +104,12 @@ def netstats():
.. code-block:: bash
salt '*' cassandra.netstats
- '''
- return _nodetool('netstats')
+ """
+ return _nodetool("netstats")
def tpstats():
- '''
+ """
Return tpstats info
CLI Example:
@@ -108,12 +117,12 @@ def tpstats():
.. code-block:: bash
salt '*' cassandra.tpstats
- '''
- return _nodetool('tpstats')
+ """
+ return _nodetool("tpstats")
def info():
- '''
+ """
Return cassandra node info
CLI Example:
@@ -121,12 +130,12 @@ def info():
.. code-block:: bash
salt '*' cassandra.info
- '''
- return _nodetool('info')
+ """
+ return _nodetool("info")
def ring():
- '''
+ """
Return cassandra ring info
CLI Example:
@@ -134,12 +143,12 @@ def ring():
.. code-block:: bash
salt '*' cassandra.ring
- '''
- return _nodetool('ring')
+ """
+ return _nodetool("ring")
def keyspaces():
- '''
+ """
Return existing keyspaces
CLI Example:
@@ -147,13 +156,13 @@ def keyspaces():
.. code-block:: bash
salt '*' cassandra.keyspaces
- '''
+ """
sys = _sys_mgr()
return sys.list_keyspaces()
def column_families(keyspace=None):
- '''
+ """
Return existing column families for all keyspaces
or just the provided one.
@@ -163,7 +172,7 @@ def column_families(keyspace=None):
salt '*' cassandra.column_families
    salt '*' cassandra.column_families <keyspace>
- '''
+ """
sys = _sys_mgr()
ksps = sys.list_keyspaces()
@@ -181,7 +190,7 @@ def column_families(keyspace=None):
def column_family_definition(keyspace, column_family):
- '''
+ """
Return a dictionary of column family definitions for the given
keyspace/column_family
@@ -191,11 +200,11 @@ def column_family_definition(keyspace, column_family):
salt '*' cassandra.column_family_definition
- '''
+ """
sys = _sys_mgr()
try:
return vars(sys.get_keyspace_column_families(keyspace)[column_family])
except Exception: # pylint: disable=broad-except
- log.debug('Invalid Keyspace/CF combination')
+ log.debug("Invalid Keyspace/CF combination")
return None
diff --git a/salt/modules/cassandra_cql.py b/salt/modules/cassandra_cql.py
index 30db93dccce..0d389b2a78c 100644
--- a/salt/modules/cassandra_cql.py
+++ b/salt/modules/cassandra_cql.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Cassandra Database Module
.. versionadded:: 2015.5.0
@@ -78,38 +78,43 @@ queries based on the internal schema of said version.
# defaults to 4, if not set
protocol_version: 3
-'''
+"""
# Import Python Libs
-from __future__ import absolute_import, unicode_literals, print_function
+from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import re
import ssl
# Import Salt Libs
import salt.utils.json
+import salt.utils.versions
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range
-import salt.utils.versions
-SSL_VERSION = 'ssl_version'
+SSL_VERSION = "ssl_version"
log = logging.getLogger(__name__)
-__virtualname__ = 'cassandra_cql'
+__virtualname__ = "cassandra_cql"
HAS_DRIVER = False
try:
# pylint: disable=import-error,no-name-in-module
from cassandra.cluster import Cluster
from cassandra.cluster import NoHostAvailable
- from cassandra.connection import ConnectionException, \
- ConnectionShutdown, OperationTimedOut
+ from cassandra.connection import (
+ ConnectionException,
+ ConnectionShutdown,
+ OperationTimedOut,
+ )
from cassandra.auth import PlainTextAuthProvider
from cassandra.query import dict_factory
+
# pylint: enable=import-error,no-name-in-module
HAS_DRIVER = True
except ImportError:
@@ -117,23 +122,23 @@ except ImportError:
def __virtual__():
- '''
+ """
Return virtual name of the module only if the python driver can be loaded.
:return: The virtual name of the module.
:rtype: str
- '''
+ """
if HAS_DRIVER:
return __virtualname__
- return (False, 'Cannot load cassandra_cql module: python driver not found')
+ return (False, "Cannot load cassandra_cql module: python driver not found")
def _async_log_errors(errors):
- log.error('Cassandra_cql asynchronous call returned: %s', errors)
+ log.error("Cassandra_cql asynchronous call returned: %s", errors)
def _load_properties(property_name, config_option, set_default=False, default=None):
- '''
+ """
Load properties for the cassandra module from config or pillar.
:param property_name: The property to load.
@@ -146,11 +151,13 @@ def _load_properties(property_name, config_option, set_default=False, default=No
:type default: str or int
:return: The property fetched from the configuration or default.
:rtype: str or list of str
- '''
+ """
if not property_name:
- log.debug("No property specified in function, trying to load from salt configuration")
+ log.debug(
+ "No property specified in function, trying to load from salt configuration"
+ )
try:
- options = __salt__['config.option']('cassandra')
+ options = __salt__["config.option"]("cassandra")
except BaseException as e:
log.error("Failed to get cassandra config options. Reason: %s", e)
raise
@@ -158,49 +165,56 @@ def _load_properties(property_name, config_option, set_default=False, default=No
loaded_property = options.get(config_option)
if not loaded_property:
if set_default:
- log.debug('Setting default Cassandra %s to %s', config_option, default)
+ log.debug("Setting default Cassandra %s to %s", config_option, default)
loaded_property = default
else:
- log.error('No cassandra %s specified in the configuration or passed to the module.', config_option)
- raise CommandExecutionError("ERROR: Cassandra {0} cannot be empty.".format(config_option))
+ log.error(
+ "No cassandra %s specified in the configuration or passed to the module.",
+ config_option,
+ )
+ raise CommandExecutionError(
+ "ERROR: Cassandra {0} cannot be empty.".format(config_option)
+ )
return loaded_property
return property_name
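Usage sketch for the property loader above (values are illustrative):

    _load_properties(9043, "port")                                  # explicit value wins -> 9043
    _load_properties(None, "port", set_default=True, default=9042)  # config value, else 9042
    _load_properties(None, "port")                                  # missing, no default -> CommandExecutionError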
def _get_ssl_opts():
- '''
+ """
Parse out ssl_options for Cassandra cluster connection.
Make sure that the ssl_version (if any specified) is valid.
- '''
- sslopts = __salt__['config.option']('cassandra').get('ssl_options', None)
+ """
+ sslopts = __salt__["config.option"]("cassandra").get("ssl_options", None)
ssl_opts = {}
if sslopts:
- ssl_opts['ca_certs'] = sslopts['ca_certs']
+ ssl_opts["ca_certs"] = sslopts["ca_certs"]
if SSL_VERSION in sslopts:
- if not sslopts[SSL_VERSION].startswith('PROTOCOL_'):
- valid_opts = ', '.join(
- [x for x in dir(ssl) if x.startswith('PROTOCOL_')]
+ if not sslopts[SSL_VERSION].startswith("PROTOCOL_"):
+ valid_opts = ", ".join(
+ [x for x in dir(ssl) if x.startswith("PROTOCOL_")]
+ )
+ raise CommandExecutionError(
+ "Invalid protocol_version "
+ "specified! "
+ "Please make sure "
+                "that the ssl protocol "
+                "version is one from the SSL "
+ "module. "
+ "Valid options are "
+ "{0}".format(valid_opts)
)
- raise CommandExecutionError('Invalid protocol_version '
- 'specified! '
- 'Please make sure '
- 'that the ssl protocol'
- 'version is one from the SSL'
- 'module. '
- 'Valid options are '
- '{0}'.format(valid_opts))
else:
- ssl_opts[SSL_VERSION] = \
- getattr(ssl, sslopts[SSL_VERSION])
+ ssl_opts[SSL_VERSION] = getattr(ssl, sslopts[SSL_VERSION])
return ssl_opts
else:
return None
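The ``ssl_options`` structure parsed above would look roughly like this (hypothetical paths):

    # cassandra:
    #   ssl_options:
    #     ca_certs: /etc/ssl/certs/ca-bundle.crt
    #     ssl_version: PROTOCOL_TLSv1_2  # must be a PROTOCOL_* name from the ssl module
    #
    # _get_ssl_opts() then returns
    # {"ca_certs": "/etc/ssl/certs/ca-bundle.crt", "ssl_version": ssl.PROTOCOL_TLSv1_2}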
-def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
- protocol_version=None):
- '''
+def _connect(
+ contact_points=None, port=None, cql_user=None, cql_pass=None, protocol_version=None
+):
+ """
Connect to a Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
@@ -215,7 +229,7 @@ def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
:type port: int
:return: The session and cluster objects.
:rtype: cluster object, session object
- '''
+ """
# Lazy load the Cassandra cluster and session for this module by creating a
# cluster and session when cql_query is called the first time. Get the
# Cassandra cluster and session from this module's __context__ after it is
@@ -229,61 +243,98 @@ def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,
# Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
# function, or something similar for each loaded module, connection pools
# and the like can be gracefully reclaimed/shutdown.
- if (__context__
- and 'cassandra_cql_returner_cluster' in __context__
- and 'cassandra_cql_returner_session' in __context__):
- return __context__['cassandra_cql_returner_cluster'], __context__['cassandra_cql_returner_session']
+ if (
+ __context__
+ and "cassandra_cql_returner_cluster" in __context__
+ and "cassandra_cql_returner_session" in __context__
+ ):
+ return (
+ __context__["cassandra_cql_returner_cluster"],
+ __context__["cassandra_cql_returner_session"],
+ )
else:
- contact_points = _load_properties(property_name=contact_points, config_option='cluster')
- contact_points = contact_points if isinstance(contact_points, list) else contact_points.split(',')
- port = _load_properties(property_name=port, config_option='port', set_default=True, default=9042)
- cql_user = _load_properties(property_name=cql_user, config_option='username', set_default=True, default="cassandra")
- cql_pass = _load_properties(property_name=cql_pass, config_option='password', set_default=True, default="cassandra")
- protocol_version = _load_properties(property_name=protocol_version,
- config_option='protocol_version',
- set_default=True, default=4)
+ contact_points = _load_properties(
+ property_name=contact_points, config_option="cluster"
+ )
+ contact_points = (
+ contact_points
+ if isinstance(contact_points, list)
+ else contact_points.split(",")
+ )
+ port = _load_properties(
+ property_name=port, config_option="port", set_default=True, default=9042
+ )
+ cql_user = _load_properties(
+ property_name=cql_user,
+ config_option="username",
+ set_default=True,
+ default="cassandra",
+ )
+ cql_pass = _load_properties(
+ property_name=cql_pass,
+ config_option="password",
+ set_default=True,
+ default="cassandra",
+ )
+ protocol_version = _load_properties(
+ property_name=protocol_version,
+ config_option="protocol_version",
+ set_default=True,
+ default=4,
+ )
try:
auth_provider = PlainTextAuthProvider(username=cql_user, password=cql_pass)
ssl_opts = _get_ssl_opts()
if ssl_opts:
- cluster = Cluster(contact_points,
- port=port,
- auth_provider=auth_provider,
- ssl_options=ssl_opts,
- protocol_version=protocol_version,
- compression=True)
+ cluster = Cluster(
+ contact_points,
+ port=port,
+ auth_provider=auth_provider,
+ ssl_options=ssl_opts,
+ protocol_version=protocol_version,
+ compression=True,
+ )
else:
- cluster = Cluster(contact_points, port=port,
- auth_provider=auth_provider,
- protocol_version=protocol_version,
- compression=True)
+ cluster = Cluster(
+ contact_points,
+ port=port,
+ auth_provider=auth_provider,
+ protocol_version=protocol_version,
+ compression=True,
+ )
for recontimes in range(1, 4):
try:
session = cluster.connect()
break
except OperationTimedOut:
- log.warning('Cassandra cluster.connect timed out, try %s', recontimes)
+ log.warning(
+ "Cassandra cluster.connect timed out, try %s", recontimes
+ )
if recontimes >= 3:
raise
# TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
- __context__['cassandra_cql_returner_cluster'] = cluster
- __context__['cassandra_cql_returner_session'] = session
- __context__['cassandra_cql_prepared'] = {}
+ __context__["cassandra_cql_returner_cluster"] = cluster
+ __context__["cassandra_cql_returner_session"] = session
+ __context__["cassandra_cql_prepared"] = {}
- log.debug('Successfully connected to Cassandra cluster at %s', contact_points)
+ log.debug(
+ "Successfully connected to Cassandra cluster at %s", contact_points
+ )
return cluster, session
except TypeError:
pass
except (ConnectionException, ConnectionShutdown, NoHostAvailable):
- log.error('Could not connect to Cassandra cluster at %s', contact_points)
- raise CommandExecutionError('ERROR: Could not connect to Cassandra cluster.')
+ log.error("Could not connect to Cassandra cluster at %s", contact_points)
+ raise CommandExecutionError(
+ "ERROR: Could not connect to Cassandra cluster."
+ )
def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+ """
Run a query on a Cassandra cluster and return a dictionary.
:param query: The query to execute.
@@ -306,14 +357,19 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
.. code-block:: bash
salt 'cassandra-server' cassandra_cql.cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
- '''
+ """
try:
- cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass)
+ cluster, session = _connect(
+ contact_points=contact_points,
+ port=port,
+ cql_user=cql_user,
+ cql_pass=cql_pass,
+ )
except CommandExecutionError:
- log.critical('Could not get Cassandra cluster session.')
+ log.critical("Could not get Cassandra cluster session.")
raise
except BaseException as e:
- log.critical('Unexpected error while getting Cassandra cluster session: %s', e)
+ log.critical("Unexpected error while getting Cassandra cluster session: %s", e)
raise
session.row_factory = dict_factory
@@ -324,11 +380,13 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
# Find the query for the current cluster version.
# https://issues.apache.org/jira/browse/CASSANDRA-6717
if isinstance(query, dict):
- cluster_version = version(contact_points=contact_points,
- port=port,
- cql_user=cql_user,
- cql_pass=cql_pass)
- match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version)
+ cluster_version = version(
+ contact_points=contact_points,
+ port=port,
+ cql_user=cql_user,
+ cql_pass=cql_pass,
+ )
+ match = re.match(r"^(\d+)\.(\d+)(?:\.(\d+))?", cluster_version)
major, minor, point = match.groups()
# try to find the specific version in the query dictionary
# then try the major version
@@ -337,12 +395,12 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
query = query[cluster_version]
except KeyError:
query = query.get(major, max(query))
- log.debug('New query is: %s', query)
+ log.debug("New query is: %s", query)
try:
results = session.execute(query)
except BaseException as e:
- log.error('Failed to execute query: %s\n reason: %s', query, e)
+ log.error("Failed to execute query: %s\n reason: %s", query, e)
msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e)
raise CommandExecutionError(msg)
@@ -362,9 +420,18 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
return ret
-def cql_query_with_prepare(query, statement_name, statement_arguments, callback_errors=None, contact_points=None,
- port=None, cql_user=None, cql_pass=None, **kwargs):
- '''
+def cql_query_with_prepare(
+ query,
+ statement_name,
+ statement_arguments,
+ callback_errors=None,
+ contact_points=None,
+ port=None,
+ cql_user=None,
+ cql_pass=None,
+ **kwargs
+):
+ """
Run a query on a Cassandra cluster and return a dictionary.
This function should not be used asynchronously for SELECTs -- it will not
@@ -406,40 +473,46 @@ def cql_query_with_prepare(query, statement_name, statement_arguments, callback_
# Select data, should not be asynchronous because there is not currently a facility to return data from a future
salt this-node cassandra_cql.cql_query_with_prepare "name_select" "SELECT * FROM USERS WHERE first_name=?" \
statement_arguments=['John']
- '''
+ """
# Backward-compatibility with Python 3.7: "async" is a reserved word
- asynchronous = kwargs.get('async', False)
+ asynchronous = kwargs.get("async", False)
try:
- cluster, session = _connect(contact_points=contact_points, port=port,
- cql_user=cql_user, cql_pass=cql_pass)
+ cluster, session = _connect(
+ contact_points=contact_points,
+ port=port,
+ cql_user=cql_user,
+ cql_pass=cql_pass,
+ )
except CommandExecutionError:
- log.critical('Could not get Cassandra cluster session.')
+ log.critical("Could not get Cassandra cluster session.")
raise
except BaseException as e:
- log.critical('Unexpected error while getting Cassandra cluster session: %s', e)
+ log.critical("Unexpected error while getting Cassandra cluster session: %s", e)
raise
- if statement_name not in __context__['cassandra_cql_prepared']:
+ if statement_name not in __context__["cassandra_cql_prepared"]:
try:
bound_statement = session.prepare(query)
- __context__['cassandra_cql_prepared'][statement_name] = bound_statement
+ __context__["cassandra_cql_prepared"][statement_name] = bound_statement
except BaseException as e:
- log.critical('Unexpected error while preparing SQL statement: %s', e)
+ log.critical("Unexpected error while preparing SQL statement: %s", e)
raise
else:
- bound_statement = __context__['cassandra_cql_prepared'][statement_name]
+ bound_statement = __context__["cassandra_cql_prepared"][statement_name]
session.row_factory = dict_factory
ret = []
try:
if asynchronous:
- future_results = session.execute_async(bound_statement.bind(statement_arguments))
+ future_results = session.execute_async(
+ bound_statement.bind(statement_arguments)
+ )
# future_results.add_callbacks(_async_log_errors)
else:
results = session.execute(bound_statement.bind(statement_arguments))
except BaseException as e:
- log.error('Failed to execute query: %s\n reason: %s', query, e)
+ log.error("Failed to execute query: %s\n reason: %s", query, e)
msg = "ERROR: Cassandra query failed: {0} reason: {1}".format(query, e)
raise CommandExecutionError(msg)
@@ -463,7 +536,7 @@ def cql_query_with_prepare(query, statement_name, statement_arguments, callback_
def version(contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+ """
Show the Cassandra version.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
@@ -484,25 +557,25 @@ def version(contact_points=None, port=None, cql_user=None, cql_pass=None):
salt 'minion1' cassandra_cql.version
salt 'minion1' cassandra_cql.version contact_points=minion1
- '''
- query = '''select release_version
+ """
+ query = """select release_version
from system.local
- limit 1;'''
+ limit 1;"""
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not get Cassandra version.')
+ log.critical("Could not get Cassandra version.")
raise
except BaseException as e:
- log.critical('Unexpected error while getting Cassandra version: %s', e)
+ log.critical("Unexpected error while getting Cassandra version: %s", e)
raise
- return ret[0].get('release_version')
+ return ret[0].get("release_version")
def info(contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+ """
Show the Cassandra information for this cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
@@ -523,9 +596,9 @@ def info(contact_points=None, port=None, cql_user=None, cql_pass=None):
salt 'minion1' cassandra_cql.info
salt 'minion1' cassandra_cql.info contact_points=minion1
- '''
+ """
- query = '''select cluster_name,
+ query = """select cluster_name,
data_center,
partitioner,
host_id,
@@ -535,24 +608,24 @@ def info(contact_points=None, port=None, cql_user=None, cql_pass=None):
schema_version,
thrift_version
from system.local
- limit 1;'''
+ limit 1;"""
ret = {}
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not list Cassandra info.')
+ log.critical("Could not list Cassandra info.")
raise
except BaseException as e:
- log.critical('Unexpected error while listing Cassandra info: %s', e)
+ log.critical("Unexpected error while listing Cassandra info: %s", e)
raise
return ret
def list_keyspaces(contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+ """
List keyspaces in a Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
@@ -573,10 +646,10 @@ def list_keyspaces(contact_points=None, port=None, cql_user=None, cql_pass=None)
salt 'minion1' cassandra_cql.list_keyspaces
salt 'minion1' cassandra_cql.list_keyspaces contact_points=minion1 port=9000
- '''
+ """
query = {
- '2': 'select keyspace_name from system.schema_keyspaces;',
- '3': 'select keyspace_name from system_schema.keyspaces;',
+ "2": "select keyspace_name from system.schema_keyspaces;",
+ "3": "select keyspace_name from system_schema.keyspaces;",
}
ret = {}
@@ -584,17 +657,19 @@ def list_keyspaces(contact_points=None, port=None, cql_user=None, cql_pass=None)
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not list keyspaces.')
+ log.critical("Could not list keyspaces.")
raise
except BaseException as e:
- log.critical('Unexpected error while listing keyspaces: %s', e)
+ log.critical("Unexpected error while listing keyspaces: %s", e)
raise
return ret
-def list_column_families(keyspace=None, contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+def list_column_families(
+ keyspace=None, contact_points=None, port=None, cql_user=None, cql_pass=None
+):
+ """
List column families in a Cassandra cluster for all keyspaces or just the provided one.
:param keyspace: The keyspace to provide the column families for, optional.
@@ -619,14 +694,18 @@ def list_column_families(keyspace=None, contact_points=None, port=None, cql_user
salt 'minion1' cassandra_cql.list_column_families contact_points=minion1
salt 'minion1' cassandra_cql.list_column_families keyspace=system
- '''
+ """
where_clause = "where keyspace_name = '{0}'".format(keyspace) if keyspace else ""
query = {
- '2': '''select columnfamily_name from system.schema_columnfamilies
- {0};'''.format(where_clause),
- '3': '''select column_name from system_schema.columns
- {0};'''.format(where_clause),
+ "2": """select columnfamily_name from system.schema_columnfamilies
+ {0};""".format(
+ where_clause
+ ),
+        "3": """select table_name from system_schema.tables
+ {0};""".format(
+ where_clause
+ ),
}
ret = {}
@@ -634,17 +713,19 @@ def list_column_families(keyspace=None, contact_points=None, port=None, cql_user
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not list column families.')
+ log.critical("Could not list column families.")
raise
except BaseException as e:
- log.critical('Unexpected error while listing column families: %s', e)
+ log.critical("Unexpected error while listing column families: %s", e)
raise
return ret
-def keyspace_exists(keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+def keyspace_exists(
+ keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None
+):
+ """
Check if a keyspace exists in a Cassandra cluster.
    :param keyspace: The keyspace name to check for.
@@ -665,29 +746,41 @@ def keyspace_exists(keyspace, contact_points=None, port=None, cql_user=None, cql
.. code-block:: bash
salt 'minion1' cassandra_cql.keyspace_exists keyspace=system
- '''
+ """
query = {
- '2': '''select keyspace_name from system.schema_keyspaces
- where keyspace_name = '{0}';'''.format(keyspace),
- '3': '''select keyspace_name from system_schema.keyspaces
- where keyspace_name = '{0}';'''.format(keyspace),
+ "2": """select keyspace_name from system.schema_keyspaces
+ where keyspace_name = '{0}';""".format(
+ keyspace
+ ),
+ "3": """select keyspace_name from system_schema.keyspaces
+ where keyspace_name = '{0}';""".format(
+ keyspace
+ ),
}
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not determine if keyspace exists.')
+ log.critical("Could not determine if keyspace exists.")
raise
except BaseException as e:
- log.critical('Unexpected error while determining if keyspace exists: %s', e)
+ log.critical("Unexpected error while determining if keyspace exists: %s", e)
raise
return True if ret else False
-def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication_factor=1, replication_datacenters=None,
- contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+def create_keyspace(
+ keyspace,
+ replication_strategy="SimpleStrategy",
+ replication_factor=1,
+ replication_datacenters=None,
+ contact_points=None,
+ port=None,
+ cql_user=None,
+ cql_pass=None,
+):
+ """
Create a new keyspace in Cassandra.
:param keyspace: The keyspace name
@@ -719,18 +812,18 @@ def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication
salt 'minion1' cassandra_cql.create_keyspace keyspace=newkeyspace replication_strategy=NetworkTopologyStrategy \
replication_datacenters='{"datacenter_1": 3, "datacenter_2": 2}'
- '''
+ """
existing_keyspace = keyspace_exists(keyspace, contact_points, port)
if not existing_keyspace:
# Add the strategy, replication_factor, etc.
- replication_map = {
- 'class': replication_strategy
- }
+ replication_map = {"class": replication_strategy}
if replication_datacenters:
if isinstance(replication_datacenters, six.string_types):
try:
- replication_datacenter_map = salt.utils.json.loads(replication_datacenters)
+ replication_datacenter_map = salt.utils.json.loads(
+ replication_datacenters
+ )
replication_map.update(**replication_datacenter_map)
except BaseException: # pylint: disable=W0703
log.error("Could not load json replication_datacenters.")
@@ -738,24 +831,28 @@ def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication
else:
replication_map.update(**replication_datacenters)
else:
- replication_map['replication_factor'] = replication_factor
+ replication_map["replication_factor"] = replication_factor
- query = '''create keyspace {0}
+ query = """create keyspace {0}
with replication = {1}
- and durable_writes = true;'''.format(keyspace, replication_map)
+ and durable_writes = true;""".format(
+ keyspace, replication_map
+ )
try:
cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not create keyspace.')
+ log.critical("Could not create keyspace.")
raise
except BaseException as e:
- log.critical('Unexpected error while creating keyspace: %s', e)
+ log.critical("Unexpected error while creating keyspace: %s", e)
raise
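With the defaults above, the generated CQL looks roughly like this (the replication map is rendered with Python's dict repr):

    # create keyspace mykeyspace
    #     with replication = {'class': 'SimpleStrategy', 'replication_factor': 1}
    #     and durable_writes = true;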
-def drop_keyspace(keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+def drop_keyspace(
+ keyspace, contact_points=None, port=None, cql_user=None, cql_pass=None
+):
+ """
Drop a keyspace if it exists in a Cassandra cluster.
:param keyspace: The keyspace to drop.
@@ -778,24 +875,24 @@ def drop_keyspace(keyspace, contact_points=None, port=None, cql_user=None, cql_p
salt 'minion1' cassandra_cql.drop_keyspace keyspace=test
salt 'minion1' cassandra_cql.drop_keyspace keyspace=test contact_points=minion1
- '''
+ """
existing_keyspace = keyspace_exists(keyspace, contact_points, port)
if existing_keyspace:
- query = '''drop keyspace {0};'''.format(keyspace)
+ query = """drop keyspace {0};""".format(keyspace)
try:
cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not drop keyspace.')
+ log.critical("Could not drop keyspace.")
raise
except BaseException as e:
- log.critical('Unexpected error while dropping keyspace: %s', e)
+ log.critical("Unexpected error while dropping keyspace: %s", e)
raise
return True
def list_users(contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+ """
List existing users in this Cassandra cluster.
:param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
@@ -816,7 +913,7 @@ def list_users(contact_points=None, port=None, cql_user=None, cql_pass=None):
salt 'minion1' cassandra_cql.list_users
salt 'minion1' cassandra_cql.list_users contact_points=minion1
- '''
+ """
query = "list users;"
ret = {}
@@ -824,17 +921,25 @@ def list_users(contact_points=None, port=None, cql_user=None, cql_pass=None):
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not list users.')
+ log.critical("Could not list users.")
raise
except BaseException as e:
- log.critical('Unexpected error while listing users: %s', e)
+ log.critical("Unexpected error while listing users: %s", e)
raise
return ret
-def create_user(username, password, superuser=False, contact_points=None, port=None, cql_user=None, cql_pass=None):
- '''
+def create_user(
+ username,
+ password,
+ superuser=False,
+ contact_points=None,
+ port=None,
+ cql_user=None,
+ cql_pass=None,
+):
+ """
Create a new cassandra user with credentials and superuser status.
:param username: The name of the new user.
@@ -863,28 +968,42 @@ def create_user(username, password, superuser=False, contact_points=None, port=N
salt 'minion1' cassandra_cql.create_user username=joe password=secret superuser=True
salt 'minion1' cassandra_cql.create_user username=joe password=secret superuser=True contact_points=minion1
- '''
- superuser_cql = 'superuser' if superuser else 'nosuperuser'
- query = '''create user if not exists {0} with password '{1}' {2};'''.format(username, password, superuser_cql)
- log.debug("Attempting to create a new user with username=%s superuser=%s", username, superuser_cql)
+ """
+ superuser_cql = "superuser" if superuser else "nosuperuser"
+ query = """create user if not exists {0} with password '{1}' {2};""".format(
+ username, password, superuser_cql
+ )
+ log.debug(
+ "Attempting to create a new user with username=%s superuser=%s",
+ username,
+ superuser_cql,
+ )
# The create user query doesn't actually return anything if the query succeeds.
    # If the query fails, catch the exception, log a message and raise it again.
try:
cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not create user.')
+ log.critical("Could not create user.")
raise
except BaseException as e:
- log.critical('Unexpected error while creating user: %s', e)
+ log.critical("Unexpected error while creating user: %s", e)
raise
return True
-def list_permissions(username=None, resource=None, resource_type='keyspace', permission=None, contact_points=None,
- port=None, cql_user=None, cql_pass=None):
- '''
+def list_permissions(
+ username=None,
+ resource=None,
+ resource_type="keyspace",
+ permission=None,
+ contact_points=None,
+ port=None,
+ cql_user=None,
+ cql_pass=None,
+):
+ """
List permissions.
:param username: The name of the user to list permissions for.
@@ -916,9 +1035,13 @@ def list_permissions(username=None, resource=None, resource_type='keyspace', per
salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \
permission=select contact_points=minion1
- '''
- keyspace_cql = "{0} {1}".format(resource_type, resource) if resource else "all keyspaces"
- permission_cql = "{0} permission".format(permission) if permission else "all permissions"
+ """
+ keyspace_cql = (
+ "{0} {1}".format(resource_type, resource) if resource else "all keyspaces"
+ )
+ permission_cql = (
+ "{0} permission".format(permission) if permission else "all permissions"
+ )
query = "list {0} on {1}".format(permission_cql, keyspace_cql)
if username:
@@ -931,18 +1054,26 @@ def list_permissions(username=None, resource=None, resource_type='keyspace', per
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not list permissions.')
+ log.critical("Could not list permissions.")
raise
except BaseException as e:
- log.critical('Unexpected error while listing permissions: %s', e)
+ log.critical("Unexpected error while listing permissions: %s", e)
raise
return ret
-def grant_permission(username, resource=None, resource_type='keyspace', permission=None, contact_points=None, port=None,
- cql_user=None, cql_pass=None):
- '''
+def grant_permission(
+ username,
+ resource=None,
+ resource_type="keyspace",
+ permission=None,
+ contact_points=None,
+ port=None,
+ cql_user=None,
+ cql_pass=None,
+):
+ """
Grant permissions to a user.
:param username: The name of the user to grant permissions to.
@@ -974,19 +1105,23 @@ def grant_permission(username, resource=None, resource_type='keyspace', permissi
salt 'minion1' cassandra_cql.grant_permission username=joe resource=test_table resource_type=table \
permission=select contact_points=minion1
- '''
- permission_cql = "grant {0}".format(permission) if permission else "grant all permissions"
- resource_cql = "on {0} {1}".format(resource_type, resource) if resource else "on all keyspaces"
+ """
+ permission_cql = (
+ "grant {0}".format(permission) if permission else "grant all permissions"
+ )
+ resource_cql = (
+ "on {0} {1}".format(resource_type, resource) if resource else "on all keyspaces"
+ )
query = "{0} {1} to {2}".format(permission_cql, resource_cql, username)
log.debug("Attempting to grant permissions with query '%s'", query)
try:
cql_query(query, contact_points, port, cql_user, cql_pass)
except CommandExecutionError:
- log.critical('Could not grant permissions.')
+ log.critical("Could not grant permissions.")
raise
except BaseException as e:
- log.critical('Unexpected error while granting permissions: %s', e)
+ log.critical("Unexpected error while granting permissions: %s", e)
raise
return True
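For instance, the query assembled above (illustrative arguments):

    grant_permission("joe", resource="test_keyspace", permission="select")
    # -> "grant select on keyspace test_keyspace to joe"
    grant_permission("joe")
    # -> "grant all permissions on all keyspaces to joe"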
diff --git a/salt/modules/celery.py b/salt/modules/celery.py
index 9d917d96b23..8dc787c4346 100644
--- a/salt/modules/celery.py
+++ b/salt/modules/celery.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Support for scheduling celery tasks. The worker is independent of salt and thus can run in a different
virtualenv or on a different python version, as long as broker, backend and serializer configurations match.
Also note that celery and the packages required by the celery broker, e.g. redis, must be installed to load
@@ -7,7 +7,7 @@ the salt celery execution module.
.. note::
A new app (and thus new connections) is created for each task execution
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -25,23 +25,36 @@ log = logging.getLogger(__name__)
try:
from celery import Celery
from celery.exceptions import TimeoutError # pylint: disable=no-name-in-module
+
HAS_CELERY = True
except ImportError:
HAS_CELERY = False
def __virtual__():
- '''
+ """
Only load if celery libraries exist.
- '''
+ """
if not HAS_CELERY:
- return False, 'The celery module could not be loaded: celery library not found'
+ return False, "The celery module could not be loaded: celery library not found"
return True
-def run_task(task_name, args=None, kwargs=None, broker=None, backend=None, wait_for_result=False, timeout=None,
- propagate=True, interval=0.5, no_ack=True, raise_timeout=True, config=None):
- '''
+def run_task(
+ task_name,
+ args=None,
+ kwargs=None,
+ broker=None,
+ backend=None,
+ wait_for_result=False,
+ timeout=None,
+ propagate=True,
+ interval=0.5,
+ no_ack=True,
+ raise_timeout=True,
+ config=None,
+):
+ """
Execute celery tasks. For celery specific parameters see celery documentation.
@@ -88,9 +101,9 @@ def run_task(task_name, args=None, kwargs=None, broker=None, backend=None, wait_
config
        Config dict for celery app. See the celery documentation.
- '''
+ """
if not broker:
- raise SaltInvocationError('broker parameter is required')
+ raise SaltInvocationError("broker parameter is required")
with Celery(broker=broker, backend=backend, set_as_current=False) as app:
if config:
@@ -103,10 +116,16 @@ def run_task(task_name, args=None, kwargs=None, broker=None, backend=None, wait_
if wait_for_result:
try:
- return async_result.get(timeout=timeout, propagate=propagate,
- interval=interval, no_ack=no_ack)
+ return async_result.get(
+ timeout=timeout,
+ propagate=propagate,
+ interval=interval,
+ no_ack=no_ack,
+ )
except TimeoutError as ex:
- log.error('Waiting for the result of a celery task execution timed out.')
+ log.error(
+ "Waiting for the result of a celery task execution timed out."
+ )
if raise_timeout:
six.reraise(*sys.exc_info())
return False
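A hypothetical invocation of the module (the broker URL and task name are assumptions):

    salt '*' celery.run_task tasks.add args='[2, 2]' broker='redis://localhost:6379/0' wait_for_result=True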
diff --git a/salt/modules/ceph.py b/salt/modules/ceph.py
index c8c1b607837..f3df17cdc1b 100644
--- a/salt/modules/ceph.py
+++ b/salt/modules/ceph.py
@@ -1,22 +1,23 @@
# -*- coding: utf-8 -*-
-'''
+"""
Module to provide ceph control with salt.
:depends: - ceph_cfg Python module
.. versionadded:: 2016.11.0
-'''
+"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
-import logging
+import logging
log = logging.getLogger(__name__)
-__virtualname__ = 'ceph'
+__virtualname__ = "ceph"
try:
import ceph_cfg
+
HAS_CEPH_CFG = True
except ImportError:
HAS_CEPH_CFG = False
@@ -24,13 +25,15 @@ except ImportError:
def __virtual__():
if HAS_CEPH_CFG is False:
- msg = 'ceph_cfg unavailable: {0} execution module cant be loaded '.format(__virtualname__)
+        msg = "ceph_cfg unavailable: {0} execution module can't be loaded".format(
+ __virtualname__
+ )
return False, msg
return __virtualname__
def partition_list():
- '''
+ """
List partitions by disk
CLI Example:
@@ -38,12 +41,12 @@ def partition_list():
.. code-block:: bash
salt '*' ceph.partition_list
- '''
+ """
return ceph_cfg.partition_list()
def partition_list_osd():
- '''
+ """
List all OSD data partitions by partition
CLI Example:
@@ -51,12 +54,12 @@ def partition_list_osd():
.. code-block:: bash
salt '*' ceph.partition_list_osd
- '''
+ """
return ceph_cfg.partition_list_osd()
def partition_list_journal():
- '''
+ """
List all OSD journal partitions by partition
CLI Example:
@@ -64,12 +67,12 @@ def partition_list_journal():
.. code-block:: bash
salt '*' ceph.partition_list_journal
- '''
+ """
return ceph_cfg.partition_list_journal()
def osd_discover():
- '''
+ """
List all OSD by cluster
CLI Example:
@@ -78,12 +81,12 @@ def osd_discover():
salt '*' ceph.osd_discover
- '''
+ """
return ceph_cfg.osd_discover()
def partition_is(dev):
- '''
+ """
Check whether a given device path is a partition or a full disk.
CLI Example:
@@ -91,12 +94,12 @@ def partition_is(dev):
.. code-block:: bash
salt '*' ceph.partition_is /dev/sdc1
- '''
+ """
return ceph_cfg.partition_is(dev)
def zap(target=None, **kwargs):
- '''
+ """
Destroy the partition table and content of a given disk.
.. code-block:: bash
@@ -113,7 +116,7 @@ def zap(target=None, **kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
if target is not None:
        log.warning("Deprecated use of function, use kwargs")
target = kwargs.get("dev", target)
@@ -122,7 +125,7 @@ def zap(target=None, **kwargs):
def osd_prepare(**kwargs):
- '''
+ """
Prepare an OSD
CLI Example:
@@ -157,12 +160,12 @@ def osd_prepare(**kwargs):
journal_uuid
        set the OSD journal UUID. If set, the call returns early when an OSD with this journal UUID already exists.
- '''
+ """
return ceph_cfg.osd_prepare(**kwargs)
def osd_activate(**kwargs):
- '''
+ """
Activate an OSD
CLI Example:
@@ -170,12 +173,12 @@ def osd_activate(**kwargs):
.. code-block:: bash
salt '*' ceph.osd_activate 'osd_dev'='/dev/vdc'
- '''
+ """
return ceph_cfg.osd_activate(**kwargs)
def keyring_create(**kwargs):
- '''
+ """
Create keyring for cluster
CLI Example:
@@ -195,12 +198,12 @@ def keyring_create(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.keyring_create(**kwargs)
def keyring_save(**kwargs):
- '''
+ """
    Create and save a keyring locally
CLI Example:
@@ -220,12 +223,12 @@ def keyring_save(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.keyring_save(**kwargs)
def keyring_purge(**kwargs):
- '''
+ """
Delete keyring for cluster
CLI Example:
@@ -247,12 +250,12 @@ def keyring_purge(**kwargs):
The cluster name. Defaults to ``ceph``.
If no ceph config file is found, this command will fail.
- '''
+ """
return ceph_cfg.keyring_purge(**kwargs)
def keyring_present(**kwargs):
- '''
+ """
Returns ``True`` if the keyring is present on disk, otherwise ``False``
CLI Example:
@@ -272,12 +275,12 @@ def keyring_present(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.keyring_present(**kwargs)
def keyring_auth_add(**kwargs):
- '''
+ """
Add keyring to authorized list
CLI Example:
@@ -297,12 +300,12 @@ def keyring_auth_add(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.keyring_auth_add(**kwargs)
def keyring_auth_del(**kwargs):
- '''
+ """
    Remove keyring from authorized list
CLI Example:
@@ -322,12 +325,12 @@ def keyring_auth_del(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.keyring_auth_del(**kwargs)
def mon_is(**kwargs):
- '''
+ """
Returns ``True`` if the target is a mon node, otherwise ``False``
CLI Example:
@@ -343,12 +346,12 @@ def mon_is(**kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
return ceph_cfg.mon_is(**kwargs)
def mon_status(**kwargs):
- '''
+ """
Get status from mon daemon
CLI Example:
@@ -364,12 +367,12 @@ def mon_status(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.status(**kwargs)
def mon_quorum(**kwargs):
- '''
+ """
Returns ``True`` if the mon daemon is in the quorum, otherwise ``False``
CLI Example:
@@ -385,12 +388,12 @@ def mon_quorum(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.mon_quorum(**kwargs)
def mon_active(**kwargs):
- '''
+ """
Returns ``True`` if the mon daemon is running, otherwise ``False``
CLI Example:
@@ -406,12 +409,12 @@ def mon_active(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.mon_active(**kwargs)
def mon_create(**kwargs):
- '''
+ """
Create a mon node
CLI Example:
@@ -427,12 +430,12 @@ def mon_create(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.mon_create(**kwargs)
def rgw_pools_create(**kwargs):
- '''
+ """
Create pools for rgw
CLI Example:
@@ -446,12 +449,12 @@ def rgw_pools_create(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.rgw_pools_create(**kwargs)
def rgw_pools_missing(**kwargs):
- '''
+ """
Show pools missing for rgw
CLI Example:
@@ -465,12 +468,12 @@ def rgw_pools_missing(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.rgw_pools_missing(**kwargs)
def rgw_create(**kwargs):
- '''
+ """
    Create an rgw
CLI Example:
@@ -490,12 +493,12 @@ def rgw_create(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.rgw_create(**kwargs)
def rgw_destroy(**kwargs):
- '''
+ """
    Remove an rgw
CLI Example:
@@ -515,12 +518,12 @@ def rgw_destroy(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.rgw_destroy(**kwargs)
def mds_create(**kwargs):
- '''
+ """
    Create an mds
CLI Example:
@@ -548,12 +551,12 @@ def mds_create(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.mds_create(**kwargs)
def mds_destroy(**kwargs):
- '''
+ """
    Remove an mds
CLI Example:
@@ -573,12 +576,12 @@ def mds_destroy(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.mds_destroy(**kwargs)
def keyring_auth_list(**kwargs):
- '''
+ """
List all cephx authorization keys
CLI Example:
@@ -594,12 +597,12 @@ def keyring_auth_list(**kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
return ceph_cfg.keyring_auth_list(**kwargs)
def pool_list(**kwargs):
- '''
+ """
List all pools
CLI Example:
@@ -615,12 +618,12 @@ def pool_list(**kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
return ceph_cfg.pool_list(**kwargs)
def pool_add(pool_name, **kwargs):
- '''
+ """
Create a pool
CLI Example:
@@ -651,12 +654,12 @@ def pool_add(pool_name, **kwargs):
crush_ruleset
The crush map rule set
- '''
+ """
return ceph_cfg.pool_add(pool_name, **kwargs)
def pool_del(pool_name, **kwargs):
- '''
+ """
Delete a pool
CLI Example:
@@ -672,12 +675,12 @@ def pool_del(pool_name, **kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
return ceph_cfg.pool_del(pool_name, **kwargs)
def purge(**kwargs):
- '''
+ """
    Purge ceph configuration on the node
CLI Example:
@@ -693,12 +696,12 @@ def purge(**kwargs):
cluster_uuid
The cluster UUID. Defaults to value found in ceph config file.
- '''
+ """
return ceph_cfg.purge(**kwargs)
def ceph_version():
- '''
+ """
Get the version of ceph installed
CLI Example:
@@ -706,12 +709,12 @@ def ceph_version():
.. code-block:: bash
salt '*' ceph.ceph_version
- '''
+ """
return ceph_cfg.ceph_version()
def cluster_quorum(**kwargs):
- '''
+ """
Get the cluster's quorum status
CLI Example:
@@ -727,12 +730,12 @@ def cluster_quorum(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.cluster_quorum(**kwargs)
def cluster_status(**kwargs):
- '''
+ """
Get the cluster status, including health if in quorum
CLI Example:
@@ -748,5 +751,5 @@ def cluster_status(**kwargs):
cluster_name
The cluster name. Defaults to ``ceph``.
- '''
+ """
return ceph_cfg.cluster_status(**kwargs)
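
Every public function in this module is a thin delegation to the matching
``ceph_cfg`` call, so the wrappers compose naturally from other Salt code. A
hedged sketch from inside a custom execution module (``__salt__`` is injected
by the loader; ``ensure_pool`` and ``pg_num=128`` are illustrative, and
``pool_list`` is assumed to return an iterable of pool names):

.. code-block:: python

    def ensure_pool(pool_name):
        # __salt__ resolves "ceph.pool_list" to the wrapper defined above.
        if pool_name not in __salt__["ceph.pool_list"]():
            return __salt__["ceph.pool_add"](pool_name, pg_num=128)
        return True
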
diff --git a/salt/modules/chassis.py b/salt/modules/chassis.py
index d2169d807c3..bd92bc5e41c 100644
--- a/salt/modules/chassis.py
+++ b/salt/modules/chassis.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-'''
+"""
Glue execution module to link to the :mod:`fx2 proxymodule <salt.proxy.fx2>`.
Depends: :mod:`iDRAC Remote execution module (salt.modules.dracr) <salt.modules.dracr>`
@@ -12,41 +12,44 @@ called ``chconfig``. That function looks up the function passed in the ``cmd``
parameter in :mod:`salt.modules.dracr` and calls it.
.. versionadded:: 2015.8.2
-'''
-from __future__ import absolute_import, unicode_literals, print_function
+"""
+from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
-import salt.utils.platform
+import salt.utils.platform
log = logging.getLogger(__name__)
-__proxyenabled__ = ['fx2']
-__virtualname__ = 'chassis'
+__proxyenabled__ = ["fx2"]
+__virtualname__ = "chassis"
def __virtual__():
- '''
+ """
Only work on proxy
- '''
+ """
if salt.utils.platform.is_proxy():
return __virtualname__
- return (False, 'The chassis execution module cannot be loaded: '
- 'this only works in proxy minions.')
+ return (
+ False,
+ "The chassis execution module cannot be loaded: "
+ "this only works in proxy minions.",
+ )
def chassis_credentials():
- proxyprefix = __opts__['proxy']['proxytype']
- (username, password) = __proxy__[proxyprefix+'.find_credentials']()
+ proxyprefix = __opts__["proxy"]["proxytype"]
+ (username, password) = __proxy__[proxyprefix + ".find_credentials"]()
return (username, password)
def cmd(cmd, *args, **kwargs):
- proxyprefix = __opts__['proxy']['proxytype']
+ proxyprefix = __opts__["proxy"]["proxytype"]
(username, password) = chassis_credentials()
- kwargs['admin_username'] = username
- kwargs['admin_password'] = password
- kwargs['host'] = __proxy__[proxyprefix+'.host']()
- proxycmd = __opts__['proxy']['proxytype'] + '.chconfig'
+ kwargs["admin_username"] = username
+ kwargs["admin_password"] = password
+ kwargs["host"] = __proxy__[proxyprefix + ".host"]()
+ proxycmd = __opts__["proxy"]["proxytype"] + ".chconfig"
return __proxy__[proxycmd](cmd, *args, **kwargs)
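
The dispatch above leans on the ``__proxy__`` dunder, which maps
``"<proxytype>.<function>"`` strings to the loaded proxymodule's callables. A
condensed, hedged restatement of what ``cmd()`` does for an fx2 proxy
(``call_through_proxy`` is an illustrative name):

.. code-block:: python

    def call_through_proxy(func_name, *args, **kwargs):
        proxytype = __opts__["proxy"]["proxytype"]  # "fx2" for this module
        username, password = __proxy__[proxytype + ".find_credentials"]()
        kwargs["admin_username"] = username
        kwargs["admin_password"] = password
        kwargs["host"] = __proxy__[proxytype + ".host"]()
        # chconfig() looks up func_name in salt.modules.dracr and calls it.
        return __proxy__[proxytype + ".chconfig"](func_name, *args, **kwargs)
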
diff --git a/salt/modules/chef.py b/salt/modules/chef.py
index 786508b0d4a..028dcdb3d0c 100644
--- a/salt/modules/chef.py
+++ b/salt/modules/chef.py
@@ -1,18 +1,20 @@
# -*- coding: utf-8 -*-
-'''
+"""
Execute chef in server or solo mode
-'''
+"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+
import logging
import os
import tempfile
+import salt.utils.decorators.path
+
# Import Salt libs
import salt.utils.path
import salt.utils.platform
-import salt.utils.decorators.path
# Import 3rd-party libs
from salt.ext import six
@@ -21,43 +23,36 @@ log = logging.getLogger(__name__)
def __virtual__():
- '''
+ """
Only load if chef is installed
- '''
- if not salt.utils.path.which('chef-client'):
- return (False, 'Cannot load chef module: chef-client not found')
+ """
+ if not salt.utils.path.which("chef-client"):
+ return (False, "Cannot load chef module: chef-client not found")
return True
def _default_logfile(exe_name):
- '''
+ """
Retrieve the logfile name
- '''
+ """
if salt.utils.platform.is_windows():
- tmp_dir = os.path.join(__opts__['cachedir'], 'tmp')
+ tmp_dir = os.path.join(__opts__["cachedir"], "tmp")
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
- logfile_tmp = tempfile.NamedTemporaryFile(dir=tmp_dir,
- prefix=exe_name,
- suffix='.log',
- delete=False)
+ logfile_tmp = tempfile.NamedTemporaryFile(
+ dir=tmp_dir, prefix=exe_name, suffix=".log", delete=False
+ )
logfile = logfile_tmp.name
logfile_tmp.close()
else:
- logfile = salt.utils.path.join(
- '/var/log',
- '{0}.log'.format(exe_name)
- )
+ logfile = salt.utils.path.join("/var/log", "{0}.log".format(exe_name))
return logfile
-@salt.utils.decorators.path.which('chef-client')
-def client(whyrun=False,
- localmode=False,
- logfile=None,
- **kwargs):
- '''
+@salt.utils.decorators.path.which("chef-client")
+def client(whyrun=False, localmode=False, logfile=None, **kwargs):
+ """
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
@@ -123,29 +118,29 @@ def client(whyrun=False,
whyrun
Enable whyrun mode when set to True
- '''
+ """
if logfile is None:
- logfile = _default_logfile('chef-client')
- args = ['chef-client',
- '--no-color',
- '--once',
- '--logfile "{0}"'.format(logfile),
- '--format doc']
+ logfile = _default_logfile("chef-client")
+ args = [
+ "chef-client",
+ "--no-color",
+ "--once",
+ '--logfile "{0}"'.format(logfile),
+ "--format doc",
+ ]
if whyrun:
- args.append('--why-run')
+ args.append("--why-run")
if localmode:
- args.append('--local-mode')
+ args.append("--local-mode")
return _exec_cmd(*args, **kwargs)
-@salt.utils.decorators.path.which('chef-solo')
-def solo(whyrun=False,
- logfile=None,
- **kwargs):
- '''
+@salt.utils.decorators.path.which("chef-solo")
+def solo(whyrun=False, logfile=None, **kwargs):
+ """
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
@@ -192,16 +187,18 @@ def solo(whyrun=False,
whyrun
Enable whyrun mode when set to True
- '''
+ """
if logfile is None:
- logfile = _default_logfile('chef-solo')
- args = ['chef-solo',
- '--no-color',
- '--logfile "{0}"'.format(logfile),
- '--format doc']
+ logfile = _default_logfile("chef-solo")
+ args = [
+ "chef-solo",
+ "--no-color",
+ '--logfile "{0}"'.format(logfile),
+ "--format doc",
+ ]
if whyrun:
- args.append('--why-run')
+ args.append("--why-run")
return _exec_cmd(*args, **kwargs)
@@ -209,12 +206,15 @@ def solo(whyrun=False,
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
- cmd_args = ' '.join(args)
- cmd_kwargs = ''.join([
- ' --{0} {1}'.format(k, v)
- for k, v in six.iteritems(kwargs) if not k.startswith('__')
- ])
- cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
- log.debug('Chef command: {0}'.format(cmd_exec))
+ cmd_args = " ".join(args)
+ cmd_kwargs = "".join(
+ [
+ " --{0} {1}".format(k, v)
+ for k, v in six.iteritems(kwargs)
+ if not k.startswith("__")
+ ]
+ )
+ cmd_exec = "{0}{1}".format(cmd_args, cmd_kwargs)
+ log.debug("Chef command: {0}".format(cmd_exec))
- return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
+ return __salt__["cmd.run_all"](cmd_exec, python_shell=False)
diff --git a/salt/modules/chocolatey.py b/salt/modules/chocolatey.py
index 1b2c17674db..04fcf1120d8 100644
--- a/salt/modules/chocolatey.py
+++ b/salt/modules/chocolatey.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
-'''
+"""
A dead simple module wrapping calls to the Chocolatey package manager
(http://chocolatey.org)
.. versionadded:: 2014.1.0
-'''
+"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
@@ -12,115 +12,124 @@ import logging
import os
import re
import tempfile
-from requests.structures import CaseInsensitiveDict
# Import salt libs
import salt.utils.data
import salt.utils.platform
+from requests.structures import CaseInsensitiveDict
+from salt.exceptions import (
+ CommandExecutionError,
+ CommandNotFoundError,
+ SaltInvocationError,
+)
from salt.utils.versions import LooseVersion as _LooseVersion
-from salt.exceptions import CommandExecutionError, CommandNotFoundError, \
- SaltInvocationError
-
log = logging.getLogger(__name__)
-__func_alias__ = {
- 'list_': 'list'
-}
+__func_alias__ = {"list_": "list"}
def __virtual__():
- '''
+ """
Confirm this module is on a Windows system running Vista or later.
While it is possible to make Chocolatey run under XP and Server 2003 with
an awful lot of hassle (e.g. SSL is completely broken), the PowerShell shim
for simulating UAC forces a GUI prompt, and is not compatible with
salt-minion running as SYSTEM.
- '''
+ """
if not salt.utils.platform.is_windows():
- return (False, 'Cannot load module chocolatey: Chocolatey requires '
- 'Windows')
+ return (False, "Cannot load module chocolatey: Chocolatey requires Windows")
- if __grains__['osrelease'] in ('XP', '2003Server'):
- return (False, 'Cannot load module chocolatey: Chocolatey requires '
- 'Windows Vista or later')
+ if __grains__["osrelease"] in ("XP", "2003Server"):
+ return (
+ False,
+ "Cannot load module chocolatey: Chocolatey requires "
+ "Windows Vista or later",
+ )
- return 'chocolatey'
+ return "chocolatey"
def _clear_context():
- '''
+ """
Clear variables stored in __context__. Run this function when a new version
of chocolatey is installed.
- '''
- choco_items = [x for x in __context__ if x.startswith('chocolatey.')]
+ """
+ choco_items = [x for x in __context__ if x.startswith("chocolatey.")]
for var in choco_items:
__context__.pop(var)
def _yes():
- '''
+ """
Returns ['--yes'] if on v0.9.9.0 or later, otherwise returns an empty list
    Confirm all prompts (``--yes`` is available on v0.9.9.0 or later).
- '''
- if 'chocolatey._yes' in __context__:
- return __context__['chocolatey._yes']
- if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.9'):
- answer = ['--yes']
+ """
+ if "chocolatey._yes" in __context__:
+ return __context__["chocolatey._yes"]
+ if _LooseVersion(chocolatey_version()) >= _LooseVersion("0.9.9"):
+ answer = ["--yes"]
else:
answer = []
- __context__['chocolatey._yes'] = answer
- return __context__['chocolatey._yes']
+ __context__["chocolatey._yes"] = answer
+ return __context__["chocolatey._yes"]
def _no_progress():
- '''
+ """
Returns ['--no-progress'] if on v0.10.4 or later, otherwise returns an
empty list
- '''
- if 'chocolatey._no_progress' in __context__:
- return __context__['chocolatey._no_progress']
- if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.10.4'):
- answer = ['--no-progress']
+ """
+ if "chocolatey._no_progress" in __context__:
+ return __context__["chocolatey._no_progress"]
+ if _LooseVersion(chocolatey_version()) >= _LooseVersion("0.10.4"):
+ answer = ["--no-progress"]
else:
- log.warning('--no-progress unsupported in choco < 0.10.4')
+ log.warning("--no-progress unsupported in choco < 0.10.4")
answer = []
- __context__['chocolatey._no_progress'] = answer
- return __context__['chocolatey._no_progress']
+ __context__["chocolatey._no_progress"] = answer
+ return __context__["chocolatey._no_progress"]
def _find_chocolatey():
- '''
+ """
Returns the full path to chocolatey.bat on the host.
- '''
+ """
# Check context
- if 'chocolatey._path' in __context__:
- return __context__['chocolatey._path']
+ if "chocolatey._path" in __context__:
+ return __context__["chocolatey._path"]
# Check the path
- choc_path = __salt__['cmd.which']('chocolatey.exe')
+ choc_path = __salt__["cmd.which"]("chocolatey.exe")
if choc_path:
- __context__['chocolatey._path'] = choc_path
- return __context__['chocolatey._path']
+ __context__["chocolatey._path"] = choc_path
+ return __context__["chocolatey._path"]
# Check in common locations
choc_defaults = [
- os.path.join(os.environ.get('ProgramData'), 'Chocolatey', 'bin', 'chocolatey.exe'),
- os.path.join(os.environ.get('SystemDrive'), 'Chocolatey', 'bin', 'chocolatey.bat')]
+ os.path.join(
+ os.environ.get("ProgramData"), "Chocolatey", "bin", "chocolatey.exe"
+ ),
+ os.path.join(
+ os.environ.get("SystemDrive"), "Chocolatey", "bin", "chocolatey.bat"
+ ),
+ ]
for choc_exe in choc_defaults:
if os.path.isfile(choc_exe):
- __context__['chocolatey._path'] = choc_exe
- return __context__['chocolatey._path']
+ __context__["chocolatey._path"] = choc_exe
+ return __context__["chocolatey._path"]
# Not installed, raise an error
- err = ('Chocolatey not installed. Use chocolatey.bootstrap to '
- 'install the Chocolatey package manager.')
+ err = (
+ "Chocolatey not installed. Use chocolatey.bootstrap to "
+ "install the Chocolatey package manager."
+ )
raise CommandExecutionError(err)
def chocolatey_version():
- '''
+ """
Returns the version of Chocolatey installed on the minion.
CLI Example:
@@ -128,20 +137,20 @@ def chocolatey_version():
.. code-block:: bash
salt '*' chocolatey.chocolatey_version
- '''
- if 'chocolatey._version' in __context__:
- return __context__['chocolatey._version']
+ """
+ if "chocolatey._version" in __context__:
+ return __context__["chocolatey._version"]
cmd = [_find_chocolatey()]
- cmd.append('-v')
- out = __salt__['cmd.run'](cmd, python_shell=False)
- __context__['chocolatey._version'] = out
+ cmd.append("-v")
+ out = __salt__["cmd.run"](cmd, python_shell=False)
+ __context__["chocolatey._version"] = out
- return __context__['chocolatey._version']
+ return __context__["chocolatey._version"]
def bootstrap(force=False):
- '''
+ """
Download and install the latest version of the Chocolatey package manager
via the official bootstrap.
@@ -162,86 +171,103 @@ def bootstrap(force=False):
salt '*' chocolatey.bootstrap
salt '*' chocolatey.bootstrap force=True
- '''
+ """
# Check if Chocolatey is already present in the path
try:
choc_path = _find_chocolatey()
except CommandExecutionError:
choc_path = None
if choc_path and not force:
- return 'Chocolatey found at {0}'.format(choc_path)
+ return "Chocolatey found at {0}".format(choc_path)
# The following lookup tables are required to determine the correct
# download required to install PowerShell. That's right, there's more
# than one! You're welcome.
ps_downloads = {
- ('Vista', 'x86'): 'http://download.microsoft.com/download/A/7/5/A75BC017-63CE-47D6-8FA4-AFB5C21BAC54/Windows6.0-KB968930-x86.msu',
- ('Vista', 'AMD64'): 'http://download.microsoft.com/download/3/C/8/3C8CF51E-1D9D-4DAA-AAEA-5C48D1CD055C/Windows6.0-KB968930-x64.msu',
- ('2008Server', 'x86'): 'http://download.microsoft.com/download/F/9/E/F9EF6ACB-2BA8-4845-9C10-85FC4A69B207/Windows6.0-KB968930-x86.msu',
- ('2008Server', 'AMD64'): 'http://download.microsoft.com/download/2/8/6/28686477-3242-4E96-9009-30B16BED89AF/Windows6.0-KB968930-x64.msu'
+ (
+ "Vista",
+ "x86",
+ ): "http://download.microsoft.com/download/A/7/5/A75BC017-63CE-47D6-8FA4-AFB5C21BAC54/Windows6.0-KB968930-x86.msu",
+ (
+ "Vista",
+ "AMD64",
+ ): "http://download.microsoft.com/download/3/C/8/3C8CF51E-1D9D-4DAA-AAEA-5C48D1CD055C/Windows6.0-KB968930-x64.msu",
+ (
+ "2008Server",
+ "x86",
+ ): "http://download.microsoft.com/download/F/9/E/F9EF6ACB-2BA8-4845-9C10-85FC4A69B207/Windows6.0-KB968930-x86.msu",
+ (
+ "2008Server",
+ "AMD64",
+ ): "http://download.microsoft.com/download/2/8/6/28686477-3242-4E96-9009-30B16BED89AF/Windows6.0-KB968930-x64.msu",
}
    # It took until .NET v4.0 for Microsoft to get the hang of making installers,
# this should work under any version of Windows
- net4_url = 'http://download.microsoft.com/download/1/B/E/1BE39E79-7E39-46A3-96FF-047F95396215/dotNetFx40_Full_setup.exe'
+ net4_url = "http://download.microsoft.com/download/1/B/E/1BE39E79-7E39-46A3-96FF-047F95396215/dotNetFx40_Full_setup.exe"
temp_dir = tempfile.gettempdir()
# Check if PowerShell is installed. This should be the case for every
# Windows release following Server 2008.
- ps_path = 'C:\\Windows\\SYSTEM32\\WindowsPowerShell\\v1.0\\powershell.exe'
+ ps_path = "C:\\Windows\\SYSTEM32\\WindowsPowerShell\\v1.0\\powershell.exe"
- if not __salt__['cmd.has_exec'](ps_path):
- if (__grains__['osrelease'], __grains__['cpuarch']) in ps_downloads:
+ if not __salt__["cmd.has_exec"](ps_path):
+ if (__grains__["osrelease"], __grains__["cpuarch"]) in ps_downloads:
# Install the appropriate release of PowerShell v2.0
- url = ps_downloads[(__grains__['osrelease'], __grains__['cpuarch'])]
- dest = os.path.join(temp_dir, 'powershell.exe')
- __salt__['cp.get_url'](url, dest)
- cmd = [dest, '/quiet', '/norestart']
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
- if result['retcode'] != 0:
- err = ('Installing Windows PowerShell failed. Please run the '
- 'installer GUI on the host to get a more specific '
- 'reason.')
+ url = ps_downloads[(__grains__["osrelease"], __grains__["cpuarch"])]
+ dest = os.path.join(temp_dir, "powershell.exe")
+ __salt__["cp.get_url"](url, dest)
+ cmd = [dest, "/quiet", "/norestart"]
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if result["retcode"] != 0:
+ err = (
+ "Installing Windows PowerShell failed. Please run the "
+ "installer GUI on the host to get a more specific "
+ "reason."
+ )
raise CommandExecutionError(err)
else:
- err = 'Windows PowerShell not found'
+ err = "Windows PowerShell not found"
raise CommandNotFoundError(err)
# Run the .NET Framework 4 web installer
- dest = os.path.join(temp_dir, 'dotnet4.exe')
- __salt__['cp.get_url'](net4_url, dest)
- cmd = [dest, '/q', '/norestart']
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
- if result['retcode'] != 0:
- err = ('Installing .NET v4.0 failed. Please run the installer GUI on '
- 'the host to get a more specific reason.')
+ dest = os.path.join(temp_dir, "dotnet4.exe")
+ __salt__["cp.get_url"](net4_url, dest)
+ cmd = [dest, "/q", "/norestart"]
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
+ if result["retcode"] != 0:
+ err = (
+ "Installing .NET v4.0 failed. Please run the installer GUI on "
+ "the host to get a more specific reason."
+ )
raise CommandExecutionError(err)
# Run the Chocolatey bootstrap.
cmd = (
- '{0} -NoProfile -ExecutionPolicy unrestricted '
+ "{0} -NoProfile -ExecutionPolicy unrestricted "
'-Command "iex ((new-object net.webclient).'
- 'DownloadString(\'https://chocolatey.org/install.ps1\'))" '
- '&& SET PATH=%PATH%;%systemdrive%\\chocolatey\\bin'
- .format(ps_path)
+ "DownloadString('https://chocolatey.org/install.ps1'))\" "
+ "&& SET PATH=%PATH%;%systemdrive%\\chocolatey\\bin".format(ps_path)
)
- result = __salt__['cmd.run_all'](cmd, python_shell=True)
+ result = __salt__["cmd.run_all"](cmd, python_shell=True)
- if result['retcode'] != 0:
- err = 'Bootstrapping Chocolatey failed: {0}'.format(result['stderr'])
+ if result["retcode"] != 0:
+ err = "Bootstrapping Chocolatey failed: {0}".format(result["stderr"])
raise CommandExecutionError(err)
- return result['stdout']
+ return result["stdout"]
-def list_(narrow=None,
- all_versions=False,
- pre_versions=False,
- source=None,
- local_only=False,
- exact=False):
- '''
+def list_(
+ narrow=None,
+ all_versions=False,
+ pre_versions=False,
+ source=None,
+ local_only=False,
+ exact=False,
+):
+ """
    Instructs Chocolatey to pull a package list from the repository; matching is fuzzy unless ``exact`` is set.
Args:
@@ -279,43 +305,43 @@ def list_(narrow=None,
salt '*' chocolatey.list
salt '*' chocolatey.list all_versions=True
- '''
+ """
choc_path = _find_chocolatey()
- cmd = [choc_path, 'list']
+ cmd = [choc_path, "list"]
if narrow:
cmd.append(narrow)
if salt.utils.data.is_true(all_versions):
- cmd.append('--allversions')
+ cmd.append("--allversions")
if salt.utils.data.is_true(pre_versions):
- cmd.append('--prerelease')
+ cmd.append("--prerelease")
if source:
- cmd.extend(['--source', source])
+ cmd.extend(["--source", source])
if local_only:
- cmd.append('--local-only')
+ cmd.append("--local-only")
if exact:
- cmd.append('--exact')
+ cmd.append("--exact")
# This is needed to parse the output correctly
- cmd.append('--limit-output')
+ cmd.append("--limit-output")
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
# Chocolatey introduced Enhanced Exit Codes starting with version 0.10.12
# Exit Code 2 means there were no results, but is not a failure
    # This may start to affect other functions in the future as Chocolatey
# moves more functions to this new paradigm
# https://github.com/chocolatey/choco/issues/1758
- if result['retcode'] not in [0, 2]:
- err = 'Running chocolatey failed: {0}'.format(result['stdout'])
+ if result["retcode"] not in [0, 2]:
+ err = "Running chocolatey failed: {0}".format(result["stdout"])
raise CommandExecutionError(err)
ret = CaseInsensitiveDict({})
- pkg_re = re.compile(r'(\S+)\|(\S+)')
- for line in result['stdout'].split('\n'):
+ pkg_re = re.compile(r"(\S+)\|(\S+)")
+ for line in result["stdout"].split("\n"):
if line.startswith("No packages"):
return ret
for name, ver in pkg_re.findall(line):
- if 'chocolatey' in name:
+ if "chocolatey" in name:
continue
if name not in ret:
ret[name] = []
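
With ``--limit-output``, choco prints one ``name|version`` pair per line,
which is exactly what the ``(\S+)\|(\S+)`` regex above consumes. A
self-contained sketch of that parsing step (the sample data is invented):

.. code-block:: python

    import re

    pkg_re = re.compile(r"(\S+)\|(\S+)")
    sample = "git|2.26.0\n7zip|19.0\n"

    packages = {}
    for line in sample.splitlines():
        for name, ver in pkg_re.findall(line):
            packages.setdefault(name, []).append(ver)

    # packages == {"git": ["2.26.0"], "7zip": ["19.0"]}
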
@@ -325,7 +351,7 @@ def list_(narrow=None,
def list_webpi():
- '''
+ """
Instructs Chocolatey to pull a full package list from the Microsoft Web PI
repository.
@@ -337,20 +363,20 @@ def list_webpi():
.. code-block:: bash
salt '*' chocolatey.list_webpi
- '''
+ """
choc_path = _find_chocolatey()
- cmd = [choc_path, 'list', '--source', 'webpi']
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
+ cmd = [choc_path, "list", "--source", "webpi"]
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
- if result['retcode'] != 0:
- err = 'Running chocolatey failed: {0}'.format(result['stdout'])
+ if result["retcode"] != 0:
+ err = "Running chocolatey failed: {0}".format(result["stdout"])
raise CommandExecutionError(err)
- return result['stdout']
+ return result["stdout"]
def list_windowsfeatures():
- '''
+ """
Instructs Chocolatey to pull a full package list from the Windows Features
list, via the Deployment Image Servicing and Management tool.
@@ -362,30 +388,32 @@ def list_windowsfeatures():
.. code-block:: bash
salt '*' chocolatey.list_windowsfeatures
- '''
+ """
choc_path = _find_chocolatey()
- cmd = [choc_path, 'list', '--source', 'windowsfeatures']
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
+ cmd = [choc_path, "list", "--source", "windowsfeatures"]
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
- if result['retcode'] != 0:
- err = 'Running chocolatey failed: {0}'.format(result['stdout'])
+ if result["retcode"] != 0:
+ err = "Running chocolatey failed: {0}".format(result["stdout"])
raise CommandExecutionError(err)
- return result['stdout']
+ return result["stdout"]
-def install(name,
- version=None,
- source=None,
- force=False,
- pre_versions=False,
- install_args=None,
- override_args=False,
- force_x86=False,
- package_args=None,
- allow_multiple=False,
- execution_timeout=None):
- '''
+def install(
+ name,
+ version=None,
+ source=None,
+ force=False,
+ pre_versions=False,
+ install_args=None,
+ override_args=False,
+ force_x86=False,
+ package_args=None,
+ allow_multiple=False,
+ execution_timeout=None,
+):
+ """
Instructs Chocolatey to install a package.
Args:
@@ -455,54 +483,55 @@ def install(name,
        salt '*' chocolatey.install <package name>
        salt '*' chocolatey.install <package name> version=<package version>
        salt '*' chocolatey.install <package name> install_args=<args> override_args=True
- '''
+ """
if force and allow_multiple:
raise SaltInvocationError(
- 'Cannot use \'force\' in conjunction with \'allow_multiple\'')
+ "Cannot use 'force' in conjunction with 'allow_multiple'"
+ )
choc_path = _find_chocolatey()
# chocolatey helpfully only supports a single package argument
# CORRECTION: it also supports multiple package names separated by spaces
# but any additional arguments apply to ALL packages specified
- cmd = [choc_path, 'install', name]
+ cmd = [choc_path, "install", name]
if version:
- cmd.extend(['--version', version])
+ cmd.extend(["--version", version])
if source:
- cmd.extend(['--source', source])
+ cmd.extend(["--source", source])
if salt.utils.data.is_true(force):
- cmd.append('--force')
+ cmd.append("--force")
if salt.utils.data.is_true(pre_versions):
- cmd.append('--prerelease')
+ cmd.append("--prerelease")
if install_args:
- cmd.extend(['--installarguments', install_args])
+ cmd.extend(["--installarguments", install_args])
if override_args:
- cmd.append('--overridearguments')
+ cmd.append("--overridearguments")
if force_x86:
- cmd.append('--forcex86')
+ cmd.append("--forcex86")
if package_args:
- cmd.extend(['--packageparameters', package_args])
+ cmd.extend(["--packageparameters", package_args])
if allow_multiple:
- cmd.append('--allow-multiple')
+ cmd.append("--allow-multiple")
if execution_timeout:
- cmd.extend(['--execution-timeout', execution_timeout])
+ cmd.extend(["--execution-timeout", execution_timeout])
# Salt doesn't need to see the progress
cmd.extend(_no_progress())
cmd.extend(_yes())
- result = __salt__['cmd.run_all'](cmd, python_shell=False)
+ result = __salt__["cmd.run_all"](cmd, python_shell=False)
- if result['retcode'] not in [0, 1641, 3010]:
- err = 'Running chocolatey failed: {0}'.format(result['stdout'])
+ if result["retcode"] not in [0, 1641, 3010]:
+ err = "Running chocolatey failed: {0}".format(result["stdout"])
raise CommandExecutionError(err)
- if name == 'chocolatey':
+ if name == "chocolatey":
_clear_context()
- return result['stdout']
+ return result["stdout"]
def install_cygwin(name, install_args=None, override_args=False):
- '''
+ """
Instructs Chocolatey to install a package via Cygwin.
name
@@ -524,15 +553,14 @@ def install_cygwin(name, install_args=None, override_args=False):
        salt '*' chocolatey.install_cygwin <package name>
        salt '*' chocolatey.install_cygwin <package name> install_args=<args>