Merge pull request #52037 from Ch3LL/bp-51201

Backport #51201 into 2019.2
commit 96935c989e
Daniel Wozniak, 2019-03-07 12:45:16 -07:00 (committed by GitHub)
38 changed files with 198 additions and 277 deletions
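
Most of the hunks below swap eager string interpolation inside logging calls (str.format() or '+' concatenation) for lazy %-style arguments, so the message is only built when the record is actually handled; several of them also pass Salt's exc_info_on_loglevel=logging.DEBUG so the traceback is only shown when debug logging is enabled. A minimal stdlib-only sketch of the pattern (the provider name is illustrative):

import logging

log = logging.getLogger(__name__)
driver = 'ec2'  # illustrative provider name

# Eager: the message is formatted even if ERROR records end up filtered out.
log.error('Public cloud provider {0} is not available'.format(driver))
log.error('Public cloud provider ' + driver + ' is not available')

# Lazy: logging interpolates the arguments only when the record is handled,
# and linters can check the placeholders against the argument list.
log.error('Public cloud provider %s is not available', driver)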


@ -625,11 +625,7 @@ class Cloud(object):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error(
'Public cloud provider {0} is not available'.format(
driver
)
)
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
@ -642,10 +638,8 @@ class Cloud(object):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'{0}()\' while querying for '
'running nodes: {1}'.format(fun, err),
# Show the traceback if the debug logging level is
# enabled
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
@ -681,11 +675,7 @@ class Cloud(object):
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error(
'Public cloud provider {0} is not available'.format(
driver
)
)
log.error('Public cloud provider %s is not available', driver)
continue
multiprocessing_data.append({
@ -779,11 +769,7 @@ class Cloud(object):
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug(
'The \'{0}\' cloud driver is unable to be optimized.'.format(
driver
)
)
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
@ -816,10 +802,9 @@ class Cloud(object):
# The capability to gather locations is not supported by this
# cloud module
log.debug(
'The \'{0}\' cloud driver defined under \'{1}\' provider '
'alias is unable to get the locations information'.format(
driver, alias
)
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the locations information',
driver, alias
)
continue
@ -835,11 +820,8 @@ class Cloud(object):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
@ -859,11 +841,9 @@ class Cloud(object):
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'{0}\' cloud driver defined under \'{1}\' provider '
'alias is unable to get the images information'.format(
driver,
alias
)
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
@ -878,11 +858,8 @@ class Cloud(object):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
@ -902,11 +879,9 @@ class Cloud(object):
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'{0}\' cloud driver defined under \'{1}\' provider '
'alias is unable to get the sizes information'.format(
driver,
alias
)
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
@ -921,11 +896,8 @@ class Cloud(object):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'{0}()\': {1}'.format(
fun, err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
@ -998,7 +970,7 @@ class Cloud(object):
})
# destroying in parallel
if self.opts['parallel'] and len(parallel_data) > 0:
if self.opts['parallel'] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if 'pool_size' in self.opts:
@ -1006,7 +978,7 @@ class Cloud(object):
else:
pool_size = len(parallel_data)
log.info('Destroying in parallel mode; '
'Cloud pool size: {0}'.format(pool_size))
'Cloud pool size: %s', pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
@ -1192,11 +1164,9 @@ class Cloud(object):
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'{0[name]}\' using \'{0[provider]}\' as the provider '
'cannot complete since \'{1}\' is not available'.format(
vm_,
driver
)
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
@ -1210,13 +1180,12 @@ class Cloud(object):
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'{0}\' VM settings.'.format(
vm_['name']
)
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -1242,11 +1211,7 @@ class Cloud(object):
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug(
'Generating the master keys for \'{0[name]}\''.format(
vm_
)
)
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -1322,10 +1287,8 @@ class Cloud(object):
break
except KeyError as exc:
log.exception(
'Failed to create VM {0}. Configuration value {1} needs '
'to be set'.format(
vm_['name'], exc
)
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
@ -1334,11 +1297,7 @@ class Cloud(object):
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info(
'Running {0} on {1}'.format(
self.opts['start_action'], vm_['name']
)
)
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
@ -1376,11 +1335,9 @@ class Cloud(object):
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'{0[name]}\' using \'{0[provider]}\' as the provider '
'cannot complete since \'{1}\' is not available'.format(
extra_,
driver
)
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
@ -1392,11 +1349,9 @@ class Cloud(object):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
(
'Failed to perform {0[provider]}.{0[action]} '
'on {0[name]}. '
'Configuration value {1} needs to be set'
).format(extra_, exc)
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
@ -1445,7 +1400,7 @@ class Cloud(object):
if name in vms:
prov = vms[name]['provider']
driv = vms[name]['driver']
msg = u'{0} already exists under {1}:{2}'.format(
msg = '{0} already exists under {1}:{2}'.format(
name, prov, driv
)
log.error(msg)
@ -1506,11 +1461,7 @@ class Cloud(object):
valid_function = True
fun = '{0}.{1}'.format(driver, self.opts['action'])
if fun not in self.clouds:
log.info(
'\'{0}()\' is not available. Not actioning...'.format(
fun
)
)
log.info('\'%s()\' is not available. Not actioning...', fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
@ -1613,9 +1564,8 @@ class Cloud(object):
)
log.debug(
'Trying to execute \'{0}\' with the following kwargs: {1}'.format(
fun, kwargs
)
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
with salt.utils.context.func_globals_inject(
@ -1646,16 +1596,15 @@ class Cloud(object):
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
'The cloud driver, \'{0}\', configured under the '
'\'{1}\' cloud provider alias, could not be loaded. '
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias, could not be loaded. '
'Please check your provider configuration files and '
'ensure all required dependencies are installed '
'for the \'{0}\' driver.\n'
'In rare cases, this could indicate the \'{2}()\' '
'function could not be found.\nRemoving \'{0}\' from '
'the available providers list'.format(
driver, alias, fun
)
'for the \'%s\' driver.\n'
'In rare cases, this could indicate the \'%s()\' '
'function could not be found.\nRemoving \'%s\' from '
'the available providers list',
driver, alias, driver, fun, driver
)
self.opts['providers'][alias].pop(driver)
@ -1672,10 +1621,10 @@ class Cloud(object):
):
if self.clouds[fun]() is False:
log.warning(
'The cloud driver, \'{0}\', configured under the '
'\'{1}\' cloud provider alias is not properly '
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias is not properly '
'configured. Removing it from the available '
'providers list.'.format(driver, alias)
'providers list.', driver, alias
)
self.opts['providers'][alias].pop(driver)
@ -1773,8 +1722,9 @@ class Map(Cloud):
state_action = matching_states[action]
except KeyError:
log.error(
'The use of \'{0}\' as an action is not supported in this context. '
'Only \'start\', \'stop\', and \'reboot\' are supported options.'.format(action)
'The use of \'%s\' as an action is not supported '
'in this context. Only \'start\', \'stop\', and '
'\'reboot\' are supported options.', action
)
raise SaltCloudException()
if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
@ -1792,8 +1742,8 @@ class Map(Cloud):
pass
elif self.opts.get('map_pillar') not in self.opts.get('maps'):
log.error(
'The specified map not found in pillar at \'cloud:maps:{0}\''.format(
self.opts['map_pillar'])
'The specified map not found in pillar at '
'\'cloud:maps:%s\'', self.opts['map_pillar']
)
raise SaltCloudNotFound()
else:
@ -1811,8 +1761,8 @@ class Map(Cloud):
if not os.path.isfile(self.opts['map']):
if not (self.opts['map']).startswith('salt://'):
log.error(
'The specified map file does not exist: \'{0}\''.format(
self.opts['map'])
'The specified map file does not exist: \'%s\'',
self.opts['map']
)
raise SaltCloudNotFound()
if (self.opts['map']).startswith('salt://'):
@ -1829,9 +1779,8 @@ class Map(Cloud):
)
except Exception as exc:
log.error(
'Rendering map {0} failed, render error:\n{1}'.format(
self.opts['map'], exc
),
'Rendering map %s failed, render error:\n%s',
self.opts['map'], exc,
exc_info_on_loglevel=logging.DEBUG
)
return {}
@ -1995,13 +1944,12 @@ class Map(Cloud):
deprecated = 'map_{0}'.format(setting)
if deprecated in overrides:
log.warning(
'The use of \'{0}\' on the \'{1}\' mapping has '
'The use of \'%s\' on the \'%s\' mapping has '
'been deprecated. The preferred way now is to '
'just define \'{2}\'. For now, salt-cloud will do '
'just define \'%s\'. For now, salt-cloud will do '
'the proper thing and convert the deprecated '
'mapping into the preferred one.'.format(
deprecated, nodename, setting
)
'mapping into the preferred one.',
deprecated, nodename, setting
)
overrides[setting] = overrides.pop(deprecated)
@ -2015,7 +1963,7 @@ class Map(Cloud):
)
del overrides['minion']['grains']
# remove minion key if now is empty dict
if len(overrides['minion']) == 0:
if not overrides['minion']:
del overrides['minion']
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
@ -2052,8 +2000,8 @@ class Map(Cloud):
# Machine already removed
break
log.warning("'{0}' already exists, removing from "
'the create map.'.format(name))
log.warning("'%s' already exists, removing from "
'the create map.', name)
if 'existing' not in ret:
ret['existing'] = {}
@ -2082,10 +2030,10 @@ class Map(Cloud):
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in six.iteritems(dmap['create']):
log.info('Calculating dependencies for {0}'.format(key))
log.info('Calculating dependencies for %s', key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order {0} for {1}'.format(level, key))
log.debug('Got execution order %s for %s', level, key)
dmap['create'][key]['level'] = level
try:
@ -2094,10 +2042,10 @@ class Map(Cloud):
existing_list = six.iteritems({})
for key, val in existing_list:
log.info('Calculating dependencies for {0}'.format(key))
log.info('Calculating dependencies for %s', key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order {0} for {1}'.format(level, key))
log.debug('Got execution order %s for %s', level, key)
dmap['existing'][key]['level'] = level
# Now sort the create list based on dependencies
@ -2115,7 +2063,7 @@ class Map(Cloud):
if profile.get('make_master', False) is True
))
master_minion_name = master_name
log.debug('Creating new master \'{0}\''.format(master_name))
log.debug('Creating new master \'%s\'', master_name)
if salt.config.get_cloud_config_value(
'deploy',
master_profile,
@ -2127,9 +2075,7 @@ class Map(Cloud):
)
# Generate the master keys
log.debug(
'Generating master keys for \'{0[name]}\''.format(master_profile)
)
log.debug('Generating master keys for \'%s\'', master_profile['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -2166,9 +2112,7 @@ class Map(Cloud):
if make_minion is False:
continue
log.debug(
'Generating minion keys for \'{0[name]}\''.format(profile)
)
log.debug('Generating minion keys for \'%s\'', profile['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -2282,23 +2226,20 @@ class Map(Cloud):
output[name].pop('deploy_kwargs', None)
except SaltCloudException as exc:
log.error(
'Failed to deploy \'{0}\'. Error: {1}'.format(
name, exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to deploy \'%s\'. Error: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
output[name] = {'Error': str(exc)}
for name in dmap.get('destroy', ()):
output[name] = self.destroy(name)
if self.opts['parallel'] and len(parallel_data) > 0:
if self.opts['parallel'] and parallel_data:
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Cloud pool size: {0}'.format(pool_size))
log.info('Cloud pool size: %s', pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size)
# We have deployed in parallel, now do start action in
@ -2314,11 +2255,7 @@ class Map(Cloud):
out = {}
for group in actionlist:
log.info(
'Running {0} on {1}'.format(
self.opts['start_action'], ', '.join(group)
)
)
log.info('Running %s on %s', self.opts['start_action'], ', '.join(group))
client = salt.client.get_local_client()
out.update(client.cmd(
','.join(group), self.opts['start_action'],
@ -2358,11 +2295,8 @@ def create_multiprocessing(parallel_data, queue=None):
)
except SaltCloudException as exc:
log.error(
'Failed to deploy \'{0[name]}\'. Error: {1}'.format(
parallel_data, exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to deploy \'%s\'. Error: %s',
parallel_data['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return {parallel_data['name']: {'Error': str(exc)}}
@ -2397,11 +2331,8 @@ def destroy_multiprocessing(parallel_data, queue=None):
except SaltCloudException as exc:
log.error(
'Failed to destroy {0}. Error: {1}'.format(
parallel_data['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to destroy %s. Error: %s',
parallel_data['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return {parallel_data['name']: {'Error': str(exc)}}
@ -2435,11 +2366,8 @@ def run_parallel_map_providers_query(data, queue=None):
)
except Exception as err:
log.debug(
'Failed to execute \'{0}()\' while querying for running '
'nodes: {1}'.format(data['fun'], err),
# Show the traceback if the debug logging level is
# enabled
exc_info_on_loglevel=logging.DEBUG
'Failed to execute \'%s()\' while querying for running nodes: %s',
data['fun'], err, exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any nodes
return data['alias'], data['driver'], ()
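
This file, and most of the driver modules in the hunks that follow, also drop explicit length comparisons in favour of plain truthiness checks, since empty lists, dicts and strings are falsey. A short illustration with a placeholder address:

public_ips = ['203.0.113.10']  # placeholder address list

# Old style: explicit length comparison
if len(public_ips) > 0:
    ssh_ip = public_ips[0]

# Idiomatic: rely on the container's truthiness
if public_ips:
    ssh_ip = public_ips[0]

# Emptiness tests read the same way
if not public_ips:
    print('No more IPs available')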


@ -292,7 +292,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
salt.utils.cloud.store_password_in_keyring(provider_name, username)
elif self.config.get('map', None) and \
self.selected_query_option is None:
if len(mapper.rendered_map) == 0:
if not mapper.rendered_map:
sys.stderr.write('No nodes defined in this map')
self.exit(salt.defaults.exitcodes.EX_GENERIC)
try:


@ -360,7 +360,7 @@ def list_nodes_full(call=None):
ret[name]['private_ips'] = items[item]['IpAddress']
if item == 'VpcAttributes':
vpc_ips = items[item]['PrivateIpAddress']['IpAddress']
if len(vpc_ips) > 0:
if vpc_ips:
ret[name]['private_ips'] = vpc_ips
ret[name][item] = value
@ -709,9 +709,9 @@ def create(vm_):
finally:
raise SaltCloudSystemExit(six.text_type(exc))
if len(data['public_ips']) > 0:
if data['public_ips']:
ssh_ip = data['public_ips'][0]
elif len(data['private_ips']) > 0:
elif data['private_ips']:
ssh_ip = data['private_ips'][0]
else:
log.info('No available ip:cant connect to salt')
@ -975,7 +975,7 @@ def show_image(kwargs, call=None):
# DescribeImages so far support input multi-image. And
# if not found certain image, the response will include
# blank image list other than 'not found' error message
if 'Code' in items or len(items['Images']['Image']) == 0:
if 'Code' in items or not items['Images']['Image']:
raise SaltCloudNotFound('The specified image could not be found.')
log.debug(


@ -852,7 +852,7 @@ def create_network_interface(call=None, kwargs=None):
]
break
except CloudError as exc:
log.error('There was a cloud error: {0}'.format(exc))
log.error('There was a cloud error: %s', exc)
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
@ -1412,9 +1412,9 @@ def create(vm_):
Query node data.
'''
data = show_instance(name, call='action')
ip_address = None
if len(data.keys()) == 0:
if not data:
return False
ip_address = None
if bootstrap_interface == 'public':
ip_address = data['public_ips'][0]
if bootstrap_interface == 'private':


@ -288,12 +288,14 @@ def create(vm_):
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
# pylint: disable=not-callable
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_location(conn, vm_),
}
# pylint: enable=not-callable
sg = get_security_groups(conn, vm_)
if sg is not False:


@ -139,7 +139,7 @@ def get_dependencies():
def _query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action')
node = show_instance(vm_['name'], 'action') # pylint: disable=not-callable
running = (node['state'] == NodeState.RUNNING)
log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(node['name']), node['state'])
@ -292,7 +292,7 @@ def create(vm_):
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
destroy(vm_['name']) # pylint: disable=not-callable
except SaltCloudSystemExit:
pass
finally:
@ -386,7 +386,7 @@ def create_lb(kwargs=None, call=None):
for member in membersList:
try:
log.debug('Member: %s', member)
node = get_node(conn, member)
node = get_node(conn, member) # pylint: disable=not-callable
log.debug('Node: %s', node)
ip = node.private_ips[0]
except Exception as err:
@ -494,7 +494,7 @@ def stop(name, call=None):
salt-cloud -a stop vm_name
'''
conn = get_conn()
node = get_node(conn, name)
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_shutdown_graceful(node)
@ -518,7 +518,7 @@ def start(name, call=None):
'''
conn = get_conn()
node = get_node(conn, name)
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_start_node(node)


@ -2644,7 +2644,7 @@ def create(vm_=None, call=None):
vm_['instance_id_list'].append(instance['instanceId'])
vm_['instance_id'] = vm_['instance_id_list'].pop()
if len(vm_['instance_id_list']) > 0:
if vm_['instance_id_list']:
# Multiple instances were spun up, get one now, and queue the rest
queue_instances(vm_['instance_id_list'])


@ -454,7 +454,7 @@ def __get_host(node, vm_):
ip_address = node.public_ips[0]
log.info('Salt node data. Public_ip: %s', ip_address)
if len(ip_address) > 0:
if ip_address:
return ip_address
return node.name
@ -550,7 +550,7 @@ def _parse_allow(allow):
seen_protos[pairs[0]].append(pairs[1])
for k in seen_protos:
d = {'IPProtocol': k}
if len(seen_protos[k]) > 0:
if seen_protos[k]:
d['ports'] = seen_protos[k]
allow_dict.append(d)
log.debug("firewall allowed protocols/ports: %s", allow_dict)
@ -1274,7 +1274,7 @@ def create_address(kwargs=None, call=None):
transport=__opts__['transport']
)
log.info('Created GCE Address '+name)
log.info('Created GCE Address %s', name)
return _expand_address(addy)
@ -1344,7 +1344,7 @@ def delete_address(kwargs=None, call=None):
transport=__opts__['transport']
)
log.info('Deleted GCE Address ' + name)
log.info('Deleted GCE Address %s', name)
return result


@ -111,9 +111,9 @@ def create(vm_):
host_ip = vm_['assign_public_ip']
else:
public_ips = list_public_ips()
if len(public_ips.keys()) < 1:
if not public_ips:
raise SaltCloudException('No more IPs available')
host_ip = list(public_ips)[0]
host_ip = next(iter(public_ips))
create_kwargs = {
'name': vm_['name'],
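
The hunk above also replaces list(public_ips)[0] with next(iter(public_ips)), which yields the first key without copying the whole mapping into a list first. A small sketch with placeholder data:

public_ips = {'198.51.100.7': {}, '198.51.100.8': {}}  # placeholder mapping

host_ip = next(iter(public_ips))        # stops after the first key
assert host_ip == list(public_ips)[0]   # same result, no intermediate list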


@ -1037,9 +1037,12 @@ def _decode_linode_plan_label(label):
'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
)
log.warning('An outdated Linode plan label was detected in your Cloud Profile ({}).'
' Please update the profile to use'
' the new label format ({}) for the requested Linode plan size.'.format(label, new_label))
log.warning(
'An outdated Linode plan label was detected in your Cloud '
'Profile (%s). Please update the profile to use the new '
'label format (%s) for the requested Linode plan size.',
label, new_label
)
label = new_label
@ -1557,7 +1560,7 @@ def _query(action=None,
)
if 'ERRORARRAY' in result['dict']:
if len(result['dict']['ERRORARRAY']):
if result['dict']['ERRORARRAY']:
error_list = []
for error in result['dict']['ERRORARRAY']:


@ -398,11 +398,11 @@ def list_nodes(conn=None, call=None):
size = node.get('hardware').get('fixed_instance_size_id', 'Custom size')
if node.get('private_networks') and len(node['private_networks']) > 0:
if node.get('private_networks'):
for private_ip in node['private_networks']:
private_ips.append(private_ip)
if node.get('ips') and len(node['ips']) > 0:
if node.get('ips'):
for public_ip in node['ips']:
public_ips.append(public_ip['ip'])


@ -708,9 +708,9 @@ def create(vm_):
data = show_instance(vm_['name'], conn=conn, call='action')
if 'wait_for_metadata' in vm_:
for key, value in six.iteritems(vm_.get('wait_for_metadata', {})):
log.debug('Waiting for metadata: {0}={1}'.format(key, value))
log.debug('Waiting for metadata: %s=%s', key, value)
if data['metadata'].get(key, None) != value:
log.debug('Metadata is not ready: {0}={1}'.format(key, data['metadata'].get(key, None)))
log.debug('Metadata is not ready: %s=%s', key, data['metadata'].get(key))
return False
return preferred_ip(vm_, data[ssh_interface(vm_)])
try:


@ -372,8 +372,7 @@ def get_datacenter_id():
try:
conn.get_datacenter(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get datacenter: {0}'.format(
datacenter_id))
log.error('Failed to get datacenter: %s', datacenter_id)
raise
return datacenter_id
@ -607,7 +606,7 @@ def list_nodes_full(conn=None, call=None):
node['private_ips'] = []
if item['entities']['nics']['items'] > 0:
for nic in item['entities']['nics']['items']:
if len(nic['properties']['ips']) > 0:
if nic['properties']['ips']:
pass
ip_address = nic['properties']['ips'][0]
if salt.utils.cloud.is_public_ip(ip_address):
@ -877,11 +876,8 @@ def create(vm_):
return False
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating {0} \n\n'
'Error: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
'Error creating %s \n\nError: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1008,12 +1004,12 @@ def destroy(name, call=None):
# The server is deleted and now is safe to delete the volumes
if delete_volumes:
for vol in attached_volumes['items']:
log.debug('Deleting volume {0}'.format(vol['id']))
log.debug('Deleting volume %s', vol['id'])
conn.delete_volume(
datacenter_id=datacenter_id,
volume_id=vol['id']
)
log.debug('Deleted volume {0}'.format(vol['id']))
log.debug('Deleted volume %s', vol['id'])
__utils__['cloud.fire_event'](
'event',


@ -356,7 +356,7 @@ def show_image(kwargs, call=None):
items = query(params=params)
if len(items['image_set']) == 0:
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}


@ -487,7 +487,7 @@ def create(vm_):
for node in node_info:
if node['id'] == response['id'] and \
'passwords' in node['operatingSystem'] and \
len(node['operatingSystem']['passwords']) > 0:
node['operatingSystem']['passwords']:
return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False


@ -390,7 +390,7 @@ def create(vm_):
for node in node_info:
if node['id'] == response['id'] \
and 'passwords' in node['operatingSystem'] \
and len(node['operatingSystem']['passwords']) > 0:
and node['operatingSystem']['passwords']:
return node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False


@ -204,7 +204,7 @@ def create(vm_info):
vb_start_vm(vm_name, timeout=boot_timeout)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)
if len(ips):
if ips:
ip = ips[interface_index]
log.info("[ %s ] IPv4 is: %s", vm_name, ip)
# ssh or smb using ip and install salt only if deploy is True


@ -3043,7 +3043,7 @@ def create_datacenter(kwargs=None, call=None):
'You must specify name of the new datacenter to be created.'
)
if len(datacenter_name) >= 80 or len(datacenter_name) <= 0:
if not datacenter_name or len(datacenter_name) >= 80:
raise SaltCloudSystemExit(
'The datacenter name must be a non empty string of less than 80 characters.'
)
@ -4383,7 +4383,7 @@ def create_datastore_cluster(kwargs=None, call=None):
'You must specify name of the new datastore cluster to be created.'
)
if len(datastore_cluster_name) >= 80 or len(datastore_cluster_name) <= 0:
if not datastore_cluster_name or len(datastore_cluster_name) >= 80:
raise SaltCloudSystemExit(
'The datastore cluster name must be a non empty string of less than 80 characters.'
)
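
Putting the truthiness check ahead of the length comparison in the two validations above also lets a None or empty name short-circuit cleanly instead of raising TypeError from len(). A sketch of that ordering; the helper name and exception type are illustrative:

def _validate_name(name):
    # Truthiness first: None or '' short-circuits before len() runs, so the
    # caller sees the intended validation error rather than a TypeError.
    if not name or len(name) >= 80:
        raise ValueError(
            'The name must be a non empty string of less than 80 characters.'
        )
    return name

_validate_name('prod-datastore-cluster')  # placeholder name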


@ -238,7 +238,7 @@ def get_vm_ip(name=None, session=None, call=None):
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if len(session.xenapi.VIF.get_ipv4_addresses(vif)) != 0:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
@ -520,7 +520,7 @@ def _determine_resource_pool(session, vm_):
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if len(pool) <= 0:
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
@ -1023,7 +1023,7 @@ def destroy(name=None, call=None):
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: ' + record['power_state'])
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
@ -1307,7 +1307,7 @@ def get_pv_args(name, session=None, call=None):
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if len(pv_args) > 0:
if pv_args:
return pv_args
return None


@ -2312,14 +2312,15 @@ def include_config(include, orig_path, verbose, exit_on_config_errors=False):
# Catch situation where user typos path in configuration; also warns
# for empty include directory (which might be by design)
if len(glob.glob(path)) == 0:
glob_matches = glob.glob(path)
if not glob_matches:
if verbose:
log.warning(
'Warning parsing configuration file: "include" path/glob '
"'%s' matches no files", path
)
for fn_ in sorted(glob.glob(path)):
for fn_ in sorted(glob_matches):
log.debug('Including configuration from \'%s\'', fn_)
try:
opts = _read_conf_file(fn_)
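
Binding the glob result to glob_matches also means the pattern is expanded once instead of twice (once for the emptiness check and once for iteration). A minimal sketch; the include path is illustrative:

import glob

path = '/etc/salt/master.d/*.conf'  # illustrative include glob

glob_matches = glob.glob(path)
if not glob_matches:
    print("'%s' matches no files" % path)

for fn_ in sorted(glob_matches):
    print('Including configuration from %r' % fn_)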


@ -155,14 +155,14 @@ def extract_masters(opts, masters='master', port=None, raise_if_empty=True):
if not master_port:
emsg = "Invalid or missing opts['master_port']."
log.error(emsg + '\n')
log.error(emsg)
raise ValueError(emsg)
entries = opts.get(masters, [])
if not entries:
emsg = "Invalid or missing opts['{0}'].".format(masters)
log.error(emsg + '\n')
log.error(emsg)
if raise_if_empty:
raise ValueError(emsg)


@ -43,9 +43,8 @@ import os
try:
import hypchat
HAS_HYPCHAT = True
except ImportError:
HAS_HYPCHAT = False
hypchat = None
import salt.utils.args
import salt.utils.event
@ -59,17 +58,19 @@ import salt.loader
import salt.output
from salt.ext import six
def __virtual__():
return HAS_HYPCHAT
log = logging.getLogger(__name__)
_DEFAULT_API_URL = 'https://api.hipchat.com'
_DEFAULT_SLEEP = 5
_DEFAULT_MAX_ROOMS = 1000
__virtualname__ = 'hipchat'
def __virtual__():
return __virtualname__ if hypchat is not None \
else (False, 'hypchat is not installed')
def _publish_file(token, room, filepath, message='', outputter=None, api_url=None):
'''


@ -106,7 +106,7 @@ def start(url, funs=None, tags=None):
event = event_bus.get_event(full=True)
if event:
publish = True
if isinstance(tags, list) and len(tags) > 0:
if tags and isinstance(tags, list):
found_match = False
for tag in tags:
if fnmatch.fnmatch(event['tag'], tag):


@ -33,6 +33,8 @@ try:
except ImportError:
logstash = None
log = logging.getLogger(__name__)
__virtualname__ = 'logstash'
@ -42,9 +44,6 @@ def __virtual__():
else (False, 'python-logstash not installed')
log = logging.getLogger(__name__)
def start(host, port=5959, tag='salt/engine/logstash', proto='udp'):
'''
Listen to salt events and forward them to logstash


@ -37,19 +37,20 @@ from salt.ext.six.moves import zip
# Import third party libs
try:
import redis
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
redis = None
log = logging.getLogger(__name__)
__virtualname__ = 'redis'
log = logging.getLogger(__name__)
def __virtual__():
if not HAS_REDIS:
return False
else:
return True
log = logging.getLogger(__name__)
return __virtualname__ \
if redis is not None \
else (False, 'redis python module is not installed')
class Listener(object):
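
The engine refactors above keep the optional third-party import but make __virtual__() report why loading failed instead of returning a bare False. A self-contained sketch of that gating pattern, following the redis hunk above (trimmed to the import and loader check only):

import logging

try:
    import redis  # optional third-party dependency
except ImportError:
    redis = None

log = logging.getLogger(__name__)

__virtualname__ = 'redis'


def __virtual__():
    # Returning (False, reason) lets the loader log why the engine was
    # skipped instead of silently refusing to load it.
    return __virtualname__ \
        if redis is not None \
        else (False, 'redis python module is not installed')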


@ -727,7 +727,7 @@ class SlackClient(object):
if count > 10:
log.warning('Breaking in getting messages because count is exceeded')
break
if len(msg) == 0:
if not msg:
count += 1
log.warning('Skipping an empty message.')
continue # This one is a dud, get the next message


@ -87,7 +87,7 @@ def start(interval=3600, expire=604800):
if now - expire > seen:
stale_keys.append(m)
if len(stale_keys):
if stale_keys:
for k in stale_keys:
log.info('Removing stale key for %s', k)
wheel.cmd('key.delete', stale_keys)


@ -43,7 +43,7 @@ class SaltException(Exception):
import salt.utils.stringutils
if not isinstance(message, six.string_types):
message = six.text_type(message)
if six.PY3 or isinstance(message, unicode): # pylint: disable=incompatible-py3-code
if six.PY3 or isinstance(message, unicode): # pylint: disable=incompatible-py3-code,undefined-variable
super(SaltException, self).__init__(
salt.utils.stringutils.to_str(message)
)
@ -57,7 +57,7 @@ class SaltException(Exception):
# a str version, and convert the passed value to unicode for the
# message/strerror attributes.
super(SaltException, self).__init__(str(message)) # future lint: blacklisted-function
self.message = self.strerror = unicode(message) # pylint: disable=incompatible-py3-code
self.message = self.strerror = unicode(message) # pylint: disable=incompatible-py3-code,undefined-variable
def __unicode__(self):
return self.strerror


@ -875,7 +875,7 @@ class PillarClient(Client):
):
# Don't walk any directories that match file_ignore_regex or glob
dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
if len(dirs) == 0 and len(files) == 0:
if not dirs and not files:
ret.append(salt.utils.data.decode(os.path.relpath(root, path)))
return ret


@ -458,7 +458,7 @@ def _refresh_buckets_cache_file(cache_file):
bucket_files_list.append(bucket_files)
# check to see if we added any keys, otherwise investigate possible error conditions
if len(bucket_files[bucket_name]) == 0:
if not bucket_files[bucket_name]:
meta_response = {}
for k in s3_meta:
if 'Code' in k or 'Message' in k:
@ -497,7 +497,7 @@ def _refresh_buckets_cache_file(cache_file):
files = [k for k in s3_meta if 'Key' in k]
# check to see if we added any keys, otherwise investigate possible error conditions
if len(files) == 0:
if not files:
meta_response = {}
for k in s3_meta:
if 'Code' in k or 'Message' in k:


@ -767,7 +767,7 @@ def _file_lists(load, form):
dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
if relpath != '.':
ret['dirs'].add(dir_rel_fn)
if len(dirs) == 0 and len(files) == 0:
if not dirs and not files:
ret['empty_dirs'].add(dir_rel_fn)
for fname in files:
rel_fn = os.path.relpath(


@ -1157,8 +1157,7 @@ def _clean_value(key, val):
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
'''
if (val is None or
not len(val) or
if (val is None or not val or
re.match('none', val, flags=re.IGNORECASE)):
return None
elif 'uuid' in key:


@ -377,8 +377,8 @@ class Key(object):
self.opts = opts
kind = self.opts.get('__role', '') # application kind
if kind not in salt.utils.kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}'.".format(kind))
log.error(emsg + '\n')
emsg = "Invalid application kind = '{0}'.".format(kind)
log.error(emsg)
raise ValueError(emsg)
self.event = salt.utils.event.get_event(
kind,


@ -165,7 +165,7 @@ def setup_handlers():
payload_type = __opts__['fluent_handler'].get('payload_type', None)
# in general, you want the value of tag to ALSO be a member of tags
tags = __opts__['fluent_handler'].get('tags', ['salt'])
tag = tags[0] if len(tags) else 'salt'
tag = tags[0] if tags else 'salt'
if payload_type == 'graylog':
version = 0
elif payload_type == 'gelf':
@ -199,7 +199,7 @@ class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None):
self.payload_type = payload_type
self.version = version
self.tag = tags[0] if len(tags) else 'salt' # 'salt' for backwards compat
self.tag = tags[0] if tags else 'salt' # 'salt' for backwards compat
self.tags = tags
self.msg_path = msg_path if msg_path else payload_type
self.msg_type = msg_type if msg_type else payload_type


@ -210,9 +210,9 @@ def setup_handlers():
except KeyError:
log.debug('Sentry tag \'%s\' not found in grains.', tag)
continue
if len(tag_value) > 0:
if tag_value:
context_dict[tag] = tag_value
if len(context_dict) > 0:
if context_dict:
client.context.merge({'tags': context_dict})
try:
handler = SentryHandler(client)


@ -239,7 +239,7 @@ setLogRecordFactory(SaltLogRecord)
class SaltLoggingClass(six.with_metaclass(LoggingMixInMeta, LOGGING_LOGGER_CLASS, NewStyleClassMixIn)):
def __new__(cls, *args): # pylint: disable=W0613, E1002
def __new__(cls, *args): # pylint: disable=W0613,E0012
'''
We override `__new__` in our logging logger class in order to provide
some additional features like expand the module name padding if length
@ -428,7 +428,7 @@ if logging.getLoggerClass() is not SaltLoggingClass:
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
if len(logging.root.handlers) == 0:
if not logging.root.handlers:
# No configuration to the logging system has been done so far.
# Set the root logger at the lowest level possible
logging.root.setLevel(GARBAGE)


@ -676,7 +676,7 @@ class Master(SMaster):
pub_channels = []
log.info('Creating master publisher process')
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for transport, opts in iter_transport_opts(self.opts):
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
pub_channels.append(chan)


@ -509,7 +509,7 @@ class MinionBase(object):
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
log.info('Distributed to the master at \'%s\'.', opts['master'][0])
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
@ -733,21 +733,21 @@ class MinionBase(object):
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
log.info('Attempting %s time(s) to discover masters', att)
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
log.error('SSDP discovery failure: %s', err)
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
log.error('SSDP configuration matcher failure: unknown value "%s". '
'Should be "any" or "all"', policy)
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
@ -763,12 +763,12 @@ class MinionBase(object):
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
msg = 'Minion return retry timer set to %s seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
retry_msg = msg % random_retry
log.debug('%s (randomized)', msg % random_retry)
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
@ -779,12 +779,11 @@ class MinionBase(object):
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
log.debug(msg, self.opts.get('return_retry_timer'))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
@ -1495,7 +1494,7 @@ class Minion(MinionBase):
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
log.warning("Maximum number of processes reached while executing jid %s, waiting...", data['jid'])
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
@ -2639,24 +2638,16 @@ class Minion(MinionBase):
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
if self.opts['grains_refresh_every']: # In minutes, not seconds!
log.debug(
'Enabling the grains refresher. Will run every %d minute(s).',
self.opts['grains_refresh_every']
)
self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
'Exception occurred in attempt to initialize grain refresh '
'routine during minion tune-in: %s', exc
)
# TODO: actually listen to the return and change period
@ -3174,7 +3165,7 @@ class SyndicManager(MinionBase):
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
if not masters:
break
master_id = masters.pop(0)
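
The return-retry-timer hunks above keep a single %-style template and either hand it straight to the logger or pre-render it when extra text is appended, rather than calling .format() on it. A small stdlib-only sketch; the retry bounds are illustrative:

import logging
from random import randint

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

msg = 'Minion return retry timer set to %s seconds'

random_retry = randint(30, 60)  # illustrative retry bounds
log.debug('%s (randomized)', msg % random_retry)  # template pre-rendered
log.debug(msg, 30)                                # template passed lazily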