Mirror of https://github.com/saltstack/salt.git

Commit 815c33e993 (parent f2b50f27b7)
be more consistent with logging.warning vs logging.warn (deprecated) and quoting style

54 changed files with 146 additions and 146 deletions
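The rename is mechanical, but the motivation is worth a note: logging's warn() is only an undocumented alias for warning(), newer Python 3 releases emit a DeprecationWarning when it is called, and the alias has been removed entirely in the newest releases. A minimal, standalone sketch (not taken from the Salt codebase) of the difference:

    # Sketch only: Logger.warn() is a deprecated alias for Logger.warning().
    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger(__name__)

    log.warning('new spelling')            # the supported method
    if hasattr(log, 'warn'):               # the alias is gone on the newest Pythons
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            log.warn('old spelling')       # same log record, plus a DeprecationWarning
        print([str(w.message) for w in caught])

Both calls produce the same log record; only the deprecated alias adds the warning.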
@@ -176,7 +176,7 @@ def _bind(username, password, anonymous=False):
 )
 result = _ldap.search_s(basedn, int(scope), paramvalues['filter'])
 if len(result) < 1:
-log.warn('Unable to find user {0}'.format(username))
+log.warning('Unable to find user {0}'.format(username))
 return False
 elif len(result) > 1:
 # Active Directory returns something odd. Though we do not

@@ -305,9 +305,9 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init
 log.info('The salt minion is starting up')
 self.minion.tune_in()
 except (KeyboardInterrupt, SaltSystemExit) as exc:
-log.warn('Stopping the Salt Minion')
+log.warning('Stopping the Salt Minion')
 if isinstance(exc, KeyboardInterrupt):
-log.warn('Exiting on Ctrl-c')
+log.warning('Exiting on Ctrl-c')
 self.shutdown()
 else:
 log.error(str(exc))

@@ -333,9 +333,9 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init
 self.minion.opts['raet_cleanup_protecteds'] = cleanup_protecteds
 self.minion.call_in()
 except (KeyboardInterrupt, SaltSystemExit) as exc:
-log.warn('Stopping the Salt Minion')
+log.warning('Stopping the Salt Minion')
 if isinstance(exc, KeyboardInterrupt):
-log.warn('Exiting on Ctrl-c')
+log.warning('Exiting on Ctrl-c')
 self.shutdown()
 else:
 log.error(str(exc))

@@ -479,9 +479,9 @@ class ProxyMinion(parsers.ProxyMinionOptionParser): # pylint: disable=no-init
 log.info('The proxy minion is starting up')
 self.minion.tune_in()
 except (KeyboardInterrupt, SaltSystemExit) as exc:
-log.warn('Stopping the Salt Proxy Minion')
+log.warning('Stopping the Salt Proxy Minion')
 if isinstance(exc, KeyboardInterrupt):
-log.warn('Exiting on Ctrl-c')
+log.warning('Exiting on Ctrl-c')
 self.shutdown()
 else:
 log.error(str(exc))

@@ -577,7 +577,7 @@ class Syndic(parsers.SyndicOptionParser):
 try:
 self.syndic.tune_in()
 except KeyboardInterrupt:
-log.warn('Stopping the Salt Syndic Minion')
+log.warning('Stopping the Salt Syndic Minion')
 self.shutdown()

 def shutdown(self, exitcode=0, exitmsg=None):

@@ -1201,7 +1201,7 @@ class Cloud(object):

 if deploy:
 if not make_master and 'master' not in minion_dict:
-log.warn(
+log.warning(
 'There\'s no master defined on the {0!r} VM settings.'.format(
 vm_['name']
 )

@@ -1595,7 +1595,7 @@ class Cloud(object):
 fun = '{0}.get_configured_provider'.format(driver)
 if fun not in self.clouds:
 # Mis-configured provider that got removed?
-log.warn(
+log.warning(
 'The cloud driver, \'{0}\', configured under the '
 '\'{1}\' cloud provider alias, could not be loaded. '
 'Please check your provider configuration files and '

@@ -1621,7 +1621,7 @@ class Cloud(object):
 __active_provider_name__=':'.join([alias, driver])
 ):
 if self.clouds[fun]() is False:
-log.warn(
+log.warning(
 'The cloud driver, \'{0}\', configured under the '
 '\'{1}\' cloud provider alias is not properly '
 'configured. Removing it from the available '

@@ -1894,7 +1894,7 @@ class Map(Cloud):
 'requires'):
 deprecated = 'map_{0}'.format(setting)
 if deprecated in overrides:
-log.warn(
+log.warning(
 'The use of \'{0}\' on the \'{1}\' mapping has '
 'been deprecated. The preferred way now is to '
 'just define \'{2}\'. For now, salt-cloud will do '

@@ -1952,7 +1952,7 @@ class Map(Cloud):
 # Machine already removed
 break

-log.warn('\'{0}\' already exists, removing from '
+log.warning("'{0}' already exists, removing from "
 'the create map.'.format(name))

 if 'existing' not in ret:

@@ -313,7 +313,7 @@ def list_nodes_full(call=None):
 }
 items = query(params=params)
 if 'Code' in items:
-log.warn('Query instance:{0} attribute failed'.format(instanceId))
+log.warning('Query instance:{0} attribute failed'.format(instanceId))
 continue

 ret[instanceId] = {

@@ -203,17 +203,17 @@ def create(vm_):
 public = node['public_ips']

 if private and not public:
-log.warn(
+log.warning(
 'Private IPs returned, but not public... Checking for '
 'misidentified IPs'
 )
 for private_ip in private:
 private_ip = preferred_ip(vm_, [private_ip])
 if salt.utils.cloud.is_public_ip(private_ip):
-log.warn('%s is a public IP', private_ip)
+log.warning('%s is a public IP', private_ip)
 data.public_ips.append(private_ip)
 else:
-log.warn('%s is a private IP', private_ip)
+log.warning('%s is a private IP', private_ip)
 if private_ip not in data.private_ips:
 data.private_ips.append(private_ip)
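One detail in the hunk above is worth calling out: where the message already uses a %s placeholder, the value is passed as an extra argument to log.warning() rather than pre-formatted, and the logging module performs that interpolation only if the record is actually emitted. An illustrative snippet (the IP value is made up for the example):

    import logging

    log = logging.getLogger(__name__)
    private_ip = '10.0.0.5'   # hypothetical value, for illustration only

    log.warning('{0} is a private IP'.format(private_ip))  # formatted eagerly
    log.warning('%s is a private IP', private_ip)          # interpolated lazily by logging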
@@ -1859,7 +1859,7 @@ def request_instance(vm_=None, call=None):
 return False

 if isinstance(data, dict) and 'error' in data:
-log.warn(
+log.warning(
 'There was an error in the query. {0}'
 .format(data['error'])
 )

@@ -1976,7 +1976,7 @@ def query_instance(vm_=None, call=None):
 log.debug('The query returned: {0}'.format(data))

 if isinstance(data, dict) and 'error' in data:
-log.warn(
+log.warning(
 'There was an error in the query. {0} attempts '
 'remaining: {1}'.format(
 attempts, data['error']

@@ -1988,7 +1988,7 @@ def query_instance(vm_=None, call=None):
 continue

 if isinstance(data, list) and not data:
-log.warn(
+log.warning(
 'Query returned an empty list. {0} attempts '
 'remaining.'.format(attempts)
 )

@@ -2018,7 +2018,7 @@ def query_instance(vm_=None, call=None):
 return False

 if isinstance(data, dict) and 'error' in data:
-log.warn(
+log.warning(
 'There was an error in the query. {0}'.format(data['error'])
 )
 # Trigger a failure in the wait for IP function

@@ -2824,7 +2824,7 @@ def set_tags(name=None,
 break

 if failed_to_set_tags:
-log.warn(
+log.warning(
 'Failed to set tags. Remaining attempts {0}'.format(
 attempts
 )

@@ -3832,7 +3832,7 @@ def __attach_vol_to_instance(params, kws, instance_id):
 opts=__opts__,
 sigver='4')
 if data[0]:
-log.warn(
+log.warning(
 ('Error attaching volume {0} '
 'to instance {1}. Retrying!').format(kws['volume_id'],
 instance_id))

@@ -197,7 +197,7 @@ def query_instance(vm_=None, call=None):
 return False

 if isinstance(data, dict) and 'error' in data:
-log.warn(
+log.warning(
 'There was an error in the query {0}'.format(data['error']) # pylint: disable=E1126
 )
 # Trigger a failure in the wait for IP function

@@ -808,16 +808,16 @@ def create(vm_):
 private = node.get('private_ips', [])
 public = node.get('public_ips', [])
 if private and not public:
-log.warn(
+log.warning(
 'Private IPs returned, but not public... Checking for '
 'misidentified IPs'
 )
 for private_ip in private:
 private_ip = preferred_ip(vm_, [private_ip])
 if salt.utils.cloud.is_public_ip(private_ip):
-log.warn('{0} is a public IP'.format(private_ip))
+log.warning('{0} is a public IP'.format(private_ip))
 data.public_ips.append(private_ip)
-log.warn(
+log.warning(
 (
 'Public IP address was not ready when we last'
 ' checked. Appending public IP address now.'

@@ -825,7 +825,7 @@ def create(vm_):
 )
 public = data.public_ips
 else:
-log.warn('{0} is a private IP'.format(private_ip))
+log.warning('{0} is a private IP'.format(private_ip))
 ignore_ip = ignore_cidr(vm_, private_ip)
 if private_ip not in data.private_ips and not ignore_ip:
 result.append(private_ip)

@@ -688,22 +688,22 @@ def create(vm_):
 result = []
 private = node['private_ips']
 if private and not public:
-log.warn(
+log.warning(
 'Private IPs returned, but not public... Checking for '
 'misidentified IPs'
 )
 for private_ip in private:
 private_ip = preferred_ip(vm_, [private_ip])
 if salt.utils.cloud.is_public_ip(private_ip):
-log.warn('{0} is a public IP'.format(private_ip))
+log.warning('{0} is a public IP'.format(private_ip))
 data.public_ips.append(private_ip)
-log.warn(
+log.warning(
 'Public IP address was not ready when we last checked.'
 ' Appending public IP address now.'
 )
 public = data.public_ips
 else:
-log.warn('{0} is a private IP'.format(private_ip))
+log.warning('{0} is a private IP'.format(private_ip))
 ignore_ip = ignore_cidr(vm_, private_ip)
 if private_ip not in data.private_ips and not ignore_ip:
 result.append(private_ip)

@@ -280,17 +280,17 @@ def create(vm_):
 public = node['public_ips']

 if private and not public:
-log.warn(
+log.warning(
 'Private IPs returned, but not public... Checking for '
 'misidentified IPs'
 )
 for private_ip in private:
 private_ip = preferred_ip(vm_, [private_ip])
 if salt.utils.cloud.is_public_ip(private_ip):
-log.warn('{0} is a public IP'.format(private_ip))
+log.warning('{0} is a public IP'.format(private_ip))
 data.public_ips.append(private_ip)
 else:
-log.warn('{0} is a private IP'.format(private_ip))
+log.warning('{0} is a private IP'.format(private_ip))
 if private_ip not in data.private_ips:
 data.private_ips.append(private_ip)

@@ -1506,9 +1506,9 @@ def load_config(path, env_var, default_path=None):
 import inspect
 previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
 log.warning(
-'The function \'{0}()\' defined in \'{1}\' is not yet using the '
-'new \'default_path\' argument to `salt.config.load_config()`. '
-'As such, the \'{2}\' environment variable will be ignored'.format(
+"The function '{0}()' defined in '{1}' is not yet using the "
+"new 'default_path' argument to `salt.config.load_config()`. "
+"As such, the '{2}' environment variable will be ignored".format(
 previous_frame.function, previous_frame.filename, env_var
 )
 )
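The load_config hunk above is a pure quoting-style change, the second half of the commit message: where a single-quoted literal had to escape embedded apostrophes, the string is rewritten with double quotes. The two spellings produce identical strings; only readability differs. A small illustration:

    escaped = 'the \'default_path\' argument'
    plain = "the 'default_path' argument"
    assert escaped == plain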
@@ -1574,13 +1574,13 @@ def include_config(include, orig_path, verbose):
 # for empty include directory (which might be by design)
 if len(glob.glob(path)) == 0:
 if verbose:
-log.warn(
+log.warning(
 'Warning parsing configuration file: "include" path/glob '
-'\'{0}\' matches no files'.format(path)
+"'{0}' matches no files".format(path)
 )

 for fn_ in sorted(glob.glob(path)):
-log.debug('Including configuration from \'{0}\''.format(fn_))
+log.debug("Including configuration from '{0}'".format(fn_))
 opts = _read_conf_file(fn_)

 include = opts.get('include', [])

@@ -2289,7 +2289,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
 # Is the user still using the old format in the new configuration file?!
 for name, settings in six.iteritems(config.copy()):
 if '.' in name:
-log.warn(
+log.warning(
 'Please switch to the new providers configuration syntax'
 )

@@ -2319,7 +2319,7 @@ def apply_cloud_providers_config(overrides, defaults=None):
 if 'extends' not in details:
 log.error(
 'Please check your cloud providers configuration. '
-'There\'s no \'driver\', \'provider\', nor \'extends\' '
+"There's no 'driver', 'provider', nor 'extends' "
 'definition referenced.'
 )
 continue

@@ -2489,9 +2489,9 @@ def apply_cloud_providers_config(overrides, defaults=None):
 continue

 log.info(
-'There\'s at least one cloud driver under the \'{0}\' '
+"There's at least one cloud driver under the '{0}' "
 'cloud provider alias which does not have the required '
-'\'driver\' setting. Removing it from the available '
+"'driver' setting. Removing it from the available "
 'providers listing.'.format(
 provider_alias
 )

@@ -2553,10 +2553,10 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
 # and there's more than one entry under the alias.
 # WARN the user!!!!
 log.error(
-'The \'{0}\' cloud provider definition has more than one '
+"The '{0}' cloud provider definition has more than one "
 'entry. Your VM configuration should be specifying the '
-'provider as \'driver: {0}:<driver-engine>\'. Since '
-'it\'s not, we\'re returning the first definition which '
+"provider as 'driver: {0}:<driver-engine>'. Since "
+"it's not, we're returning the first definition which "
 'might not be what you intended.'.format(
 vm_['driver']
 )

@@ -2603,9 +2603,9 @@ def is_provider_configured(opts, provider, required_keys=()):
 # There's at least one require configuration key which is not
 # set.
 log.warning(
-'The required \'{0}\' configuration setting is missing '
-'from the \'{1}\' driver, which is configured under the '
-'\'{2}\' alias.'.format(key, provider, alias)
+"The required '{0}' configuration setting is missing "
+"from the '{1}' driver, which is configured under the "
+"'{2}' alias.".format(key, provider, alias)
 )
 return False
 # If we reached this far, there's a properly configured provider.

@@ -2625,9 +2625,9 @@ def is_provider_configured(opts, provider, required_keys=()):
 # This provider does not include all necessary keys,
 # continue to next one.
 log.warning(
-'The required \'{0}\' configuration setting is '
-'missing from the \'{1}\' driver, which is configured '
-'under the \'{2}\' alias.'.format(
+"The required '{0}' configuration setting is "
+"missing from the '{1}' driver, which is configured "
+"under the '{2}' alias.".format(
 key, provider, alias
 )
 )

@@ -2706,8 +2706,8 @@ def is_profile_configured(opts, provider, profile_name, vm_=None):
 if profile_key.get(item, None) is None:
 # There's at least one required configuration item which is not set.
 log.error(
-'The required \'{0}\' configuration setting is missing from '
-'the \'{1}\' profile, which is configured under the \'{2}\' '
+"The required '{0}' configuration setting is missing from "
+"the '{1}' profile, which is configured under the '{2}' "
 'alias.'.format(item, profile_name, alias)
 )
 return False

@@ -2731,8 +2731,8 @@ def check_driver_dependencies(driver, dependencies):
 for key, value in six.iteritems(dependencies):
 if value is False:
 log.warning(
-'Missing dependency: \'{0}\'. The {1} driver requires '
-'\'{0}\' to be installed.'.format(
+"Missing dependency: '{0}'. The {1} driver requires "
+"'{0}' to be installed.".format(
 key,
 driver
 )

@@ -3004,7 +3004,7 @@ def apply_master_config(overrides=None, defaults=None):
 # to make `salt.modules.publish` not work under the test-suite.
 if opts['worker_threads'] < 3 and opts.get('peer', None):
 log.warning(
-'The \'worker_threads\' setting on \'{0}\' cannot be lower than '
+"The 'worker_threads' setting on '{0}' cannot be lower than "
 '3. Resetting it to the default value of 3.'.format(
 opts['conf_file']
 )

@@ -336,7 +336,7 @@ class AutoKey(object):

 if not self.check_permissions(signing_file):
 message = 'Wrong permissions for {0}, ignoring content'
-log.warn(message.format(signing_file))
+log.warning(message.format(signing_file))
 return False

 with salt.utils.fopen(signing_file, 'r') as fp_:

@@ -364,7 +364,7 @@ class AutoKey(object):
 stub_file = os.path.join(autosign_dir, f)
 mtime = os.path.getmtime(stub_file)
 if mtime < min_time:
-log.warn('Autosign keyid expired {0}'.format(stub_file))
+log.warning('Autosign keyid expired {0}'.format(stub_file))
 os.remove(stub_file)

 stub_file = os.path.join(autosign_dir, keyid)

@@ -837,7 +837,7 @@ class RemoteFuncs(object):
 if not good:
 # The minion is not who it says it is!
 # We don't want to listen to it!
-log.warn(
+log.warning(
 'Minion id {0} is not who it says it is!'.format(
 load['id']
 )

@@ -962,7 +962,7 @@ class RemoteFuncs(object):
 except ValueError:
 msg = 'Failed to parse timeout value: {0}'.format(
 load['tmo'])
-log.warn(msg)
+log.warning(msg)
 return {}
 if 'timeout' in load:
 try:

@@ -970,7 +970,7 @@ class RemoteFuncs(object):
 except ValueError:
 msg = 'Failed to parse timeout value: {0}'.format(
 load['timeout'])
-log.warn(msg)
+log.warning(msg)
 return {}
 if 'tgt_type' in load:
 if load['tgt_type'].startswith('node'):

@@ -945,7 +945,7 @@ class RemoteClient(Client):
 d_tries += 1
 hsum = salt.utils.get_hash(dest, data.get('hash_type', 'md5'))
 if hsum != data['hsum']:
-log.warn('Bad download of file {0}, attempt {1} '
+log.warning('Bad download of file {0}, attempt {1} '
 'of 3'.format(path, d_tries))
 continue
 break

@@ -231,7 +231,7 @@ def reap_fileserver_cache_dir(cache_base, find_func):
 try:
 filename, _, hash_type = file_rel_path.rsplit('.', 2)
 except ValueError:
-log.warn((
+log.warning((
 'Found invalid hash file [{0}] when attempting to reap'
 ' cache directory.'
 ).format(file_))

@@ -449,8 +449,8 @@ def _refresh_buckets_cache_file(cache_file):
 continue
 else:
 log.warning(
-('S3 Error! Do you have any files '
-'in your S3 bucket?'))
+'S3 Error! Do you have any files '
+'in your S3 bucket?')
 return {}

 metadata[saltenv] = bucket_files

@@ -488,8 +488,8 @@ def _refresh_buckets_cache_file(cache_file):
 continue
 else:
 log.warning(
-('S3 Error! Do you have any files '
-'in your S3 bucket?'))
+'S3 Error! Do you have any files '
+'in your S3 bucket?')
 return {}

 environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files]
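In the two fileserver hunks above the change also drops a pair of redundant parentheses around the message: adjacent string literals are concatenated by the Python parser, and the call's own parentheses already let the literal span two lines. A small illustration:

    msg = ('S3 Error! Do you have any files '
           'in your S3 bucket?')
    also = 'S3 Error! Do you have any files ' 'in your S3 bucket?'
    assert msg == also == 'S3 Error! Do you have any files in your S3 bucket?'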
@@ -193,7 +193,7 @@ def _linux_gpu_data():
 log.debug('Unexpected lspci output: \'{0}\''.format(line))

 if error:
-log.warn(
+log.warning(
 'Error loading grains, unexpected linux_gpu_data output, '
 'check that you have a valid shell configured and '
 'permissions to run lspci command'

@@ -656,8 +656,8 @@ def _virtual(osdata):
 grains['virtual'] = 'kvm'
 else:
 if osdata['kernel'] in skip_cmds:
-log.warn(
-'The tools \'dmidecode\' and \'lspci\' failed to '
+log.warning(
+"The tools 'dmidecode' and 'lspci' failed to "
 'execute because they do not exist on the system of the user '
 'running this instance or the user does not have the '
 'necessary permissions to execute them. Grains output might '

@@ -820,8 +820,8 @@ def _virtual(osdata):
 grains['virtual_subtype'] = 'Xen Dom0'

 for command in failed_commands:
-log.warn(
-'Although \'{0}\' was found in path, the current user '
+log.warning(
+"Although '{0}' was found in path, the current user "
 'cannot execute it. Grains output might not be '
 'accurate.'.format(command)
 )

@@ -45,6 +45,6 @@ def config():
 try:
 return yaml.safe_load(fp_.read())
 except Exception:
-log.warn("Bad syntax in grains file! Skipping.")
+log.warning("Bad syntax in grains file! Skipping.")
 return {}
 return {}

@@ -406,7 +406,7 @@ def setup_temp_logger(log_level='error'):
 Setup the temporary console logger
 '''
 if is_temp_logging_configured():
-logging.getLogger(__name__).warn(
+logging.getLogger(__name__).warning(
 'Temporary logging is already configured'
 )
 return

@@ -460,7 +460,7 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
 Setup the console logger
 '''
 if is_console_configured():
-logging.getLogger(__name__).warn('Console logging already configured')
+logging.getLogger(__name__).warning('Console logging already configured')
 return

 # Remove the temporary logging handler

@@ -533,11 +533,11 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
 '''

 if is_logfile_configured():
-logging.getLogger(__name__).warn('Logfile logging already configured')
+logging.getLogger(__name__).warning('Logfile logging already configured')
 return

 if log_path is None:
-logging.getLogger(__name__).warn(
+logging.getLogger(__name__).warning(
 'log_path setting is set to `None`. Nothing else to do'
 )
 return

@@ -940,7 +940,7 @@ def __process_multiprocessing_logging_queue(opts, queue):
 except (EOFError, KeyboardInterrupt, SystemExit):
 break
 except Exception as exc: # pylint: disable=broad-except
-logging.getLogger(__name__).warn(
+logging.getLogger(__name__).warning(
 'An exception occurred in the multiprocessing logging '
 'queue thread: {0}'.format(exc),
 exc_info_on_loglevel=logging.DEBUG

@@ -993,7 +993,7 @@ class AESFuncs(object):
 if not self.__verify_minion(clear_load['id'], clear_load['tok']):
 # The minion is not who it says it is!
 # We don't want to listen to it!
-log.warn(
+log.warning(
 (
 'Minion id {0} is not who it says it is and is attempting '
 'to issue a peer command'

@@ -1051,7 +1051,7 @@ class AESFuncs(object):
 if not self.__verify_minion(load['id'], load['tok']):
 # The minion is not who it says it is!
 # We don't want to listen to it!
-log.warn(
+log.warning(
 'Minion id {0} is not who it says it is!'.format(
 load['id']
 )

@@ -1203,7 +1203,7 @@ class AESFuncs(object):
 if not self.__verify_minion(load['id'], load['tok']):
 # The minion is not who it says it is!
 # We don't want to listen to it!
-log.warn(
+log.warning(
 'Minion id {0} is not who it says it is!'.format(
 load['id']
 )

@@ -1461,7 +1461,7 @@ class Minion(MinionBase):
 '{0}. This is often due to the master being shut down or '
 'overloaded. If the master is running consider increasing '
 'the worker_threads value.').format(jid)
-log.warn(msg)
+log.warning(msg)
 return ''

 log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member

@@ -526,7 +526,7 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None,
 salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='BadPassword'
 '''
 if options:
-log.warn('Options \'{0}\' ignored, only works with unzip binary.'.format(options))
+log.warning("Options '{0}' ignored, only works with unzip binary.".format(options))
 if not excludes:
 excludes = []
 if runas:

@@ -535,7 +535,7 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None,
 uinfo = __salt__['user.info'](runas)
 if not uinfo:
 raise SaltInvocationError(
-'User \'{0}\' does not exist'.format(runas)
+"User '{0}' does not exist".format(runas)
 )

 zip_file, dest = _render_filenames(zip_file, dest, None, template)

@@ -196,14 +196,14 @@ def stop(dev=None):

 '''
 if dev is not None:
-log.warn('Stopping {0}, device will only reappear after reregistering!'.format(dev))
+log.warning('Stopping {0}, device will only reappear after reregistering!'.format(dev))
 if not _bcsys(dev, 'stop', 'goaway', 'error', 'Error stopping {0}'.format(dev)):
 return False
 return _wait(lambda: _sysfs_attr(_bcpath(dev)) is False, 'error', 'Device {0} did not stop'.format(dev), 300)
 else:
 cache = uuid()
 if not cache:
-log.warn('bcache already stopped?')
+log.warning('bcache already stopped?')
 return None

 if not _alltrue(detach()):

@@ -896,10 +896,10 @@ def _wipe(dev):
 log.error('Unable to read SysFS props for {0}'.format(dev))
 return None
 elif not discard:
-log.warn('{0} seems unable to discard'.format(dev))
+log.warning('{0} seems unable to discard'.format(dev))
 wiper = 'dd'
 elif not HAS_BLKDISCARD:
-log.warn('blkdiscard binary not available, properly wipe the dev manually for optimal results')
+log.warning('blkdiscard binary not available, properly wipe the dev manually for optimal results')
 wiper = 'dd'
 else:
 wiper = 'blkdiscard'
@@ -647,7 +647,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
 try:
 registered_instances = conn.register_instances(name, instances)
 except boto.exception.BotoServerError as error:
-log.warn(error)
+log.warning(error)
 return False
 registered_instance_ids = [instance.id for instance in
 registered_instances]

@@ -655,7 +655,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
 # able to be registered with the given ELB
 register_failures = set(instances).difference(set(registered_instance_ids))
 if register_failures:
-log.warn('Instance(s): {0} not registered with ELB {1}.'
+log.warning('Instance(s): {0} not registered with ELB {1}.'
 .format(list(register_failures), name))
 register_result = False
 else:

@@ -696,12 +696,12 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
 # deregister_instances returns "None" because the instances are
 # effectively deregistered from ELB
 if error.error_code == 'InvalidInstance':
-log.warn('One or more of instance(s) {0} are not part of ELB {1}.'
+log.warning('One or more of instance(s) {0} are not part of ELB {1}.'
 ' deregister_instances not performed.'
 .format(instances, name))
 return None
 else:
-log.warn(error)
+log.warning(error)
 return False
 registered_instance_ids = [instance.id for instance in
 registered_instances]

@@ -709,7 +709,7 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
 # unable to be deregistered from the given ELB
 deregister_failures = set(instances).intersection(set(registered_instance_ids))
 if deregister_failures:
-log.warn('Instance(s): {0} not deregistered from ELB {1}.'
+log.warning('Instance(s): {0} not deregistered from ELB {1}.'
 .format(list(deregister_failures), name))
 deregister_result = False
 else:

@@ -102,7 +102,7 @@ def A(host, nameserver=None):
 cmd = __salt__['cmd.run_all'](dig, python_shell=False)
 # In this case, 0 is not the same as False
 if cmd['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as '
 'fallback.'.format(
 cmd['retcode']

@@ -134,7 +134,7 @@ def AAAA(host, nameserver=None):
 cmd = __salt__['cmd.run_all'](dig, python_shell=False)
 # In this case, 0 is not the same as False
 if cmd['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as '
 'fallback.'.format(
 cmd['retcode']

@@ -166,7 +166,7 @@ def NS(domain, resolve=True, nameserver=None):
 cmd = __salt__['cmd.run_all'](dig, python_shell=False)
 # In this case, 0 is not the same as False
 if cmd['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as '
 'fallback.'.format(
 cmd['retcode']

@@ -207,7 +207,7 @@ def SPF(domain, record='SPF', nameserver=None):
 result = __salt__['cmd.run_all'](cmd, python_shell=False)
 # In this case, 0 is not the same as False
 if result['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as fallback.'
 .format(result['retcode'])
 )

@@ -264,7 +264,7 @@ def MX(domain, resolve=False, nameserver=None):
 cmd = __salt__['cmd.run_all'](dig, python_shell=False)
 # In this case, 0 is not the same as False
 if cmd['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as '
 'fallback.'.format(
 cmd['retcode']

@@ -302,7 +302,7 @@ def TXT(host, nameserver=None):
 cmd = __salt__['cmd.run_all'](dig, python_shell=False)

 if cmd['retcode'] != 0:
-log.warn(
+log.warning(
 'dig returned exit code \'{0}\'. Returning empty list as '
 'fallback.'.format(
 cmd['retcode']

@@ -383,7 +383,7 @@ def _hdparm(args, failhard=True):
 if failhard:
 raise CommandExecutionError(msg)
 else:
-log.warn(msg)
+log.warning(msg)

 return result['stdout']

@@ -1421,7 +1421,7 @@ def _validate_input(kwargs,
 'Host path {0} in bind {1} is not absolute'
 .format(container_path, bind)
 )
-log.warn('Host path {0} in bind {1} is not absolute,'
+log.warning('Host path {0} in bind {1} is not absolute,'
 ' assuming it is a docker volume.'.format(host_path,
 bind))
 if not os.path.isabs(container_path):

@@ -546,7 +546,7 @@ def rm_auth_key(user, key, config='.ssh/authorized_keys'):
 with salt.utils.fopen(full, 'w') as _fh:
 _fh.writelines(lines)
 except (IOError, OSError) as exc:
-log.warn('Could not read/write key file: {0}'.format(str(exc)))
+log.warning('Could not read/write key file: {0}'.format(str(exc)))
 return 'Key not removed'
 return 'Key removed'
 # TODO: Should this function return a simple boolean?
@@ -249,7 +249,7 @@ def interfaces(root):
 elif is_r:
 reads.append(relpath)
 else:
-log.warn('Unable to find any interfaces in {0}'.format(canpath))
+log.warning('Unable to find any interfaces in {0}'.format(canpath))

 return {
 'r': reads,

@@ -677,7 +677,7 @@ def create_ca(ca_name,
 key = OpenSSL.crypto.load_privatekey(
 OpenSSL.crypto.FILETYPE_PEM, fic2.read())
 except OpenSSL.crypto.Error as err:
-log.warn('Error loading existing private key'
+log.warning('Error loading existing private key'
 ' %s, generating a new key: %s', ca_keyp, str(err))
 bck = "{0}.unloadable.{1}".format(ca_keyp,
 datetime.utcnow().strftime("%Y%m%d%H%M%S"))

@@ -151,7 +151,7 @@ def __virtual__():
 '''
 if salt.utils.is_windows():
 if not HAS_DEPENDENCIES:
-log.warn('Could not load dependencies for {0}'.format(__virtualname__))
+log.warning('Could not load dependencies for {0}'.format(__virtualname__))
 return __virtualname__
 return (False, "Module win_task: module only works on Windows systems")

@@ -720,11 +720,11 @@ def bootstrap(directory='.',
 distribute = False
 if new_st:
 distribute = False
-LOG.warning(u'Forcing to use setuptools as we have setuptools >= 0.7')
+LOG.warning('Forcing to use setuptools as we have setuptools >= 0.7')
 if distribute:
 new_st = False
 if buildout_ver == 1:
-LOG.warning(u'Using distribute !')
+LOG.warning('Using distribute !')
 bootstrap_args += ' --distribute'
 if not os.path.isdir(dbuild):
 os.makedirs(dbuild)

@@ -397,7 +397,7 @@ def ext_pillar(minion_id, pillar, resource, sequence, subkey=False, subkey_only=

 for categ, info in [next(six.iteritems(s)) for s in sequence]:
 if categ not in inp:
-log.warn("Category is not defined: {0}".format(categ))
+log.warning("Category is not defined: {0}".format(categ))
 continue

 alias = None

@@ -416,7 +416,7 @@ def ext_pillar(minion_id, pillar, resource, sequence, subkey=False, subkey_only=
 if isinstance(inp[categ], list):
 entries = inp[categ]
 elif not inp[categ]:
-log.warn("Category has no value set: {0}".format(categ))
+log.warning("Category has no value set: {0}".format(categ))
 continue
 else:
 entries = [inp[categ]]

@@ -288,7 +288,7 @@ def ext_pillar(minion_id, # pylint: disable=W0613
 import salt.log
 msg = 'Error parsing configuration file: {0} - {1}'
 if salt.log.is_console_configured():
-log.warn(msg.format(config_file, err))
+log.warning(msg.format(config_file, err))
 else:
 print(msg.format(config_file, err))
 else:

@@ -395,7 +395,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs):
 stack_config_files += cfgs
 for cfg in stack_config_files:
 if not os.path.isfile(cfg):
-log.warn('Ignoring pillar stack cfg "{0}": '
+log.warning('Ignoring pillar stack cfg "{0}": '
 'file does not exist'.format(cfg))
 continue
 stack = _process_stack_cfg(cfg, stack, minion_id, pillar)

@@ -263,7 +263,7 @@ def _decrypt_ciphertext(cipher, translate_newlines=False):
 input=cipher.replace(r'\n', '\n') if translate_newlines else cipher
 )
 if not decrypted_data:
-log.warn(
+log.warning(
 'Could not decrypt cipher %s, received: %s',
 cipher,
 decrypt_error

@@ -57,7 +57,7 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws):
 raise SaltRenderError(exc)
 if len(warn_list) > 0:
 for item in warn_list:
-log.warn(
+log.warning(
 '{warn} found in {sls} saltenv={env}'.format(
 warn=item.message, sls=salt.utils.url.create(sls), env=saltenv
 )

@@ -23,7 +23,7 @@ def render(sls_data, saltenv='base', sls='', **kws):
 data = deserialize(sls_data) or {}

 for item in warn_list:
-log.warn(
+log.warning(
 '{warn} found in {sls} saltenv={env}'.format(
 warn=item.message, sls=salt.utils.url.create(sls), env=saltenv
 )

@@ -118,7 +118,7 @@ def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
 with salt.utils.fopen(os.path.join(jid_dir_, 'nocache'), 'wb+') as fn_:
 fn_.write('')
 except IOError:
-log.warn('Could not write out jid file for job {0}. Retrying.'.format(jid))
+log.warning('Could not write out jid file for job {0}. Retrying.'.format(jid))
 time.sleep(0.1)
 return prep_jid(passed_jid=jid, nocache=nocache,
 recurse_count=recurse_count+1)
@@ -165,7 +165,7 @@ class Runner(RunnerClient):
 if self.opts.get('async', False):
 async_pub = self.async(self.opts['fun'], low, user=user)
 # by default: info will be not enougth to be printed out !
-log.warn('Running in async mode. Results of this execution may '
+log.warning('Running in async mode. Results of this execution may '
 'be collected by attaching to the master event bus or '
 'by examing the master job cache, if configured. '
 'This execution is running under tag {tag}'.format(**async_pub))

@@ -32,7 +32,7 @@ def query(url, output=True, **kwargs):
 data='<xml>somecontent</xml>'
 '''
 if output is not True:
-log.warn('Output option has been deprecated. Please use --quiet.')
+log.warning('Output option has been deprecated. Please use --quiet.')
 if 'node' not in kwargs:
 kwargs['node'] = 'master'

@@ -108,7 +108,7 @@ def find_guest(name, quiet=False, path=None):
 salt-run lxc.find_guest name
 '''
 if quiet:
-log.warn('\'quiet\' argument is being deprecated.'
+log.warning("'quiet' argument is being deprecated."
 ' Please migrate to --quiet')
 for data in _list_iter(path=path):
 host, l = next(six.iteritems(data))

@@ -234,7 +234,7 @@ def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs):
 '''
 path = kwargs.get('path', None)
 if quiet:
-log.warn('\'quiet\' argument is being deprecated.'
+log.warning("'quiet' argument is being deprecated."
 ' Please migrate to --quiet')
 ret = {'comment': '', 'result': True}
 if host is None:

@@ -424,7 +424,7 @@ def cloud_init(names, host=None, quiet=False, **kwargs):
 init the container with the saltcloud opts format instead
 '''
 if quiet:
-log.warn('\'quiet\' argument is being deprecated. Please migrate to --quiet')
+log.warning("'quiet' argument is being deprecated. Please migrate to --quiet")
 return __salt__['lxc.init'](names=names, host=host,
 saltcloud_mode=True, quiet=quiet, **kwargs)

@@ -71,7 +71,7 @@ def query(host=None, quiet=False, hyper=None):
 host = hyper

 if quiet:
-log.warn('\'quiet\' is deprecated. Please migrate to --quiet')
+log.warning("'quiet' is deprecated. Please migrate to --quiet")
 ret = {}
 client = salt.client.get_local_client(__opts__['conf_file'])
 try:

@@ -117,7 +117,7 @@ def list(host=None, quiet=False, hyper=None): # pylint: disable=redefined-built
 host = hyper

 if quiet:
-log.warn('\'quiet\' is deprecated. Please migrate to --quiet')
+log.warning("'quiet' is deprecated. Please migrate to --quiet")
 ret = {}
 client = salt.client.get_local_client(__opts__['conf_file'])
 for info in client.cmd_iter('virtual:physical',

@@ -85,7 +85,7 @@ def minion_process():
 try:
 minion.start()
 except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
-log.warn('** Restarting minion **')
+log.warning('** Restarting minion **')
 delay = 60
 if minion is not None and hasattr(minion, 'config'):
 delay = minion.config.get('random_reauth_delay', 60)

@@ -199,7 +199,7 @@ def proxy_minion_process(queue):
 restart = False

 if restart is True:
-log.warn('** Restarting proxy minion **')
+log.warning('** Restarting proxy minion **')
 delay = 60
 if proxyminion is not None:
 if hasattr(proxyminion, 'config'):

@@ -554,7 +554,7 @@ def present(name, Bucket,
 # notice something mismatches their desired state.
 if _describe.get('Location', {}).get('LocationConstraint') != LocationConstraint:
 msg = 'Bucket {0} location does not match desired configuration, but cannot be changed'.format(LocationConstraint)
-log.warn(msg)
+log.warning(msg)
 ret['result'] = False
 ret['comment'] = 'Failed to update bucket: {0}.'.format(msg)
 return ret

@@ -187,7 +187,7 @@ def updated(name=None, cyg_arch='x86_64', mirrors=None):
 return ret

 if not mirrors:
-LOG.warn('No mirror given, using the default.')
+LOG.warning('No mirror given, using the default.')

 before = __salt__['cyg.list'](cyg_arch=cyg_arch)
 if __salt__['cyg.update'](cyg_arch, mirrors=mirrors):
@@ -226,7 +226,7 @@ def state(

 for minion, mdata in six.iteritems(cmd_ret):
 if mdata.get('out', '') != 'highstate':
-log.warning("Output from salt state not highstate")
+log.warning('Output from salt state not highstate')

 m_ret = False

@@ -206,10 +206,10 @@ def installed(name,
 ret = {}

 if 'group' in kwargs:
-log.warn('Passing \'group\' is deprecated, just remove it')
+log.warning("Passing 'group' is deprecated, just remove it")
 output_loglevel = kwargs.get('output_loglevel', None)
 if output_loglevel and not loglevel:
-log.warn('Passing \'output_loglevel\' is deprecated,'
+log.warning("Passing 'output_loglevel' is deprecated,"
 ' please use loglevel instead')
 try:
 test_release = int(test_release)

@@ -64,7 +64,7 @@ def compile_template(template,
 return ret
 # Template is an empty file
 if salt.utils.is_empty(template):
-log.warn('Template is an empty file: {0}'.format(template))
+log.warning('Template is an empty file: {0}'.format(template))
 return ret

 with codecs.open(template, encoding=SLS_ENCODING) as ifile:

@@ -364,7 +364,7 @@ class AESReqServerMixin(object):

 else:
 # Something happened that I have not accounted for, FAIL!
-log.warn('Unaccounted for authentication failure')
+log.warning('Unaccounted for authentication failure')
 eload = {'result': False,
 'id': load['id'],
 'pub': load['pub']}

@@ -2403,7 +2403,7 @@ def lock_file(filename, interval=.5, timeout=15):
 while True:
 if os.path.exists(lock):
 if time.time() - start >= timeout:
-log.warn('Unable to obtain lock for {0}'.format(filename))
+log.warning('Unable to obtain lock for {0}'.format(filename))
 return False
 time.sleep(interval)
 else:

@@ -154,7 +154,7 @@ def query(url,
 requests_lib = opts.get('requests_lib', False)

 if requests_lib is True:
-log.warn('Please set "backend" to "requests" instead of setting '
+log.warning('Please set "backend" to "requests" instead of setting '
 '"requests_lib" to "True"')

 if HAS_REQUESTS is False:

@@ -330,11 +330,11 @@ def query(url,
 hostname = request.get_host()
 handlers[0] = urllib_request.HTTPSHandler(1)
 if not HAS_MATCHHOSTNAME:
-log.warn(('match_hostname() not available, SSL hostname checking '
-'not available. THIS CONNECTION MAY NOT BE SECURE!'))
+log.warning('match_hostname() not available, SSL hostname checking '
+'not available. THIS CONNECTION MAY NOT BE SECURE!')
 elif verify_ssl is False:
-log.warn(('SSL certificate verification has been explicitly '
-'disabled. THIS CONNECTION MAY NOT BE SECURE!'))
+log.warning('SSL certificate verification has been explicitly '
+'disabled. THIS CONNECTION MAY NOT BE SECURE!')
 else:
 if ':' in hostname:
 hostname, port = hostname.split(':')

@@ -165,7 +165,7 @@ class MasterPillarUtil(object):
 if minion_id is None:
 return {}
 if not minion_grains:
-log.warn(
+log.warning(
 'Cannot get pillar data for {0}: no grains supplied.'.format(
 minion_id
 )

@@ -375,7 +375,7 @@ class Schedule(object):
 if name in self.opts['pillar']['schedule']:
 del self.opts['pillar']['schedule'][name]
 schedule = self.opts['pillar']['schedule']
-log.warn('Pillar schedule deleted. Pillar refresh recommended. Run saltutil.refresh_pillar.')
+log.warning('Pillar schedule deleted. Pillar refresh recommended. Run saltutil.refresh_pillar.')

 # Fire the complete event back along with updated list of schedule
 evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)

@@ -48,7 +48,7 @@ def zmq_version():
 if not match:
 msg = "Using untested zmq python bindings version: '{0}'".format(ver)
 if is_console_configured():
-log.warn(msg)
+log.warning(msg)
 else:
 sys.stderr.write("WARNING {0}\n".format(msg))
 return True

@@ -69,7 +69,7 @@ def zmq_version():
 if "dev" in ver and not point:
 msg = 'Using dev zmq module, please report unexpected results'
 if is_console_configured():
-log.warn(msg)
+log.warning(msg)
 else:
 sys.stderr.write("WARNING: {0}\n".format(msg))
 return True

@@ -133,7 +133,7 @@ def verify_socket(interface, pub_port, ret_port):
 msg = ('Unable to bind socket, this might not be a problem.'
 ' Is there another salt-master running?')
 if is_console_configured():
-log.warn(msg)
+log.warning(msg)
 else:
 sys.stderr.write('WARNING: {0}\n'.format(msg))
 result = False

@@ -517,4 +517,4 @@ def verify_log(opts):
 If an insecre logging configuration is found, show a warning
 '''
 if opts.get('log_level') in ('garbage', 'trace', 'debug'):
-log.warn('Insecure logging configuration detected! Sensitive data may be logged.')
+log.warning('Insecure logging configuration detected! Sensitive data may be logged.')