Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)
Merge branch '2017.7' into 'oxygen'

Conflicts:
    - salt/modules/gnomedesktop.py
    - salt/utils/cloud.py

Commit: ca1f8a3b59
9 changed files with 47 additions and 36 deletions
@@ -4,7 +4,7 @@ Documentation=man:salt-master(1) file:///usr/share/doc/salt/html/contents.html h
 After=network.target

 [Service]
-LimitNOFILE=16384
+LimitNOFILE=100000
 Type=notify
 NotifyAccess=all
 ExecStart=/usr/bin/salt-master
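The LimitNOFILE bump raises the file-descriptor ceiling systemd hands to the salt-master process. A minimal sketch (not part of this change, Unix host assumed) for confirming what a running process actually received:

    import resource

    # LimitNOFILE= maps to RLIMIT_NOFILE: `soft` is the limit currently
    # enforced, `hard` is the ceiling the process may raise it to.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print('open file descriptors: soft={0} hard={1}'.format(soft, hard))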
@@ -95,7 +95,7 @@ echoinfo() {

 #--- FUNCTION -------------------------------------------------------------------------------------------------------
 # NAME: echowarn
-# DESCRIPTION: Echo warning informations to stdout.
+# DESCRIPTION: Echo warning information to stdout.
 #----------------------------------------------------------------------------------------------------------------------
 echowarn() {
     printf "${YC} * WARN${EC}: %s\n" "$@";
@@ -338,7 +338,7 @@ __usage() {
 -U If set, fully upgrade the system prior to bootstrapping Salt
 -I If set, allow insecure connections while downloading any files. For
    example, pass '--no-check-certificate' to 'wget' or '--insecure' to
-   'curl'. On Debian and Ubuntu, using this option with -U allows to obtain
+   'curl'. On Debian and Ubuntu, using this option with -U allows obtaining
    GnuPG archive keys insecurely if distro has changed release signatures.
 -F Allow copied files to overwrite existing (config, init.d, etc)
 -K If set, keep the temporary files in the temporary directories specified
@@ -20,6 +20,7 @@ try:
 except ImportError:
     HAS_GLIB = False

+import salt.utils

 log = logging.getLogger(__name__)

@@ -50,6 +51,17 @@ class _GSettings(object):
         self.UID = None
         self.HOME = None

+    @property
+    def gestting_command(self):
+        '''
+        return the command to run the gesttings binary
+        '''
+        if salt.utils.which_bin(['dbus-run-session']):
+            cmd = ['dbus-run-session', '--', 'gsettings']
+        else:
+            cmd = ['dbus-launch', '--exit-with-session', 'gsettings']
+        return cmd
+
     def _get(self):
         '''
         get the value for user in gsettings
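The new gestting_command property prefers dbus-run-session and only falls back to the older dbus-launch wrapper. A rough standalone sketch of the same selection logic, using only the standard library (shutil.which stands in for salt.utils.which_bin; the schema/key in the trailing read are illustrative and need a GNOME session with D-Bus):

    import shutil
    import subprocess

    def gsettings_cmd():
        # Prefer dbus-run-session (spawns a private session bus); fall back
        # to the older dbus-launch wrapper when it is not installed.
        if shutil.which('dbus-run-session'):
            return ['dbus-run-session', '--', 'gsettings']
        return ['dbus-launch', '--exit-with-session', 'gsettings']

    if shutil.which('gsettings'):
        # Illustrative read, mirroring how _GSettings._get assembles its argv list.
        out = subprocess.check_output(
            gsettings_cmd() + ['get', 'org.gnome.desktop.interface', 'clock-show-date'])
        print(out.decode().strip())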
@@ -62,7 +74,7 @@ class _GSettings(object):
             log.info('User does not exist')
             return False

-        cmd = 'dbus-launch --exit-with-session gsettings get {0} {1}'.format(self.SCHEMA, self.KEY)
+        cmd = self.gestting_command + ['get', str(self.SCHEMA), str(self.KEY)]
         environ = {}
         environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
         result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
@@ -90,8 +102,7 @@ class _GSettings(object):
             result['stdout'] = 'User {0} does not exist'.format(user)
             return result

-        cmd = 'dbus-launch --exit-with-session gsettings set {0} {1} "{2}"'.format(
-            self.SCHEMA, self.KEY, value)
+        cmd = self.gestting_command + ['set', self.SCHEMA, self.KEY, value]
         environ = {}
         environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
         result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
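Both _get and _set now build the command as an argv list and pass python_shell=False, instead of interpolating SCHEMA, KEY and value into a quoted shell string. A minimal sketch of why that matters, using plain subprocess (the schema/key/value strings are invented for illustration):

    import subprocess
    import sys

    value = "a string with 'quotes', spaces and $dollar signs"

    # List form: every element reaches the child process as exactly one
    # argument; no shell parses the string, so quoting or metacharacters in
    # `value` cannot split or mangle the command.
    subprocess.check_call(
        [sys.executable, '-c', 'import sys; print(sys.argv[1:])',
         'set', 'some.schema', 'some-key', value])

    # The replaced pattern was roughly equivalent to this, which pushes the
    # interpolated string back through a shell and breaks as soon as `value`
    # contains quotes:
    #   subprocess.check_call('gsettings set some.schema some-key "{0}"'.format(value), shell=True)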
@@ -71,14 +71,13 @@ VERIFY_TRUST_LEVELS = {
     '4': 'Ultimate'
 }

-HAS_LIBS = False
-GPG_1_3_1 = False

 try:
     import gnupg
-    HAS_LIBS = True
+    HAS_GPG_BINDINGS = True
+    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
 except ImportError:
-    pass
+    HAS_GPG_BINDINGS = False


 def _gpg():
@@ -96,15 +95,10 @@ def __virtual__():
     if not _gpg():
         return (False, 'The gpg execution module cannot be loaded: '
                 'gpg binary is not in the path.')
-    if HAS_LIBS:
-        gnupg_version = _LooseVersion(gnupg.__version__)
-        if gnupg_version >= '1.3.1':
-            global GPG_1_3_1
-            GPG_1_3_1 = True
-        return __virtualname__

-    return (False, 'The gpg execution module cannot be loaded; the'
-            ' gnupg python module is not installed.')
+    return __virtualname__ if HAS_GPG_BINDINGS \
+        else (False, 'The gpg execution module cannot be loaded; the '
+              'gnupg python module is not installed.')


 def _get_user_info(user=None):
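The refactor moves the version probe to import time, so HAS_GPG_BINDINGS and GPG_1_3_1 are set once in the try/except and __virtual__() only returns either the module name or a (False, reason) tuple. A rough self-contained sketch of that gating pattern outside of Salt (the module name and message are illustrative; _LooseVersion in the diff is presumably a LooseVersion-style comparator, and distutils' LooseVersion is used here only as a stand-in):

    try:
        from distutils.version import LooseVersion  # stand-in for _LooseVersion
        import gnupg
        HAS_GPG_BINDINGS = True
        GPG_1_3_1 = LooseVersion(gnupg.__version__) >= LooseVersion('1.3.1')
    except ImportError:
        HAS_GPG_BINDINGS = False
        GPG_1_3_1 = False

    def __virtual__():
        # Either the virtual module name, or (False, reason) explaining the skip.
        return 'gpg' if HAS_GPG_BINDINGS \
            else (False, 'the gnupg python module is not installed')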
@@ -144,7 +144,7 @@ def rpc(cmd=None, dest=None, format='xml', **kwargs):
         The rpc to be executed. (default = None)
     Optional
         * dest:
-          Destination file where the rpc ouput is stored. (default = None)
+          Destination file where the rpc output is stored. (default = None)
          Note that the file will be stored on the proxy minion. To push the
          files to the master use the salt's following execution module:
          :py:func:`cp.push <salt.modules.cp.push>`
@@ -101,7 +101,7 @@ def _filter_dict(input_dict, search_key, search_value):

 def _explicit_close(napalm_device):
     '''
-    Will explicitely close the config session with the network device,
+    Will explicitly close the config session with the network device,
     when running in a now-always-alive proxy minion or regular minion.
     This helper must be used in configuration-related functions,
     as the session is preserved and not closed before making any changes.
@@ -139,7 +139,7 @@ def _config_logic(napalm_device,
     # then the decorator will make sure that
     # if not proxy (when the connection is always alive)
     # and the `inherit_napalm_device` is set,
-    # `napalm_device` will be overriden.
+    # `napalm_device` will be overridden.
     # See `salt.utils.napalm.proxy_napalm_wrap` decorator.

     loaded_result['already_configured'] = False
@@ -830,17 +830,17 @@ def mod_repo(repo, **kwargs):
         the URL for zypper to reference

     enabled
-        enable or disable (True or False) repository,
+        Enable or disable (True or False) repository,
         but do not remove if disabled.

     refresh
-        enable or disable (True or False) auto-refresh of the repository.
+        Enable or disable (True or False) auto-refresh of the repository.

     cache
         Enable or disable (True or False) RPM files caching.

     gpgcheck
-        Enable or disable (True or False) GOG check for this repository.
+        Enable or disable (True or False) GPG check for this repository.

     gpgautoimport
         Automatically trust and import new repository.
@@ -90,6 +90,10 @@ try:
 except ImportError:
     HAS_GETPASS = False

+# This is required to support international characters in AWS EC2 tags or any
+# other kind of metadata provided by particular Cloud vendor.
+MSGPACK_ENCODING = 'utf-8'
+
 NSTATES = {
     0: 'running',
     1: 'rebooting',
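The remaining hunks below thread MSGPACK_ENCODING through each msgpack.dump()/msgpack.load() call in the cloud cache helpers (salt/utils/cloud.py, per the conflict list above), so non-ASCII metadata such as EC2 tags survives the cache round trip. The encoding= keyword comes from the pre-1.0 msgpack-python API; a rough sketch of the equivalent round trip, written with the spellings current msgpack releases accept (use_bin_type/raw) and invented node data:

    import msgpack

    node = {'name': 'web-01', 'tags': {'owner': 'Óscar', 'env': 'préprod'}}

    # Pack unicode strings as UTF-8 (use_bin_type=True keeps str and bytes
    # distinct); unpack with raw=False so they come back as str, not raw bytes.
    blob = msgpack.packb(node, use_bin_type=True)
    restored = msgpack.unpackb(blob, raw=False)
    assert restored == node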
@@ -2506,7 +2510,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None):
     if os.path.exists(index_file):
         mode = 'rb' if six.PY3 else 'r'
         with salt.utils.files.fopen(index_file, mode) as fh_:
-            index = salt.utils.data.decode(msgpack.load(fh_))
+            index = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING))
     else:
         index = {}

@@ -2523,7 +2527,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None):

     mode = 'wb' if six.PY3 else 'w'
     with salt.utils.files.fopen(index_file, mode) as fh_:
-        msgpack.dump(index, fh_)
+        msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)

     unlock_file(index_file)

@@ -2540,7 +2544,7 @@ def cachedir_index_del(minion_id, base=None):
     if os.path.exists(index_file):
         mode = 'rb' if six.PY3 else 'r'
         with salt.utils.files.fopen(index_file, mode) as fh_:
-            index = salt.utils.data.decode(msgpack.load(fh_))
+            index = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING))
     else:
         return

@@ -2549,7 +2553,7 @@ def cachedir_index_del(minion_id, base=None):

     mode = 'wb' if six.PY3 else 'w'
     with salt.utils.files.fopen(index_file, mode) as fh_:
-        msgpack.dump(index, fh_)
+        msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)

     unlock_file(index_file)

@@ -2607,7 +2611,7 @@ def request_minion_cachedir(
     path = os.path.join(base, 'requested', fname)
     mode = 'wb' if six.PY3 else 'w'
     with salt.utils.files.fopen(path, mode) as fh_:
-        msgpack.dump(data, fh_)
+        msgpack.dump(data, fh_, encoding=MSGPACK_ENCODING)


 def change_minion_cachedir(
@@ -2639,12 +2643,12 @@ def change_minion_cachedir(
     path = os.path.join(base, cachedir, fname)

     with salt.utils.files.fopen(path, 'r') as fh_:
-        cache_data = salt.utils.data.decode(msgpack.load(fh_))
+        cache_data = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING))

     cache_data.update(data)

     with salt.utils.files.fopen(path, 'w') as fh_:
-        msgpack.dump(cache_data, fh_)
+        msgpack.dump(cache_data, fh_, encoding=MSGPACK_ENCODING)


 def activate_minion_cachedir(minion_id, base=None):
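change_minion_cachedir follows a plain read-update-write cycle on the per-minion .p file: load the msgpack payload, dict.update() it with the new fields, and dump the whole mapping back. A rough standalone sketch of that cycle (file path and fields are invented; modern msgpack keyword spellings used instead of encoding=):

    import msgpack

    path = '/tmp/minion-web-01.p'  # illustrative cache file path

    # Seed the cache file once so the read below has something to load.
    with open(path, 'wb') as fh_:
        fh_.write(msgpack.packb({'id': 'web-01', 'state': 'pending'}, use_bin_type=True))

    # Read-update-write: load, merge the new data, write everything back.
    with open(path, 'rb') as fh_:
        cache_data = msgpack.unpack(fh_, raw=False)

    cache_data.update({'state': 'running', 'public_ip': '203.0.113.10'})

    with open(path, 'wb') as fh_:
        msgpack.pack(cache_data, fh_, use_bin_type=True)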
@@ -2718,7 +2722,7 @@ def list_cache_nodes_full(opts=None, provider=None, base=None):
                 minion_id = fname[:-2]  # strip '.p' from end of msgpack filename
                 mode = 'rb' if six.PY3 else 'r'
                 with salt.utils.files.fopen(fpath, mode) as fh_:
-                    minions[driver][prov][minion_id] = salt.utils.data.decode(msgpack.load(fh_))
+                    minions[driver][prov][minion_id] = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING))

     return minions

@@ -2895,7 +2899,7 @@ def cache_node_list(nodes, provider, opts):
         path = os.path.join(prov_dir, '{0}.p'.format(node))
         mode = 'wb' if six.PY3 else 'w'
         with salt.utils.files.fopen(path, mode) as fh_:
-            msgpack.dump(nodes[node], fh_)
+            msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING)


 def cache_node(node, provider, opts):
@@ -2921,7 +2925,7 @@ def cache_node(node, provider, opts):
     path = os.path.join(prov_dir, '{0}.p'.format(node['name']))
     mode = 'wb' if six.PY3 else 'w'
     with salt.utils.files.fopen(path, mode) as fh_:
-        msgpack.dump(node, fh_)
+        msgpack.dump(node, fh_, encoding=MSGPACK_ENCODING)


 def missing_node_cache(prov_dir, node_list, provider, opts):
@@ -2996,7 +3000,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):

     with salt.utils.files.fopen(path, 'r') as fh_:
         try:
-            cache_data = salt.utils.data.decode(msgpack.load(fh_))
+            cache_data = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING))
         except ValueError:
             log.warning('Cache for %s was corrupt: Deleting', node)
             cache_data = {}
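diff_node_cache also shows the defensive side of this cache handling: if the stored .p file cannot be decoded, it logs a warning and treats the cache as empty rather than failing the run. A minimal sketch of that fallback, independent of Salt (path, logger name and the exact exception set are illustrative; current msgpack raises its own unpack errors in addition to ValueError):

    import logging
    import msgpack

    log = logging.getLogger('cache-demo')

    def load_cache(path):
        # Return the cached mapping, or {} when the file is missing or corrupt.
        try:
            with open(path, 'rb') as fh_:
                return msgpack.unpack(fh_, raw=False)
        except (OSError, ValueError, msgpack.exceptions.UnpackException):
            log.warning('Cache at %s was missing or corrupt; starting empty', path)
            return {}

    print(load_cache('/tmp/does-not-exist.p'))  # -> {}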
@@ -300,7 +300,7 @@ class MasterPillarUtil(object):
         cached minion data on the master, or by fetching the grains
         directly on the minion.

-        By default, this function tries hard to get the pillar data:
+        By default, this function tries hard to get the grains data:
            - Try to get the cached minion grains if the master
              has minion_data_cache: True
                - If the grains data for the minion is cached, use it.
@@ -309,6 +309,8 @@ class MasterPillarUtil(object):
         '''
         minion_grains = {}
         minion_ids = self._tgt_to_list()
+        if not minion_ids:
+            return {}
         if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
             log.debug('Getting cached minion data.')
             cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)