Merge branch '2016.3' into 'develop'

Conflicts:
  - salt/grains/core.py
  - salt/state.py
  - tests/unit/grains/core_test.py
rallytime 2016-07-21 12:06:25 -06:00
commit 1a9d6eee15
13 changed files with 1218 additions and 134 deletions

View file

@@ -86,7 +86,7 @@
# Set the default output file used by the salt command. Default is to output
# to the CLI and not to a file. Functions the same way as the "--out-file"
CLI option, only sets this to a single file for all salt commands.
# CLI option, only sets this to a single file for all salt commands.
#output_file: None
# Return minions that time out when running commands like test.ping
@@ -248,6 +248,14 @@ CLI option, only sets this to a single file for all salt commands.
# ZMQ high-water-mark for EventPublisher pub socket
#event_publisher_pub_hwm: 10000
# The master may allocate memory per-event and not reclaim it.
# To keep that from growing unbounded, use ipc_write_buffer to set
# a high-water mark for message buffering.
# Value: in bytes. Set to 'dynamic' to have Salt select a value for
# you. Default is disabled.
# ipc_write_buffer: 'dynamic'
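
In practice the option resolves in three ways, as the apply_minion_config and apply_master_config hunks later in this diff show. A condensed sketch (the helper name is hypothetical; the real logic is inline in salt/config):

    def _resolve_ipc_write_buffer(overrides, dynamic_default):
        # hypothetical helper mirroring the inline logic in salt/config
        if overrides.get('ipc_write_buffer', '') == 'dynamic':
            return dynamic_default      # the memory-derived _DFLT_IPC_WBUFFER
        if 'ipc_write_buffer' not in overrides:
            return 0                    # unset means buffering is disabled
        return overrides['ipc_write_buffer']  # explicit byte count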
##### Security settings #####

File diff suppressed because it is too large

View file

@@ -9,6 +9,7 @@ from __future__ import absolute_import
# Import third party libs
try:
from pyroute2.ipdb import IPDB
IP = IPDB()
HAS_PYROUTE2 = True
except ImportError:
HAS_PYROUTE2 = False
@@ -29,8 +30,6 @@ ATTRS = ['family', 'txqlen', 'ipdb_scope', 'index', 'operstate', 'group',
LAST_STATS = {}
IP = IPDB()
class Hashabledict(dict):
'''

View file

@@ -8,10 +8,12 @@ from __future__ import absolute_import
import collections
import copy
import math
import json
# Import salt libs
import salt.utils
import salt.utils.dictupdate
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
# Import 3rd-party libs
@@ -46,7 +48,7 @@ _SANITIZERS = {
}
def get(key, default=''):
def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
'''
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
@@ -67,7 +69,35 @@ def get(key, default=''):
salt '*' grains.get pkg:apache
'''
return salt.utils.traverse_dict_and_list(__grains__, key, default)
if ordered is True:
grains = __grains__
else:
grains = json.loads(json.dumps(__grains__))
return salt.utils.traverse_dict_and_list(grains,
key,
default,
delimiter)
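
The ordered flag exists because __grains__ may be an OrderedDict; ordered=False round-trips it through JSON to hand back plain dicts. A minimal standalone illustration of that trick:

    import json
    from collections import OrderedDict

    grains = OrderedDict([('pkg', OrderedDict([('apache', 'httpd')]))])
    plain = json.loads(json.dumps(grains))  # ordering metadata is dropped
    assert isinstance(plain, dict) and not isinstance(plain, OrderedDict)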
def has_value(key):
'''
Determine whether a named value exists in the grains dictionary.
Given a grains dictionary that contains the following structure::
{'pkg': {'apache': 'httpd'}}
One would determine if the apache key in the pkg dict exists by::
pkg:apache
CLI Example:
.. code-block:: bash
salt '*' grains.has_value pkg:apache
'''
return True if salt.utils.traverse_dict_and_list(__grains__, key, False) else False
def items(sanitize=False):

View file

@@ -528,6 +528,56 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
return high_data
def show_low_sls(mods, saltenv='base', test=None, env=None, **kwargs):
'''
Display the low state data from a specific sls or list of sls files on the
master
CLI Example:
.. code-block:: bash
salt '*' state.show_low_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
if env is not None:
salt.utils.warn_until(
'Carbon',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Carbon.'
)
# Backwards compatibility
saltenv = env
opts = copy.copy(__opts__)
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
else:
opts['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
if isinstance(mods, string_types):
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
if errors:
return errors
high_data, req_in_errors = st_.state.requisite_in(high_data)
errors += req_in_errors
high_data = st_.state.apply_exclude(high_data)
# Verify that the high data is structurally sound
if errors:
return errors
ret = st_.state.compile_high_data(high_data)
return ret
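
For reference, compile_high_data returns a list of flat low-state chunks; a representative (hypothetical) entry for the CLI example above looks roughly like this:

    low_chunk = {'state': 'pkg', 'name': 'vim', 'fun': 'installed',
                 '__id__': 'vim', '__sls__': 'edit.vim', '__env__': 'base',
                 'order': 10000}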
def show_top():
'''
Return the top data that the minion will use for a highstate

View file

@@ -41,6 +41,14 @@ import salt.exceptions
from salt.utils.locales import sdecode
import salt.defaults.exitcodes
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import platform
import salt.grains.core
log = logging.getLogger(__name__)
_DFLT_LOG_DATEFMT = '%H:%M:%S'
@@ -60,6 +68,31 @@ else:
_DFLT_IPC_MODE = 'ipc'
_MASTER_TRIES = 1
def _gather_buffer_space():
'''
Gather some system data and then calculate
buffer space.
Result is in bytes.
'''
if HAS_PSUTIL:
# Oh good, we have psutil. This will be quick.
total_mem = psutil.virtual_memory().total
else:
# We need to load up ``mem_total`` grain. Let's mimic required OS data.
os_data = {'kernel': platform.system()}
grains = salt.grains.core._memdata(os_data)
total_mem = grains['mem_total']
# Return the higher number between 5% of the system memory and 10 MiB
return max([total_mem * 0.05, 10 << 20])
# For the time being this will be a fixed calculation
# TODO: Allow user configuration
_DFLT_IPC_WBUFFER = _gather_buffer_space() * .5
# TODO: Reserved for future use
_DFLT_IPC_RBUFFER = _gather_buffer_space() * .5
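
To make the sizing concrete (the 16 GiB figure is assumed purely for illustration):

    total_mem = 16 * (1 << 30)                        # assume a 16 GiB host
    buffer_space = max([total_mem * 0.05, 10 << 20])  # 5% (~819 MiB) beats the 10 MiB floor
    dynamic_default = buffer_space * .5               # _DFLT_IPC_WBUFFER: ~410 MiB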
FLO_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'daemons', 'flo')
@@ -451,6 +484,10 @@ VALID_OPTS = {
# ZMQ HWM for EventPublisher pub socket
'event_publisher_pub_hwm': int,
# IPC buffer size
# Refs https://github.com/saltstack/salt/issues/34215
'ipc_write_buffer': int,
# The number of MWorker processes for a master to startup. This number needs to scale up as
# the number of connected minions increases.
'worker_threads': int,
@@ -965,6 +1002,7 @@ DEFAULT_MINION_OPTS = {
'mine_return_job': False,
'mine_interval': 60,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
@@ -1222,6 +1260,7 @@ DEFAULT_MASTER_OPTS = {
'minion_data_cache': True,
'enforce_mine_cache': False,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'tcp_master_pub_port': 4512,
'tcp_master_pull_port': 4513,
@@ -3009,6 +3048,11 @@ def apply_minion_config(overrides=None,
if 'beacons' not in opts:
opts['beacons'] = {}
if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
# if there is no schedule option yet, add an empty scheduler
if 'schedule' not in opts:
opts['schedule'] = {}
@@ -3083,7 +3127,10 @@ def apply_master_config(overrides=None, defaults=None):
)
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
using_ip_for_id = False
append_master = False
if not opts.get('id'):

View file

@@ -646,7 +646,6 @@ def create(zpool, *vdevs, **kwargs):
salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
'''
ret = {}
dlist = []
# Check if the pool_name is already being used
if exists(zpool):
@@ -657,23 +656,7 @@ def create(zpool, *vdevs, **kwargs):
ret[zpool] = 'no devices specified'
return ret
# make sure files are present on filesystem
ret[zpool] = {}
for vdev in vdevs:
if vdev not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
if not os.path.exists(vdev):
ret[zpool][vdev] = 'not present on filesystem'
continue
mode = os.stat(vdev).st_mode
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode) and not stat.S_ISCHR(mode):
ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
continue
dlist.append(vdev)
if len(ret[zpool]) > 0:
return ret
devs = ' '.join(dlist)
devs = ' '.join(vdevs)
zpool_cmd = _check_zpool()
force = kwargs.get('force', False)
altroot = kwargs.get('altroot', None)
@@ -688,10 +671,13 @@
if properties: # create "-o property=value" pairs
optlist = []
for prop in properties:
if ' ' in properties[prop]:
value = "'{0}'".format(properties[prop])
if isinstance(properties[prop], bool):
value = 'on' if properties[prop] else 'off'
else:
value = properties[prop]
if ' ' in properties[prop]:
value = "'{0}'".format(properties[prop])
else:
value = properties[prop]
optlist.append('-o {0}={1}'.format(prop, value))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
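
Tracing that loop with hypothetical input shows both the new bool handling and the quoting of values containing spaces (dict iteration order may vary):

    properties = {'autoexpand': True, 'comment': 'my salty pool'}
    # -> optlist == ['-o autoexpand=on', "-o comment='my salty pool'"]
    # -> cmd gains the suffix: -o autoexpand=on -o comment='my salty pool'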
@@ -718,7 +704,7 @@ def create(zpool, *vdevs, **kwargs):
if res['retcode'] != 0:
ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[zpool] = 'created'
ret[zpool] = 'created with {0}'.format(devs)
return ret
@@ -743,7 +729,6 @@ def add(zpool, *vdevs, **kwargs):
salt '*' zpool.add myzpool /path/to/vdev1 /path/to/vdev2 [...]
'''
ret = {}
dlist = []
# check for pool
if not exists(zpool):
@@ -755,24 +740,7 @@ def add(zpool, *vdevs, **kwargs):
return ret
force = kwargs.get('force', False)
# make sure files are present on filesystem
ret[zpool] = {}
for vdev in vdevs:
if vdev not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
if not os.path.exists(vdev):
ret[zpool][vdev] = 'not present on filesystem'
continue
mode = os.stat(vdev).st_mode
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
continue
dlist.append(vdev)
if len(ret[zpool]) > 0:
return ret
devs = ' '.join(dlist)
devs = ' '.join(vdevs)
# try and add watch out for mismatched replication levels
zpool_cmd = _check_zpool()
@@ -786,10 +754,7 @@ def add(zpool, *vdevs, **kwargs):
if res['retcode'] != 0:
ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[zpool] = {}
for device in dlist:
if device not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
ret[zpool][device] = 'added'
ret[zpool] = 'added {0}'.format(devs)
return ret
@@ -970,8 +935,7 @@ def replace(zpool, old_device, new_device=None, force=False):
if res['retcode'] != 0:
ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[zpool] = {}
ret[zpool][old_device] = 'replaced with {0}'.format(new_device)
ret[zpool] = 'replaced {0} with {1}'.format(old_device, new_device)
return ret
@@ -1207,22 +1171,7 @@ def online(zpool, *vdevs, **kwargs):
# get expand option
expand = kwargs.get('expand', False)
# make sure files are present on filesystem
ret[zpool] = {}
for vdev in vdevs:
if not os.path.exists(vdev):
ret[zpool][vdev] = 'not present on filesystem'
continue
mode = os.stat(vdev).st_mode
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
ret[zpool][vdev] = 'not a block device, a file vdev or character special device'
continue
dlist.append(vdev)
if len(ret[zpool]) > 0:
return ret
devs = ' '.join(dlist)
devs = ' '.join(vdevs)
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} online {expand}{zpool} {devs}'.format(
zpool_cmd=zpool_cmd,
@@ -1235,10 +1184,7 @@ def online(zpool, *vdevs, **kwargs):
if res['retcode'] != 0:
ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[zpool] = {}
for device in dlist:
if device not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
ret[zpool][device] = 'onlined'
ret[zpool] = 'onlined {0}'.format(devs)
return ret
@@ -1294,10 +1240,7 @@ def offline(zpool, *vdevs, **kwargs):
if res['retcode'] != 0:
ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[zpool] = {}
for device in vdevs:
if device not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
ret[zpool][device] = 'offlined'
ret[zpool] = 'offlined {0}'.format(devs)
return ret

View file

@@ -51,6 +51,7 @@ import salt.utils.yamlloader as yamlloader
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import map, range, reload_module
from salt.ext.six import string_types
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
@@ -523,6 +524,8 @@ class Compiler(object):
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
elif chunk['order'] == 'first':
chunk['order'] = 0
else:
chunk['order'] = cap
if 'name_order' in chunk:
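
Condensing the branch above into a standalone sketch (the helper is hypothetical; cap stands for the current highest order, e.g. 100):

    def _resolve_order(order, cap):
        # hypothetical condensation of the branch above
        if isinstance(order, (int, float)):
            return order
        if order == 'last':
            return cap + 1000000   # sorts after everything else
        if order == 'first':
            return 0               # new: sorts before everything else
        return cap                 # unrecognized values fall back to cap

    assert _resolve_order('first', 100) == 0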
@@ -788,6 +791,9 @@ class State(object):
else:
low_data_onlyif = low_data['onlyif']
for entry in low_data_onlyif:
if not isinstance(entry, string_types):
ret.update({'comment': 'onlyif execution failed, bad type passed', 'result': False})
return ret
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
@@ -806,6 +812,9 @@ class State(object):
else:
low_data_unless = low_data['unless']
for entry in low_data_unless:
if not isinstance(entry, string_types):
ret.update({'comment': 'unless execution failed, bad type passed', 'result': False})
return ret
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
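
The new isinstance guards matter because onlyif/unless entries are handed straight to cmd.retcode as shell commands; a hypothetical malformed requisite now fails cleanly:

    low_data = {'unless': [{'not': 'a command string'}]}  # dict, not a str
    # -> ret becomes {'comment': 'unless execution failed, bad type passed',
    #                 'result': False} instead of crashing in cmd.retcode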
@@ -1240,6 +1249,8 @@ class State(object):
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
elif chunk['order'] == 'first':
chunk['order'] = 0
else:
chunk['order'] = cap
if 'name_order' in chunk:

View file

@@ -30,6 +30,17 @@ Management zpool
/dev/disk2
/dev/disk3
simplepool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: another salty storage pool
- layout:
- /dev/disk0
- /dev/disk1
.. warning::
The layout will never be updated; it is only used at the time of creation.
@@ -42,7 +53,6 @@ from __future__ import absolute_import
# Import Python libs
import os
import stat
import logging
# Import Salt libs
@@ -69,23 +79,6 @@ def __virtual__():
)
def _check_device(device, config):
'''
Check if device is present
'''
if '/' not in device and config['device_dir'] and os.path.exists(config['device_dir']):
device = os.path.join(config['device_dir'], device)
if not os.path.exists(device):
return False, 'not present on filesystem'
else:
mode = os.stat(device).st_mode
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode) and not stat.S_ISCHR(mode):
return False, 'not a block device, a file vdev or character special device'
return True, ''
def present(name, properties=None, filesystem_properties=None, layout=None, config=None):
'''
ensure storage pool is present on the system
@@ -159,39 +152,22 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
config = {
'import': True,
'import_dirs': None,
'device_dir': None if __grains__['kernel'] != 'SunOS' else '/dev/rdsk',
'device_dir': None,
'force': False
}
if __grains__['kernel'] == 'SunOS':
config['device_dir'] = '/dev/rdsk'
elif __grains__['kernel'] == 'Linux':
config['device_dir'] = '/dev'
config.update(state_config)
log.debug('zpool.present::{0}::config - {1}'.format(name, config))
# validate layout
# parse layout
if layout:
layout_valid = True
layout_result = {}
for root_dev in layout:
if '-' in root_dev:
if root_dev.split('-')[0] not in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
layout_valid = False
layout_result[root_dev] = 'not a valid vdev type'
layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ')
for dev in layout[root_dev]:
dev_info = _check_device(dev, config)
if not dev_info[0]:
layout_valid = False
layout_result[root_dev] = {}
layout_result[root_dev][dev] = dev_info[1]
else:
dev_info = _check_device(root_dev, config)
if not dev_info[0]:
layout_valid = False
layout_result[root_dev] = dev_info[1]
if not layout_valid:
ret['result'] = False
ret['comment'] = "{0}".format(layout_result)
return ret
if '-' not in root_dev:
continue
layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ')
log.debug('zpool.present::{0}::layout - {1}'.format(name, layout))
@@ -267,12 +243,13 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
params.append(name)
for root_dev in layout:
if '-' in root_dev: # special device
params.append(root_dev.split('-')[0]) # add the type by stripping the ID
if root_dev.split('-')[0] in ['mirror', 'log', 'cache', 'raidz1', 'raidz2', 'raidz3', 'spare']:
for sub_dev in layout[root_dev]: # add all sub devices
if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']):
sub_dev = os.path.join(config['device_dir'], sub_dev)
params.append(sub_dev)
# NOTE: accommodate non-existent 'disk' vdev
if root_dev.split('-')[0] != 'disk':
params.append(root_dev.split('-')[0]) # add the type by stripping the ID
for sub_dev in layout[root_dev]: # add all sub devices
if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']):
sub_dev = os.path.join(config['device_dir'], sub_dev)
params.append(sub_dev)
else: # normal device
if '/' not in root_dev and config['device_dir'] and os.path.exists(config['device_dir']):
root_dev = os.path.join(config['device_dir'], root_dev)
@@ -280,11 +257,11 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
# execute zpool.create
ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties)
if ret['result'].get(name) == 'created':
if ret['result'].get(name).startswith('created'):
ret['result'] = True
else:
if ret['result'].get(name):
ret['comment'] = ret['result'][name]
ret['comment'] = ret['result'].get(name)
ret['result'] = False
if ret['result']:

View file

@@ -440,9 +440,10 @@
A Tornado IPC Publisher similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, socket_path, io_loop=None):
def __init__(self, opts, socket_path, io_loop=None):
'''
Create a new Tornado IPC server
:param dict opts: Salt options
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
@@ -453,6 +454,7 @@
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
'''
self.opts = opts
self.socket_path = socket_path
self._started = False
@@ -515,10 +517,18 @@
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
if self.opts['ipc_write_buffer'] > 0:
log.trace('Setting IPC connection write buffer: {0}'.format(self.opts['ipc_write_buffer']))
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
)
self.streams.add(stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
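
max_write_buffer_size is stock Tornado 4 IOStream behavior: once a slow subscriber has that many bytes queued unsent, further writes raise StreamBufferFullError instead of buffering without bound. A minimal sketch, independent of Salt:

    import socket
    from tornado.iostream import IOStream, StreamBufferFullError

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    stream = IOStream(sock, max_write_buffer_size=10 << 20)
    # stream.write(...) now raises StreamBufferFullError past ~10 MiB queued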

View file

@@ -882,6 +882,7 @@ class AsyncEventPublisher(object):
raise
self.publisher = salt.transport.ipc.IPCMessagePublisher(
self.opts,
epub_uri,
io_loop=self.io_loop
)
@@ -969,6 +970,7 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
)
self.publisher = salt.transport.ipc.IPCMessagePublisher(
self.opts,
epub_uri,
io_loop=self.io_loop
)

View file

@@ -17,6 +17,7 @@ ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
import salt.ext.six as six
from salt.modules import mysql as mysqlmod
@@ -28,6 +29,9 @@ try:
except ImportError:
NO_MYSQL = True
if not salt.utils.which('mysqladmin'):
NO_MYSQL = True
@skipIf(
NO_MYSQL,

View file

@@ -78,6 +78,7 @@ class ExtendedTestCase(TestCase):
self.assertEqual(exc.message, exc_msg)
@skipIf(True, 'Test mock token is not properly mocked and occasionally causes the test suite to hang.')
@skipIf(not HAS_CERTS, 'Cannot find CA cert bundle')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.cloud.clouds.gce.__virtual__', MagicMock(return_value='gce'))