Merge branch '2016.11' into 'nitrogen'

No conflicts.
rallytime 2017-04-18 16:42:30 -06:00
commit 6493880bde
8 changed files with 62 additions and 12 deletions

View file

@@ -193,6 +193,15 @@
 # The wait-time will be a random number of seconds between 0 and the defined value.
 #random_reauth_delay: 60
 
+# To avoid overloading a master when many minions start up at once, a
+# randomized delay may be set to tell the minions to wait before connecting
+# to the master. This value is the upper bound, in seconds, for that random
+# delay. For example, setting this value to 60 will delay startup by a random
+# number of seconds between zero and sixty. Setting this to '0' will disable
+# this feature.
+#random_startup_delay: 0
+
 # When waiting for a master to accept the minion's public key, salt will
 # continuously attempt to reconnect until successful. This is the timeout value,
 # in seconds, for each individual attempt. After this timeout expires, the minion
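For illustration, a minimal sketch of the two splay options side by side in an /etc/salt/minion config; the values here are assumptions, not from the commit:

    # Splay re-auth attempts after a master restart (existing option).
    random_reauth_delay: 60
    # Splay initial connections when many minions boot at once (new option).
    random_startup_delay: 30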

View file

@@ -805,6 +805,24 @@ restart.
 
+.. conf_minion:: random_startup_delay
+
+``random_startup_delay``
+------------------------
+
+Default: ``0``
+
+The upper bound, in seconds, of an interval in which a minion will randomly
+sleep upon starting up, prior to attempting to connect to a master. This can
+be used to splay connection attempts for cases where many minions starting up
+at once may place undue load on a master.
+
+For example, setting this to ``5`` will tell a minion to sleep for a value
+between ``0`` and ``5`` seconds.
+
+.. code-block:: yaml
+
+    random_startup_delay: 5
+
 .. conf_minion:: recon_default
 
 ``recon_default``
 -----------------

View file

@@ -786,6 +786,12 @@ VALID_OPTS = {
     # The logfile location for salt-key
     'key_logfile': str,
 
+    # The upper bound for the random number of seconds that a minion should
+    # delay when starting up, before it connects to a master. This can be
+    # used to mitigate a thundering-herd scenario when many minions start up
+    # at once and all attempt to connect to the master immediately.
+    'random_startup_delay': int,
+
     # The source location for the winrepo sls files
     # (used by win_pkg.py, minion only)
     'winrepo_source_dir': str,
@@ -1068,6 +1074,7 @@ DEFAULT_MINION_OPTS = {
     'renderer': 'yaml_jinja',
     'renderer_whitelist': [],
     'renderer_blacklist': [],
+    'random_startup_delay': 0,
    'failhard': False,
    'autoload_dynamic_modules': True,
    'environment': None,
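Together, the two hunks above register the option's type in VALID_OPTS and its default in DEFAULT_MINION_OPTS, so every loaded minion configuration carries the key. A minimal sketch of reading it back, assuming a standard config path:

    import salt.config

    opts = salt.config.minion_config('/etc/salt/minion')
    print(opts['random_startup_delay'])  # 0 unless overridden in the config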

View file

@@ -11,6 +11,7 @@ import copy
 import time
 import types
 import signal
+import random
 import fnmatch
 import logging
 import threading
@@ -968,6 +969,14 @@ class Minion(MinionBase):
         self.opts['grains'] = salt.loader.grains(opts)
         log.info('Creating minion process manager')
 
+        if self.opts['random_startup_delay']:
+            sleep_time = random.randint(0, self.opts['random_startup_delay'])
+            log.info('Minion sleeping for {0} seconds due to configured '
+                     'startup_delay between 0 and {1} seconds'.format(
+                         sleep_time, self.opts['random_startup_delay']))
+            time.sleep(sleep_time)
+
         self.process_manager = ProcessManager(name='MinionProcessManager')
         self.io_loop.spawn_callback(self.process_manager.run, async=True)
         # We don't have the proxy setup yet, so we can't start engines
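A self-contained sketch of the delay logic above, mainly to note that random.randint is inclusive on both ends, so a setting of 60 can yield a delay of exactly 0 or exactly 60 seconds:

    import random
    import time

    def splay_startup(max_delay):
        # Sleep a uniformly chosen whole number of seconds in [0, max_delay].
        if max_delay:
            time.sleep(random.randint(0, max_delay))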

View file

@@ -18,6 +18,7 @@ except ImportError:
     HAS_SALTCLOUD = False
 
 import salt.utils
+from salt.exceptions import SaltCloudConfigError
 
 # Import 3rd-party libs
 import salt.ext.six as six
@@ -250,7 +251,12 @@
         salt minionname cloud.action show_image provider=my-ec2-config image=ami-1624987f
     '''
     client = _get_client()
-    info = client.action(fun, cloudmap, names, provider, instance, kwargs)
+    try:
+        info = client.action(fun, cloudmap, names, provider, instance, kwargs)
+    except SaltCloudConfigError as err:
+        log.error(err)
+        return None
     return info
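Because the module now logs SaltCloudConfigError and returns None instead of raising, callers should guard the result rather than wrapping the call in try/except. A hedged caller-side sketch, reusing the CLI example's arguments:

    info = action('show_image', provider='my-ec2-config', image='ami-1624987f')
    if info is None:
        # The configuration error was already logged by the module.
        info = {}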

View file

@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 '''
-Manage users with the useradd command
+Manage users with the pw command
 
 .. important::
     If you feel that Salt should be using this module to manage users on a
@@ -475,7 +475,7 @@ def get_loginclass(name):
     userinfo = __salt__['cmd.run_stdout'](['pw', 'usershow', '-n', name])
     userinfo = userinfo.split(':')
 
-    return {'loginclass': userinfo[4] if len(userinfo) == 10 else ''}
+    return userinfo[4] if len(userinfo) == 10 else ''
 
 def list_groups(name):
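The second hunk changes the function's return type from a dict to a bare string, which is worth flagging for API consumers. A before/after sketch with an assumed user name:

    # Before this commit: get_loginclass('someuser') -> {'loginclass': 'default'}
    # After this commit:  get_loginclass('someuser') -> 'default'
    loginclass = get_loginclass('someuser')
    assert isinstance(loginclass, str)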

View file

@@ -14,6 +14,7 @@ import os
 
 # Import Salt libs
 import salt.cloud
+from salt.exceptions import SaltCloudConfigError
 
 # Get logging started
 log = logging.getLogger(__name__)
@@ -150,8 +151,12 @@ def action(func=None,
         salt-run cloud.action start my-salt-vm
     '''
+    info = {}
     client = _get_client()
-    info = client.action(func, cloudmap, instances, provider, instance, **_filter_kwargs(kwargs))
+    try:
+        info = client.action(func, cloudmap, instances, provider, instance, **_filter_kwargs(kwargs))
+    except SaltCloudConfigError as err:
+        log.error(err)
     return info
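Unlike the execution module above, the runner initializes info before the call, so a configuration error yields an empty dict rather than None. A self-contained sketch of that initialize-then-try pattern, with a stand-in exception class since salt is not imported here:

    import logging

    log = logging.getLogger(__name__)

    class SaltCloudConfigError(Exception):
        '''Stand-in for salt.exceptions.SaltCloudConfigError.'''

    def run_action(client):
        info = {}
        try:
            info = client.action()
        except SaltCloudConfigError as err:
            log.error(err)  # log and fall through; callers get a dict either way
        return info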

View file

@@ -63,8 +63,7 @@ class MinionTestCase(TestCase):
         return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
         jobs.
         '''
-        mock_opts = {'cachedir': '',
-                     'extension_modules': ''}
+        mock_opts = salt.config.DEFAULT_MINION_OPTS
         mock_data = {'fun': 'foo.bar',
                      'jid': 123}
         mock_jid_queue = [123]
@@ -85,9 +84,7 @@ class MinionTestCase(TestCase):
         jid isn't already present in the jid_queue.
         '''
         mock_jid = 11111
-        mock_opts = {'cachedir': '',
-                     'extension_modules': '',
-                     'minion_jid_queue_hwm': 100}
+        mock_opts = salt.config.DEFAULT_MINION_OPTS
         mock_data = {'fun': 'foo.bar',
                      'jid': mock_jid}
         mock_jid_queue = [123, 456]
@@ -115,9 +112,8 @@ class MinionTestCase(TestCase):
         Tests that the _handle_decoded_payload function removes a jid from the minion's jid_queue when the
         minion's jid_queue high water mark (minion_jid_queue_hwm) is hit.
         '''
-        mock_opts = {'cachedir': '',
-                     'extension_modules': '',
-                     'minion_jid_queue_hwm': 2}
+        mock_opts = salt.config.DEFAULT_MINION_OPTS
+        mock_opts['minion_jid_queue_hwm'] = 2
         mock_data = {'fun': 'foo.bar',
                      'jid': 789}
         mock_jid_queue = [123, 456]
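One caution on the test changes: salt.config.DEFAULT_MINION_OPTS is a module-level dict, so binding it directly and then mutating a key, as the last hunk does, leaks state into every other test that reads the defaults. A safer variant, offered as a sketch rather than something from the commit:

    import copy
    import salt.config

    # Deep-copy the defaults so per-test tweaks cannot leak between tests.
    mock_opts = copy.deepcopy(salt.config.DEFAULT_MINION_OPTS)
    mock_opts['minion_jid_queue_hwm'] = 2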