mirror of https://github.com/saltstack/salt.git, synced 2025-04-17 10:10:20 +00:00
Merge remote-tracking branch 'upstream/2017.7' into merge-develop
Commit 9a50e7c1ca: 23 changed files with 1,188 additions and 505 deletions.
doc/_themes/saltstack2/layout.html (vendored): 2 changes
@@ -255,8 +255,8 @@
     <div class="col-sm-6">
-        <a href="https://saltstack.com/support" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/footer-support.png', 1) }}"/></a>
-        <a href="https://saltstack.com/saltstack-enterprise/" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/enterprise_ad.jpg', 1) }}"/></a>
+        <a href="http://saltconf.com" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.jpg', 1) }}"/></a>
     </div>

doc/_themes/saltstack2/static/images/DOCBANNER.jpg (vendored, new binary file, 790 KiB; not shown)

doc/conf.py

@@ -77,6 +77,7 @@ MOCK_MODULES = [
     'yaml.nodes',
     'yaml.parser',
     'yaml.scanner',
+    'salt.utils.yamlloader',
     'zmq',
     'zmq.eventloop',
     'zmq.eventloop.ioloop',

@@ -125,6 +126,7 @@ MOCK_MODULES = [
     'ClusterShell',
     'ClusterShell.NodeSet',
     'django',
     'docker',
     'libvirt',
     'MySQLdb',
     'MySQLdb.cursors',

@@ -174,7 +176,7 @@ MOCK_MODULES = [

 for mod_name in MOCK_MODULES:
     if mod_name == 'psutil':
-        mock = Mock(mapping={'total': 0})  # Otherwise it will crash Sphinx
+        mock = Mock(mapping={'total': 0, 'version_info': (0, 6,0)})  # Otherwise it will crash Sphinx
     else:
         mock = Mock()
     sys.modules[mod_name] = mock

doc/ref/configuration/master.rst

@@ -902,6 +902,8 @@ what you are doing! Transports are explained in :ref:`Salt Transports

     transport: zeromq

 .. conf_master:: transport_opts

 ``transport_opts``
 ------------------

@@ -920,6 +922,27 @@ what you are doing! Transports are explained in :ref:`Salt Transports

     ret_port: 4606
     zeromq: []

+.. conf_master:: sock_pool_size
+
+``sock_pool_size``
+------------------
+
+Default: 1
+
+To avoid blocking while writing data to a socket, Salt supports a socket
+pool for its applications. For example, a job targeting a large number of
+hosts can otherwise block for a long time. The option is used by the ZeroMQ
+and TCP transports; the other transports do not need a socket pool by
+definition. Most Salt tools, including the CLI, work fine with a single
+socket in the pool. However, it is highly recommended to set the pool size
+to a value larger than 1 for other Salt applications, especially the Salt
+API, which must write to sockets concurrently.
+
+.. code-block:: yaml
+
+    sock_pool_size: 15
+
+
 .. _salt-ssh-configuration:
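Later in this diff the transports implement this option through a ``MessageClientPool`` that routes each send to the client with the shortest backlog. A minimal, self-contained sketch of the idea (a toy model for illustration, not Salt's implementation):

.. code-block:: python

    # Toy model of the sock_pool_size behavior: N sockets, and each send
    # goes to the least-loaded one, so concurrent writers do not serialize
    # on a single socket.
    class FakeSocket(object):
        def __init__(self):
            self.send_queue = []

        def send(self, msg):
            self.send_queue.append(msg)

    pool = [FakeSocket() for _ in range(15)]  # sock_pool_size: 15

    def send(msg):
        # same selection rule the Salt pool uses: shortest send_queue wins
        target = sorted(pool, key=lambda s: len(s.send_queue))[0]
        target.send(msg)

    for i in range(100):
        send('job-{0}'.format(i))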

(diff of one file suppressed because it is too large)

salt/auth/__init__.py

@@ -31,6 +31,7 @@ import salt.config
 import salt.loader
 import salt.transport.client
 import salt.utils
+import salt.utils.files
 import salt.utils.minions
 import salt.payload

@@ -227,8 +228,9 @@ class LoadAuth(object):
             tdata['groups'] = load['groups']

         try:
-            with salt.utils.fopen(t_path, 'w+b') as fp_:
-                fp_.write(self.serial.dumps(tdata))
+            with salt.utils.files.set_umask(0o177):
+                with salt.utils.fopen(t_path, 'w+b') as fp_:
+                    fp_.write(self.serial.dumps(tdata))
         except (IOError, OSError):
             log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
             return {}

@@ -666,14 +668,12 @@ class Resolver(object):
         tdata = self._send_token_request(load)
         if 'token' not in tdata:
             return tdata
-        oldmask = os.umask(0o177)
         try:
-            with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_:
-                fp_.write(tdata['token'])
+            with salt.utils.files.set_umask(0o177):
+                with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_:
+                    fp_.write(tdata['token'])
         except (IOError, OSError):
             pass
-        finally:
-            os.umask(oldmask)
         return tdata

     def mk_token(self, load):

salt/config/__init__.py

@@ -205,6 +205,9 @@ VALID_OPTS = {
     # The directory containing unix sockets for things like the event bus
     'sock_dir': str,

+    # The pool size of unix sockets; used to avoid blocking on zeromq and tcp communications.
+    'sock_pool_size': int,
+
     # Specifies how the file server should backup files, if enabled. The backups
     # live in the cache dir.
     'backup_mode': str,

@@ -1099,6 +1102,7 @@ DEFAULT_MINION_OPTS = {
     'grains_deep_merge': False,
     'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
     'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
+    'sock_pool_size': 1,
     'backup_mode': '',
     'renderer': 'yaml_jinja',
     'renderer_whitelist': [],

@@ -1349,6 +1353,7 @@ DEFAULT_MASTER_OPTS = {
     'user': _MASTER_USER,
     'worker_threads': 5,
     'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
+    'sock_pool_size': 1,
     'ret_port': 4506,
     'timeout': 5,
     'keep_jobs': 24,

@@ -2311,6 +2316,7 @@ def syndic_config(master_config_path,
         'sock_dir': os.path.join(
             opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
         ),
+        'sock_pool_size': master_opts['sock_pool_size'],
         'cachedir': master_opts['cachedir'],
     }
     opts.update(syndic_opts)

salt/grains/core.py

@@ -1117,6 +1117,7 @@ _OS_NAME_MAP = {
     'nilrt': 'NILinuxRT',
     'nilrt-xfce': 'NILinuxRT-XFCE',
     'manjaro': 'Manjaro',
     'manjarolin': 'Manjaro',
     'antergos': 'Antergos',
     'sles': 'SUSE',
     'void': 'Void',

salt/renderers/pyobjects.py

@@ -223,21 +223,57 @@ different grain matches.

 class Samba(Map):
     merge = 'samba:lookup'
+    # NOTE: priority is new to 2017.7.0
+    priority = ('os_family', 'os')

-    class Ubuntu:
-        __grain__ = 'os'
-        service = 'smbd'
-
     class Debian:
         server = 'samba'
         client = 'samba-client'
         service = 'samba'

+    class Ubuntu:
+        __grain__ = 'os'
+        service = 'smbd'
+
-    class RedHat:
+    class RHEL:
+        __match__ = 'RedHat'
         server = 'samba'
         client = 'samba'
         service = 'smb'

+.. note::
+    By default, the ``os_family`` grain will be used as the target for
+    matching. This can be overridden by specifying a ``__grain__`` attribute.
+
+    If a ``__match__`` attribute is defined for a given class, then that value
+    will be matched against the targeted grain, otherwise the class name's
+    value will be matched.
+
+Given the above example, the following is true:
+
+1. Minions with an ``os_family`` of **Debian** will be assigned the
+   attributes defined in the **Debian** class.
+2. Minions with an ``os`` grain of **Ubuntu** will be assigned the
+   attributes defined in the **Ubuntu** class.
+3. Minions with an ``os_family`` grain of **RedHat** will be assigned the
+   attributes defined in the **RHEL** class.
+
+That said, sometimes a minion may match more than one class. For instance,
+in the above example, Ubuntu minions will match both the **Debian** and
+**Ubuntu** classes, since Ubuntu has an ``os_family`` grain of **Debian**
+and an ``os`` grain of **Ubuntu**. As of the 2017.7.0 release, the order is
+dictated by the order of declaration, with classes defined later overriding
+earlier ones. Additionally, 2017.7.0 adds support for explicitly defining
+the ordering using an optional attribute called ``priority``.
+
+Given the above example, ``os_family`` matches will be processed first,
+with ``os`` matches processed after. This would have the effect of
+assigning ``smbd`` as the ``service`` attribute on Ubuntu minions. If the
+``priority`` item was not defined, or if the order of the items in the
+``priority`` tuple were reversed, Ubuntu minions would have a ``service``
+attribute of ``samba``, since ``os_family`` matches would have been
+processed second.
+
 To use this new data you can import it into your state file and then access
 your attributes. To access the data in the map you simply access the attribute
 name on the base class that is extending Map. Assuming the above Map was in the

salt/state.py

@@ -761,7 +761,7 @@ class State(object):
             agg_opt = low['aggregate']
             if agg_opt is True:
                 agg_opt = [low['state']]
-            else:
+            elif not isinstance(agg_opt, list):
                 return low
             if low['state'] in agg_opt and not low.get('__agg__'):
                 agg_fun = '{0}.mod_aggregate'.format(low['state'])

salt/states/archive.py

@@ -375,7 +375,7 @@ def extracted(name,

         .. versionadded:: 2016.11.0

-    source_hash_update
+    source_hash_update : False
         Set this to ``True`` if archive should be extracted if source_hash has
         changed. This would extract regardless of the ``if_missing`` parameter.

@@ -844,10 +844,10 @@ def extracted(name,
     if source_hash:
         try:
             source_sum = __salt__['file.get_source_sum'](
-                             source=source_match,
-                             source_hash=source_hash,
-                             source_hash_name=source_hash_name,
-                             saltenv=__env__)
+                source=source_match,
+                source_hash=source_hash,
+                source_hash_name=source_hash_name,
+                saltenv=__env__)
         except CommandExecutionError as exc:
             ret['comment'] = exc.strerror
             return ret

@@ -868,7 +868,7 @@ def extracted(name,
             # Prevent a traceback from attempting to read from a directory path
             salt.utils.rm_rf(cached_source)

-    existing_cached_source_sum = _read_cached_checksum(cached_source) \
+    existing_cached_source_sum = _read_cached_checksum(cached_source)

     if source_is_local:
         # No need to download archive, it's local to the minion

@@ -935,15 +935,16 @@ def extracted(name,
                 )
                 return file_result

-        if source_hash:
-            _update_checksum(cached_source)
-
     else:
         log.debug(
             'Archive %s is already in cache',
             salt.utils.url.redact_http_basic_auth(source_match)
         )

+    if source_hash and source_hash_update and not skip_verify:
+        # Create local hash sum file if we're going to track sum update
+        _update_checksum(cached_source)
+
     if archive_format == 'zip' and not password:
         log.debug('Checking %s to see if it is password-protected',
                   source_match)

@@ -1147,6 +1148,15 @@ def extracted(name,
     created_destdir = False

     if extraction_needed:
+        if source_is_local and source_hash and not skip_verify:
+            ret['result'] = __salt__['file.check_hash'](source_match, source_sum['hsum'])
+            if not ret['result']:
+                ret['comment'] = \
+                    '{0} does not match the desired source_hash {1}'.format(
+                        source_match, source_sum['hsum']
+                    )
+                return ret
+
         if __opts__['test']:
             ret['result'] = None
             ret['comment'] = \

salt/states/composer.py

@@ -66,17 +66,17 @@ def installed(name,
     '''
     Verify that the correct versions of composer dependencies are present.

-    dir
-        Directory location of the composer.json file.
+    name
+        Directory location of the ``composer.json`` file.

     composer
-        Location of the composer.phar file. If not set composer will
-        just execute "composer" as if it is installed globally.
-        (i.e. /path/to/composer.phar)
+        Location of the ``composer.phar`` file. If not set composer will
+        just execute ``composer`` as if it is installed globally.
+        (i.e. ``/path/to/composer.phar``)

     php
         Location of the php executable to use with composer.
-        (i.e. /usr/bin/php)
+        (i.e. ``/usr/bin/php``)

     user
         Which system user to run composer as.

@@ -84,32 +84,32 @@ def installed(name,
         .. versionadded:: 2014.1.4

     prefer_source
-        --prefer-source option of composer.
+        ``--prefer-source`` option of composer.

     prefer_dist
-        --prefer-dist option of composer.
+        ``--prefer-dist`` option of composer.

     no_scripts
-        --no-scripts option of composer.
+        ``--no-scripts`` option of composer.

     no_plugins
-        --no-plugins option of composer.
+        ``--no-plugins`` option of composer.

     optimize
-        --optimize-autoloader option of composer. Recommended for production.
+        ``--optimize-autoloader`` option of composer. Recommended for production.

     no_dev
-        --no-dev option for composer. Recommended for production.
+        ``--no-dev`` option for composer. Recommended for production.

     quiet
-        --quiet option for composer. Whether or not to return output from composer.
+        ``--quiet`` option for composer. Whether or not to return output from composer.

     composer_home
-        $COMPOSER_HOME environment variable
+        ``$COMPOSER_HOME`` environment variable

     always_check
-        If True, _always_ run `composer install` in the directory. This is the
-        default behavior. If False, only run `composer install` if there is no
+        If ``True``, *always* run ``composer install`` in the directory. This is the
+        default behavior. If ``False``, only run ``composer install`` if there is no
         vendor directory present.
     '''
     ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

@@ -193,17 +193,17 @@ def update(name,
     Composer update the directory to ensure we have the latest versions
     of all project dependencies.

-    dir
-        Directory location of the composer.json file.
+    name
+        Directory location of the ``composer.json`` file.

     composer
-        Location of the composer.phar file. If not set composer will
-        just execute "composer" as if it is installed globally.
+        Location of the ``composer.phar`` file. If not set composer will
+        just execute ``composer`` as if it is installed globally.
         (i.e. /path/to/composer.phar)

     php
         Location of the php executable to use with composer.
-        (i.e. /usr/bin/php)
+        (i.e. ``/usr/bin/php``)

     user
         Which system user to run composer as.

@@ -211,28 +211,28 @@ def update(name,
         .. versionadded:: 2014.1.4

     prefer_source
-        --prefer-source option of composer.
+        ``--prefer-source`` option of composer.

     prefer_dist
-        --prefer-dist option of composer.
+        ``--prefer-dist`` option of composer.

     no_scripts
-        --no-scripts option of composer.
+        ``--no-scripts`` option of composer.

     no_plugins
-        --no-plugins option of composer.
+        ``--no-plugins`` option of composer.

     optimize
-        --optimize-autoloader option of composer. Recommended for production.
+        ``--optimize-autoloader`` option of composer. Recommended for production.

     no_dev
-        --no-dev option for composer. Recommended for production.
+        ``--no-dev`` option for composer. Recommended for production.

     quiet
-        --quiet option for composer. Whether or not to return output from composer.
+        ``--quiet`` option for composer. Whether or not to return output from composer.

     composer_home
-        $COMPOSER_HOME environment variable
+        ``$COMPOSER_HOME`` environment variable
     '''
     ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

salt/transport/__init__.py

@@ -3,9 +3,13 @@
 Encapsulate the different transports available to Salt.
 '''
 from __future__ import absolute_import
+import logging

 # Import third party libs
 import salt.ext.six as six
+from salt.ext.six.moves import range
+
+log = logging.getLogger(__name__)


 def iter_transport_opts(opts):

@@ -47,3 +51,19 @@ class Channel(object):
         # salt.transport.channel.Channel.factory()
         from salt.transport.client import ReqChannel
         return ReqChannel.factory(opts, **kwargs)
+
+
+class MessageClientPool(object):
+    def __init__(self, tgt, opts, args=None, kwargs=None):
+        sock_pool_size = opts['sock_pool_size'] if 'sock_pool_size' in opts else 1
+        if sock_pool_size < 1:
+            log.warn('sock_pool_size is not correctly set; '
+                     'the option should be greater than 0 but was {0}'.format(sock_pool_size))
+            sock_pool_size = 1
+
+        if args is None:
+            args = ()
+        if kwargs is None:
+            kwargs = {}
+
+        self.message_clients = [tgt(*args, **kwargs) for _ in range(sock_pool_size)]
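
A hedged usage sketch of the new pool with a stand-in client class (the real callers below pass SaltMessageClient or AsyncReqMessageClient); only a send_queue attribute is needed by the pooled send path:

import salt.transport

class EchoClient(object):
    '''Stand-in for a transport client; just records constructor arguments.'''
    def __init__(self, *args, **kwargs):
        self.send_queue = []

    def send(self, msg):
        return msg

# Builds three clients; a sock_pool_size below 1 would be clamped to 1.
pool = salt.transport.MessageClientPool(EchoClient, {'sock_pool_size': 3}, args=())
# pick the least-loaded client, the same rule the subclasses use for send()
least_busy = sorted(pool.message_clients, key=lambda c: len(c.send_queue))[0]
least_busy.send('ping')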

salt/transport/tcp.py

@@ -267,9 +267,9 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
         host, port = parse.netloc.rsplit(':', 1)
         self.master_addr = (host, int(port))
         self._closing = False
-        self.message_client = SaltMessageClient(
-            self.opts, host, int(port), io_loop=self.io_loop,
-            resolver=resolver)
+        self.message_client = SaltMessageClientPool(self.opts,
+                                                    args=(self.opts, host, int(port),),
+                                                    kwargs={'io_loop': self.io_loop, 'resolver': resolver})

     def close(self):
         if self._closing:

@@ -404,7 +404,7 @@ class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.tran
         def _do_transfer():
             msg = self._package_load(self.auth.crypticle.dumps(load))
             package = salt.transport.frame.frame_msg(msg, header=None)
-            yield self.message_client._stream.write(package)
+            yield self.message_client.write_to_stream(package)
             raise tornado.gen.Return(True)

         if force_auth or not self.auth.authenticated:

@@ -494,13 +494,12 @@ class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.tran
         if not self.auth.authenticated:
             yield self.auth.authenticate()
         if self.auth.authenticated:
-            self.message_client = SaltMessageClient(
-                self.opts,
-                self.opts['master_ip'],
-                int(self.auth.creds['publish_port']),
-                io_loop=self.io_loop,
-                connect_callback=self.connect_callback,
-                disconnect_callback=self.disconnect_callback)
+            self.message_client = SaltMessageClientPool(
+                self.opts,
+                args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
+                kwargs={'io_loop': self.io_loop,
+                        'connect_callback': self.connect_callback,
+                        'disconnect_callback': self.disconnect_callback})
             yield self.message_client.connect()  # wait for the client to be connected
             self.connected = True
         # TODO: better exception handling...

@@ -776,6 +775,43 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
         return stream.connect(addr)


+class SaltMessageClientPool(salt.transport.MessageClientPool):
+    '''
+    Wrapper class around SaltMessageClient that avoids blocking while writing
+    data to the socket.
+    '''
+    def __init__(self, opts, args=None, kwargs=None):
+        super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        for message_client in self.message_clients:
+            message_client.close()
+        self.message_clients = []
+
+    @tornado.gen.coroutine
+    def connect(self):
+        futures = []
+        for message_client in self.message_clients:
+            futures.append(message_client.connect())
+        for future in futures:
+            yield future
+        raise tornado.gen.Return(None)
+
+    def on_recv(self, *args, **kwargs):
+        for message_client in self.message_clients:
+            message_client.on_recv(*args, **kwargs)
+
+    def send(self, *args, **kwargs):
+        message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
+        return message_clients[0].send(*args, **kwargs)
+
+    def write_to_stream(self, *args, **kwargs):
+        message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
+        return message_clients[0]._stream.write(*args, **kwargs)
+
+
 # TODO consolidate with IPCClient
 # TODO: limit in-flight messages.
 # TODO: singleton? Something to not re-create the tcp connection so much

salt/transport/zeromq.py

@@ -118,8 +118,9 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
                 # copied. The reason is the same as the io_loop skip above.
                 setattr(result, key,
                         AsyncReqMessageClientPool(result.opts,
-                                                  self.master_uri,
-                                                  io_loop=result._io_loop))
+                                                  args=(result.opts, self.master_uri,),
+                                                  kwargs={'io_loop': self._io_loop}))
+
                 continue
             setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
         return result

@@ -156,9 +157,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
         # we don't need to worry about auth as a kwarg, since its a singleton
         self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
-        self.message_client = AsyncReqMessageClientPool(self.opts,
-                                                        self.master_uri,
-                                                        io_loop=self._io_loop,
-                                                        )
+        self.message_client = AsyncReqMessageClientPool(self.opts,
+                                                        args=(self.opts, self.master_uri,),
+                                                        kwargs={'io_loop': self._io_loop})

     def __del__(self):
         '''

@@ -847,32 +847,24 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
         context.term()


-# TODO: unit tests!
-class AsyncReqMessageClientPool(object):
-    def __init__(self, opts, addr, linger=0, io_loop=None, socket_pool=1):
-        self.opts = opts
-        self.addr = addr
-        self.linger = linger
-        self.io_loop = io_loop
-        self.socket_pool = socket_pool
-        self.message_clients = []
+class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
+    '''
+    Wrapper class around AsyncReqMessageClient that avoids blocking while
+    writing data to the socket.
+    '''
+    def __init__(self, opts, args=None, kwargs=None):
+        super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
+
+    def __del__(self):
+        self.destroy()

     def destroy(self):
         for message_client in self.message_clients:
             message_client.destroy()
         self.message_clients = []

-    def __del__(self):
-        self.destroy()
-
-    def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
-        if len(self.message_clients) < self.socket_pool:
-            message_client = AsyncReqMessageClient(self.opts, self.addr, self.linger, self.io_loop)
-            self.message_clients.append(message_client)
-            return message_client.send(message, timeout, tries, future, callback, raw)
-        else:
-            available_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
-            return available_clients[0].send(message, timeout, tries, future, callback, raw)
+    def send(self, *args, **kwargs):
+        message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
+        return message_clients[0].send(*args, **kwargs)


 # TODO: unit tests!

salt/utils/files.py

@@ -253,3 +253,19 @@ def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
         if obtained_lock:
             os.remove(lock_fn)
             log.trace('Write lock for %s (%s) released', path, lock_fn)
+
+
+@contextlib.contextmanager
+def set_umask(mask):
+    '''
+    Temporarily set the umask and restore once the contextmanager exits
+    '''
+    if salt.utils.is_windows():
+        # Don't attempt on Windows
+        yield
+    else:
+        try:
+            orig_mask = os.umask(mask)
+            yield
+        finally:
+            os.umask(orig_mask)
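
A hedged usage sketch of the new helper, mirroring the salt/auth/__init__.py call sites above (the token path here is hypothetical):

import salt.utils.files

# The umask is tightened only around the write and restored afterwards, so
# the file is created with mode 0600 (0666 & ~0177) regardless of the
# caller's umask.
with salt.utils.files.set_umask(0o177):
    with open('/var/cache/salt/master/tokens/example', 'w') as fp_:  # hypothetical path
        fp_.write('token-data')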

salt/utils/pyobjects.py

@@ -330,10 +330,7 @@ class MapMeta(six.with_metaclass(Prepareable, type)):
             # if so use it, otherwise use the name of the object
             # this is so that you can match complex values, which the python
             # class name syntax does not allow
-            if hasattr(filt, '__match__'):
-                match = filt.__match__
-            else:
-                match = item
+            match = getattr(filt, '__match__', item)

             match_attrs = {}
             for name in filt.__dict__:

@@ -342,6 +339,32 @@ class MapMeta(six.with_metaclass(Prepareable, type)):
             match_info.append((grain, match, match_attrs))

+        # Reorder based on priority
+        try:
+            if not hasattr(cls.priority, '__iter__'):
+                log.error('pyobjects: priority must be an iterable')
+            else:
+                new_match_info = []
+                for grain in cls.priority:
+                    # Using list() here because we will be modifying
+                    # match_info during iteration
+                    for index, item in list(enumerate(match_info)):
+                        try:
+                            if item[0] == grain:
+                                # Add item to new list
+                                new_match_info.append(item)
+                                # Clear item from old list
+                                match_info[index] = None
+                        except TypeError:
+                            # Already moved this item to new list
+                            pass
+                # Add in any remaining items not defined in priority
+                new_match_info.extend([x for x in match_info if x is not None])
+                # Save reordered list as the match_info list
+                match_info = new_match_info
+        except AttributeError:
+            pass
+
         # Check for matches and update the attrs dict accordingly
         attrs = {}
         if match_info:
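
A standalone sketch of the reordering pass above, runnable outside MapMeta, assuming match_info is a list of (grain, match, attrs) tuples; moved entries are blanked with None, which is what the TypeError guard in the real code tolerates:

priority = ('os_family', 'os')
match_info = [('os', 'Ubuntu', {'service': 'smbd'}),
              ('os_family', 'Debian', {'service': 'samba'})]

new_match_info = []
for grain in priority:
    for index, item in list(enumerate(match_info)):
        if item is not None and item[0] == grain:
            new_match_info.append(item)
            match_info[index] = None
new_match_info.extend(x for x in match_info if x is not None)

# os_family entries now come first, so 'os' matches are applied later and win
print(new_match_info)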

tests/integration/states/test_archive.py

@@ -12,6 +12,7 @@ import os
 from tests.support.case import ModuleCase
 from tests.support.helpers import skip_if_not_root, Webserver
 from tests.support.mixins import SaltReturnAssertsMixin
+from tests.support.paths import FILES

 # Import salt libs
 import salt.utils

@@ -24,8 +25,12 @@ if salt.utils.is_windows():
 else:
     ARCHIVE_DIR = '/tmp/archive'

 ARCHIVE_NAME = 'custom.tar.gz'
 ARCHIVE_TAR_SOURCE = 'http://localhost:{0}/{1}'.format(9999, ARCHIVE_NAME)
+ARCHIVE_LOCAL_TAR_SOURCE = 'file://{0}'.format(os.path.join(FILES, 'file', 'base', ARCHIVE_NAME))
 UNTAR_FILE = os.path.join(ARCHIVE_DIR, 'custom/README')
 ARCHIVE_TAR_HASH = 'md5=7643861ac07c30fe7d2310e9f25ca514'
+ARCHIVE_TAR_BAD_HASH = 'md5=d41d8cd98f00b204e9800998ecf8427e'


 class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):

@@ -178,3 +183,52 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
         self.assertSaltTrueReturn(ret)

         self._check_extracted(UNTAR_FILE)
+
+    def test_local_archive_extracted(self):
+        '''
+        test archive.extracted with local file
+        '''
+        ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
+                             source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar')
+        log.debug('ret = %s', ret)
+
+        self.assertSaltTrueReturn(ret)
+
+        self._check_extracted(UNTAR_FILE)
+
+    def test_local_archive_extracted_skip_verify(self):
+        '''
+        test archive.extracted with local file, bad hash and skip_verify
+        '''
+        ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
+                             source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
+                             source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True)
+        log.debug('ret = %s', ret)
+
+        self.assertSaltTrueReturn(ret)
+
+        self._check_extracted(UNTAR_FILE)
+
+    def test_local_archive_extracted_with_source_hash(self):
+        '''
+        test archive.extracted with local file and valid hash
+        '''
+        ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
+                             source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
+                             source_hash=ARCHIVE_TAR_HASH)
+        log.debug('ret = %s', ret)
+
+        self.assertSaltTrueReturn(ret)
+
+        self._check_extracted(UNTAR_FILE)
+
+    def test_local_archive_extracted_with_bad_source_hash(self):
+        '''
+        test archive.extracted with local file and bad hash
+        '''
+        ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
+                             source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
+                             source_hash=ARCHIVE_TAR_BAD_HASH)
+        log.debug('ret = %s', ret)
+
+        self.assertSaltFalseReturn(ret)

tests/unit/test_pyobjects.py

@@ -2,9 +2,12 @@

 # Import Python libs
 from __future__ import absolute_import
+import jinja2
+import logging
 import os
 import shutil
 import tempfile
+import textwrap
 import uuid

 # Import Salt Testing libs

@@ -19,6 +22,9 @@ from salt.template import compile_template
 from salt.utils.odict import OrderedDict
 from salt.utils.pyobjects import (StateFactory, State, Registry,
                                   SaltObject, InvalidFunction, DuplicateState)
+
+log = logging.getLogger(__name__)
+
 File = StateFactory('file')
 Service = StateFactory('service')

@@ -58,33 +64,37 @@ Service = StateFactory('service')
 Service.running(extend('apache'), watch=[{'file': '/etc/file'}])
 '''

-map_template = '''#!pyobjects
+map_prefix = '''\
+#!pyobjects
 from salt.utils.pyobjects import StateFactory
 Service = StateFactory('service')


+{% macro priority(value) %}
+    priority = {{ value }}
+{% endmacro %}
 class Samba(Map):
     __merge__ = 'samba:lookup'
-
-    class Debian:
-        server = 'samba'
-        client = 'samba-client'
-        service = 'samba'
-
-    class RougeChapeau:
-        __match__ = 'RedHat'
-        server = 'samba'
-        client = 'samba'
-        service = 'smb'
-
-    class Ubuntu:
-        __grain__ = 'os'
-        service = 'smbd'
 '''

+map_suffix = '''
 with Pkg.installed("samba", names=[Samba.server, Samba.client]):
     Service.running("samba", name=Samba.service)
 '''

+map_data = {
+    'debian': "    class Debian:\n"
+              "        server = 'samba'\n"
+              "        client = 'samba-client'\n"
+              "        service = 'samba'\n",
+    'centos': "    class RougeChapeau:\n"
+              "        __match__ = 'RedHat'\n"
+              "        server = 'samba'\n"
+              "        client = 'samba'\n"
+              "        service = 'smb'\n",
+    'ubuntu': "    class Ubuntu:\n"
+              "        __grain__ = 'os'\n"
+              "        service = 'smbd'\n"
+}
+
 import_template = '''#!pyobjects
 import salt://map.sls

@@ -140,6 +150,24 @@ with Pkg.installed("pkg"):
 '''


+class MapBuilder(object):
+    def build_map(self, template=None):
+        '''
+        Build from a specific template or just use a default if no template
+        is passed to this function.
+        '''
+        if template is None:
+            template = textwrap.dedent('''\
+                {{ ubuntu }}
+                {{ centos }}
+                {{ debian }}
+                ''')
+        full_template = map_prefix + template + map_suffix
+        ret = jinja2.Template(full_template).render(**map_data)
+        log.debug('built map: \n%s', ret)
+        return ret
+
+
 class StateTests(TestCase):
     def setUp(self):
         Registry.empty()

@@ -292,7 +320,7 @@ class RendererMixin(object):
                        state.opts['renderer_whitelist'])


-class RendererTests(RendererMixin, StateTests):
+class RendererTests(RendererMixin, StateTests, MapBuilder):
     def test_basic(self):
         ret = self.render(basic_template)
         self.assertEqual(ret, OrderedDict([

@@ -350,7 +378,7 @@ class RendererTests(RendererMixin, StateTests):
             })
         ]))

-        self.write_template_file("map.sls", map_template)
+        self.write_template_file("map.sls", self.build_map())
         render_and_assert(import_template)
         render_and_assert(from_import_template)
         render_and_assert(import_as_template)

@@ -359,7 +387,7 @@ class RendererTests(RendererMixin, StateTests):
         render_and_assert(recursive_import_template)

     def test_import_scope(self):
-        self.write_template_file("map.sls", map_template)
+        self.write_template_file("map.sls", self.build_map())
         self.write_template_file("recursive_map.sls", recursive_map_template)

         def do_render():

@@ -401,34 +429,97 @@ class RendererTests(RendererMixin, StateTests):
         ]))


-class MapTests(RendererMixin, TestCase):
+class MapTests(RendererMixin, TestCase, MapBuilder):
     maxDiff = None

-    def test_map(self):
-        def samba_with_grains(grains):
-            return self.render(map_template, {'grains': grains})
+    debian_grains = {'os_family': 'Debian', 'os': 'Debian'}
+    ubuntu_grains = {'os_family': 'Debian', 'os': 'Ubuntu'}
+    centos_grains = {'os_family': 'RedHat', 'os': 'CentOS'}
+
+    debian_attrs = ('samba', 'samba-client', 'samba')
+    ubuntu_attrs = ('samba', 'samba-client', 'smbd')
+    centos_attrs = ('samba', 'samba', 'smb')
+
+    def samba_with_grains(self, template, grains):
+        return self.render(template, {'grains': grains})

-        def assert_ret(ret, server, client, service):
-            self.assertDictEqual(ret, OrderedDict([
-                ('samba', OrderedDict([
-                    ('pkg.installed', [
-                        {'names': [server, client]}
-                    ]),
-                    ('service.running', [
-                        {'name': service},
-                        {'require': [{'pkg': 'samba'}]}
-                    ])
-                ]))
-            ]))
+    def assert_equal(self, ret, server, client, service):
+        self.assertDictEqual(ret, OrderedDict([
+            ('samba', OrderedDict([
+                ('pkg.installed', [
+                    {'names': [server, client]}
+                ]),
+                ('service.running', [
+                    {'name': service},
+                    {'require': [{'pkg': 'samba'}]}
+                ])
+            ]))
+        ]))

-        ret = samba_with_grains({'os_family': 'Debian', 'os': 'Debian'})
-        assert_ret(ret, 'samba', 'samba-client', 'samba')
+    def assert_not_equal(self, ret, server, client, service):
+        try:
+            self.assert_equal(ret, server, client, service)
+        except AssertionError:
+            pass
+        else:
+            raise AssertionError('both dicts are equal')

-        ret = samba_with_grains({'os_family': 'Debian', 'os': 'Ubuntu'})
-        assert_ret(ret, 'samba', 'samba-client', 'smbd')
+    def test_map(self):
+        '''
+        Test declarative ordering
+        '''
+        # With declarative ordering, the ubuntu-specific service name should
+        # override the one inherited from debian.
+        template = self.build_map(textwrap.dedent('''\
+            {{ debian }}
+            {{ centos }}
+            {{ ubuntu }}
+            '''))

-        ret = samba_with_grains({'os_family': 'RedHat', 'os': 'CentOS'})
-        assert_ret(ret, 'samba', 'samba', 'smb')
+        ret = self.samba_with_grains(template, self.debian_grains)
+        self.assert_equal(ret, *self.debian_attrs)
+
+        ret = self.samba_with_grains(template, self.ubuntu_grains)
+        self.assert_equal(ret, *self.ubuntu_attrs)
+
+        ret = self.samba_with_grains(template, self.centos_grains)
+        self.assert_equal(ret, *self.centos_attrs)
+
+        # Switching the order, debian should still work fine but ubuntu should
+        # no longer match, since the debian service name should override the
+        # ubuntu one.
+        template = self.build_map(textwrap.dedent('''\
+            {{ ubuntu }}
+            {{ debian }}
+            '''))
+
+        ret = self.samba_with_grains(template, self.debian_grains)
+        self.assert_equal(ret, *self.debian_attrs)
+
+        ret = self.samba_with_grains(template, self.ubuntu_grains)
+        self.assert_not_equal(ret, *self.ubuntu_attrs)
+
+    def test_map_with_priority(self):
+        '''
+        With declarative ordering, the debian service name would override the
+        ubuntu one since debian comes second. This will test overriding this
+        behavior using the priority attribute.
+        '''
+        template = self.build_map(textwrap.dedent('''\
+            {{ priority(('os_family', 'os')) }}
+            {{ ubuntu }}
+            {{ centos }}
+            {{ debian }}
+            '''))
+
+        ret = self.samba_with_grains(template, self.debian_grains)
+        self.assert_equal(ret, *self.debian_attrs)
+
+        ret = self.samba_with_grains(template, self.ubuntu_grains)
+        self.assert_equal(ret, *self.ubuntu_attrs)
+
+        ret = self.samba_with_grains(template, self.centos_grains)
+        self.assert_equal(ret, *self.centos_attrs)


 class SaltObjectTests(TestCase):

tests/unit/test_transport.py (new file, 52 lines)

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-

# Import python libs
from __future__ import absolute_import
import logging

from salt.transport import MessageClientPool

# Import Salt Testing libs
from tests.support.unit import TestCase

log = logging.getLogger(__name__)


class MessageClientPoolTest(TestCase):

    class MockClass(object):
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

    def test_init(self):
        opts = {'sock_pool_size': 10}
        args = (0,)
        kwargs = {'kwarg': 1}
        message_client_pool = MessageClientPool(self.MockClass, opts, args=args, kwargs=kwargs)
        self.assertEqual(opts['sock_pool_size'], len(message_client_pool.message_clients))
        for message_client in message_client_pool.message_clients:
            self.assertEqual(message_client.args, args)
            self.assertEqual(message_client.kwargs, kwargs)

    def test_init_without_config(self):
        opts = {}
        args = (0,)
        kwargs = {'kwarg': 1}
        message_client_pool = MessageClientPool(self.MockClass, opts, args=args, kwargs=kwargs)
        # The size of the pool is set to 1 by the MessageClientPool init method.
        self.assertEqual(1, len(message_client_pool.message_clients))
        for message_client in message_client_pool.message_clients:
            self.assertEqual(message_client.args, args)
            self.assertEqual(message_client.kwargs, kwargs)

    def test_init_less_than_one(self):
        opts = {'sock_pool_size': -1}
        args = (0,)
        kwargs = {'kwarg': 1}
        message_client_pool = MessageClientPool(self.MockClass, opts, args=args, kwargs=kwargs)
        # The size of the pool is set to 1 by the MessageClientPool init method.
        self.assertEqual(1, len(message_client_pool.message_clients))
        for message_client in message_client_pool.message_clients:
            self.assertEqual(message_client.args, args)
            self.assertEqual(message_client.kwargs, kwargs)

tests/unit/transport/test_tcp.py

@@ -9,7 +9,8 @@ import threading

 import tornado.gen
 import tornado.ioloop
-from tornado.testing import AsyncTestCase
+import tornado.concurrent
+from tornado.testing import AsyncTestCase, gen_test

 import salt.config
 import salt.ext.six as six

@@ -17,11 +18,14 @@ import salt.utils
 import salt.transport.server
 import salt.transport.client
 import salt.exceptions
+from salt.ext.six.moves import range
+from salt.transport.tcp import SaltMessageClientPool

 # Import Salt Testing libs
 from tests.support.unit import TestCase, skipIf
 from tests.support.helpers import get_unused_localhost_port, flaky
 from tests.support.mixins import AdaptedConfigurationTestCaseMixin
+from tests.support.mock import MagicMock, patch
 from tests.unit.transport.mixins import PubChannelMixin, ReqChannelMixin

@@ -234,3 +238,74 @@ class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
     '''
     Tests around the publish system
     '''
+
+
+class SaltMessageClientPoolTest(AsyncTestCase):
+    def setUp(self):
+        super(SaltMessageClientPoolTest, self).setUp()
+        sock_pool_size = 5
+        with patch('salt.transport.tcp.SaltMessageClient.__init__', MagicMock(return_value=None)):
+            self.message_client_pool = SaltMessageClientPool({'sock_pool_size': sock_pool_size},
+                                                             args=({}, '', 0))
+        self.original_message_clients = self.message_client_pool.message_clients
+        self.message_client_pool.message_clients = [MagicMock() for _ in range(sock_pool_size)]
+
+    def tearDown(self):
+        with patch('salt.transport.tcp.SaltMessageClient.close', MagicMock(return_value=None)):
+            del self.original_message_clients
+        super(SaltMessageClientPoolTest, self).tearDown()
+
+    def test_send(self):
+        for message_client_mock in self.message_client_pool.message_clients:
+            message_client_mock.send_queue = [0, 0, 0]
+            message_client_mock.send.return_value = []
+        self.assertEqual([], self.message_client_pool.send())
+        self.message_client_pool.message_clients[2].send_queue = [0]
+        self.message_client_pool.message_clients[2].send.return_value = [1]
+        self.assertEqual([1], self.message_client_pool.send())
+
+    def test_write_to_stream(self):
+        for message_client_mock in self.message_client_pool.message_clients:
+            message_client_mock.send_queue = [0, 0, 0]
+            message_client_mock._stream.write.return_value = []
+        self.assertEqual([], self.message_client_pool.write_to_stream(''))
+        self.message_client_pool.message_clients[2].send_queue = [0]
+        self.message_client_pool.message_clients[2]._stream.write.return_value = [1]
+        self.assertEqual([1], self.message_client_pool.write_to_stream(''))
+
+    def test_close(self):
+        self.message_client_pool.close()
+        self.assertEqual([], self.message_client_pool.message_clients)
+
+    def test_on_recv(self):
+        for message_client_mock in self.message_client_pool.message_clients:
+            message_client_mock.on_recv.return_value = None
+        self.message_client_pool.on_recv()
+        for message_client_mock in self.message_client_pool.message_clients:
+            self.assertTrue(message_client_mock.on_recv.called)
+
+    def test_connect_all(self):
+        @gen_test
+        def test_connect(self):
+            yield self.message_client_pool.connect()
+
+        for message_client_mock in self.message_client_pool.message_clients:
+            future = tornado.concurrent.Future()
+            future.set_result('foo')
+            message_client_mock.connect.return_value = future
+
+        self.assertIsNone(test_connect(self))
+
+    def test_connect_partial(self):
+        @gen_test(timeout=0.1)
+        def test_connect(self):
+            yield self.message_client_pool.connect()
+
+        for idx, message_client_mock in enumerate(self.message_client_pool.message_clients):
+            future = tornado.concurrent.Future()
+            if idx % 2 == 0:
+                future.set_result('foo')
+            message_client_mock.connect.return_value = future
+
+        with self.assertRaises(tornado.ioloop.TimeoutError):
+            test_connect(self)

tests/unit/transport/test_zeromq.py

@@ -30,12 +30,15 @@ import salt.utils
 import salt.transport.server
 import salt.transport.client
 import salt.exceptions
+from salt.ext.six.moves import range
+from salt.transport.zeromq import AsyncReqMessageClientPool

 # Import test support libs
 from tests.support.paths import TMP_CONF_DIR
 from tests.support.unit import TestCase, skipIf
 from tests.support.helpers import flaky, get_unused_localhost_port
 from tests.support.mixins import AdaptedConfigurationTestCaseMixin
+from tests.support.mock import MagicMock, patch
 from tests.unit.transport.mixins import PubChannelMixin, ReqChannelMixin

 ON_SUSE = False

@@ -271,3 +274,34 @@ class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
     '''
     def get_new_ioloop(self):
         return zmq.eventloop.ioloop.ZMQIOLoop()
+
+
+class AsyncReqMessageClientPoolTest(TestCase):
+    def setUp(self):
+        super(AsyncReqMessageClientPoolTest, self).setUp()
+        sock_pool_size = 5
+        with patch('salt.transport.zeromq.AsyncReqMessageClient.__init__', MagicMock(return_value=None)):
+            self.message_client_pool = AsyncReqMessageClientPool({'sock_pool_size': sock_pool_size},
+                                                                 args=({}, ''))
+        self.original_message_clients = self.message_client_pool.message_clients
+        self.message_client_pool.message_clients = [MagicMock() for _ in range(sock_pool_size)]
+
+    def tearDown(self):
+        with patch('salt.transport.zeromq.AsyncReqMessageClient.destroy', MagicMock(return_value=None)):
+            del self.original_message_clients
+        super(AsyncReqMessageClientPoolTest, self).tearDown()
+
+    def test_send(self):
+        for message_client_mock in self.message_client_pool.message_clients:
+            message_client_mock.send_queue = [0, 0, 0]
+            message_client_mock.send.return_value = []
+
+        self.assertEqual([], self.message_client_pool.send())
+
+        self.message_client_pool.message_clients[2].send_queue = [0]
+        self.message_client_pool.message_clients[2].send.return_value = [1]
+        self.assertEqual([1], self.message_client_pool.send())
+
+    def test_destroy(self):
+        self.message_client_pool.destroy()
+        self.assertEqual([], self.message_client_pool.message_clients)

tests/unit/utils/test_network.py

@@ -104,8 +104,13 @@ class NetworkTestCase(TestCase):
         self.assertEqual(ret, '10.1.2.3')

     def test_host_to_ips(self):
+        '''
+        NOTE: When this test fails it's usually because the IP address has
+        changed. In these cases, we just need to update the IP address in the
+        assertion.
+        '''
         ret = network.host_to_ips('www.saltstack.com')
-        self.assertEqual(ret, ['104.199.122.13'])
+        self.assertEqual(ret, ['104.197.168.128'])

     def test_generate_minion_id(self):
         self.assertTrue(network.generate_minion_id())