Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)
Merge pull request #40494 from rallytime/merge-develop

[develop] Merge forward from 2016.11 to develop

Commit 4d7871e4d7. 53 changed files with 1530 additions and 226 deletions.
@@ -133,6 +133,14 @@
 # Cache subsystem module to use for minion data cache.
 #cache: localfs
+# Enables a fast in-memory cache booster and sets the expiration time.
+#memcache_expire_seconds: 0
+# Set a memcache limit in items (bank + key) per cache storage (driver + driver_opts).
+#memcache_max_items: 1024
+# Each time the cache storage gets full, clean up all expired items, not just the oldest one.
+#memcache_full_cleanup: False
+# Enable collecting memcache stats and logging them at the `debug` log level.
+#memcache_debug: False
 
 # Store all returns in the given returner.
 # Setting this option requires that any returner-specific configuration also
@@ -520,6 +520,75 @@ Cache subsystem module to use for minion data cache.
 
     cache: consul
 
+.. conf_master:: memcache_expire_seconds
+
+``memcache_expire_seconds``
+---------------------------
+
+Default: ``0``
+
+Memcache is an additional cache layer that keeps a limited amount of data
+fetched from the minion data cache in memory for a limited period of time,
+making cache operations faster. It doesn't make much sense for the ``localfs``
+cache driver, but it helps with more complex drivers such as ``consul``.
+
+This option sets the expiration time for memcache items. It defaults to ``0``,
+which disables memcache.
+
+.. code-block:: yaml
+
+    memcache_expire_seconds: 30
+
+.. conf_master:: memcache_max_items
+
+``memcache_max_items``
+----------------------
+
+Default: ``1024``
+
+Set the memcache limit in items, where an item is a bank-key pair. I.e. the
+list minion_0/data, minion_0/mine, minion_1/data contains 3 items. This value
+depends on the number of minions usually targeted in your environment. The
+best value can be found by analyzing the cache log with ``memcache_debug``
+enabled.
+
+.. code-block:: yaml
+
+    memcache_max_items: 1024
+
+.. conf_master:: memcache_full_cleanup
+
+``memcache_full_cleanup``
+-------------------------
+
+Default: ``False``
+
+If the cache storage gets full, i.e. the item count exceeds the
+``memcache_max_items`` value, memcache cleans up its storage. If this option is
+set to ``False``, memcache removes only the single oldest value from its
+storage. If it is set to ``True``, memcache removes all expired items, and also
+removes the oldest one if there are no expired items.
+
+.. code-block:: yaml
+
+    memcache_full_cleanup: True
+
+.. conf_master:: memcache_debug
+
+``memcache_debug``
+------------------
+
+Default: ``False``
+
+Enable collecting memcache stats and logging them at the `debug` log level. If
+enabled, memcache collects information about how many ``fetch`` calls have
+been made and how many of them were served from memcache. It also outputs the
+hit rate, i.e. the ratio of those two values. This should help to choose the
+right values for the expiration time and the cache size.
+
+.. code-block:: yaml
+
+    memcache_debug: True
+
 .. conf_master:: ext_job_cache
 
 ``ext_job_cache``
@@ -1381,6 +1450,23 @@ The renderer to use on the minions to render the state data.
 
     renderer: yaml_jinja
 
+.. conf_master:: userdata_template
+
+``userdata_template``
+---------------------
+
+.. versionadded:: 2016.11.4
+
+Default: ``None``
+
+The renderer to use for templating userdata files in salt-cloud, if the
+``userdata_template`` is not set in the cloud profile. If no value is set in
+the cloud profile or master config file, no templating will be performed.
+
+.. code-block:: yaml
+
+    userdata_template: jinja
+
 .. conf_master:: jinja_trim_blocks
 
 ``jinja_trim_blocks``
@@ -249,9 +249,10 @@ to the next master in the list if it finds the existing one is dead.
 
 Default: ``False``
 
-If :conf_minion:`master` is a list of addresses and :conf_minion:`master_type` is ``failover``, shuffle them before trying to
-connect to distribute the minions over all available masters. This uses
-Python's :func:`random.shuffle <python2:random.shuffle>` method.
+If :conf_minion:`master` is a list of addresses and :conf_minion:`master_type`
+is ``failover``, shuffle them before trying to connect to distribute the
+minions over all available masters. This uses Python's :func:`random.shuffle
+<python2:random.shuffle>` method.
 
 .. code-block:: yaml
 
@@ -264,9 +265,10 @@ Python's :func:`random.shuffle <python2:random.shuffle>` method.
 
 Default: ``False``
 
-If :conf_minion:`master` is a list of addresses, shuffle them before trying to
-connect to distribute the minions over all available masters. This uses
-Python's :func:`random.randint <python2:random.randint>` method.
+If :conf_minion:`master` is a list of addresses, and :conf_minion:`master_type`
+is set to ``failover``, shuffle them before trying to connect to distribute the
+minions over all available masters. This uses Python's :func:`random.shuffle
+<python2:random.shuffle>` method.
 
 .. code-block:: yaml
 
@@ -354,6 +354,35 @@ functionality was added to Salt in the 2015.5.0 release.
       # Pass userdata to the instance to be created
       userdata_file: /etc/salt/my-userdata-file
 
+.. note::
+    In versions 2016.11.0 through 2016.11.3, this file was passed through the
+    master's :conf_master:`renderer` to template it. However, this caused
+    issues with non-YAML data, so templating is no longer performed by default.
+    To template the userdata_file, add a ``userdata_template`` option to the
+    cloud profile:
+
+    .. code-block:: yaml
+
+        my-ec2-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/my-userdata-file
+          userdata_template: jinja
+
+    If no ``userdata_template`` is set in the cloud profile, then the master
+    configuration will be checked for a :conf_master:`userdata_template` value.
+    If this is not set, then no templating will be performed on the
+    userdata_file.
+
+    To disable templating in a cloud profile when a
+    :conf_master:`userdata_template` has been set in the master configuration
+    file, simply set ``userdata_template`` to ``False`` in the cloud profile:
+
+    .. code-block:: yaml
+
+        my-ec2-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/my-userdata-file
+          userdata_template: False
+
 EC2 allows a location to be set for servers to be deployed in. Availability
 zones exist inside regions, and may be added to increase specificity.
@@ -153,4 +153,33 @@ cloud-init if available.
 
 .. code-block:: yaml
 
-    userdata_file: /etc/salt/cloud-init/packages.yml
+    my-openstack-config:
+      # Pass userdata to the instance to be created
+      userdata_file: /etc/salt/cloud-init/packages.yml
+
+.. note::
+    As of the 2016.11.4 release, this file can be templated. To use templating,
+    simply specify a ``userdata_template`` option in the cloud profile:
+
+    .. code-block:: yaml
+
+        my-openstack-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/cloud-init/packages.yml
+          userdata_template: jinja
+
+    If no ``userdata_template`` is set in the cloud profile, then the master
+    configuration will be checked for a :conf_master:`userdata_template` value.
+    If this is not set, then no templating will be performed on the
+    userdata_file.
+
+    To disable templating in a cloud profile when a
+    :conf_master:`userdata_template` has been set in the master configuration
+    file, simply set ``userdata_template`` to ``False`` in the cloud profile:
+
+    .. code-block:: yaml
+
+        my-openstack-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/cloud-init/packages.yml
+          userdata_template: False
@@ -73,12 +73,45 @@ profile configuration as `userdata_file`. For instance:
 
 .. code-block:: yaml
 
-    userdata_file: /etc/salt/windows-firewall.ps1
+    my-ec2-config:
+      # Pass userdata to the instance to be created
+      userdata_file: /etc/salt/windows-firewall.ps1
 
-If you are using WinRM on EC2 the HTTPS port for the WinRM service must also be enabled
-in your userdata. By default EC2 Windows images only have insecure HTTP enabled. To
-enable HTTPS and basic authentication required by pywinrm consider the following
-userdata example:
+.. note::
+    In versions 2016.11.0 through 2016.11.3, this file was passed through the
+    master's :conf_master:`renderer` to template it. However, this caused
+    issues with non-YAML data, so templating is no longer performed by default.
+    To template the userdata_file, add a ``userdata_template`` option to the
+    cloud profile:
+
+    .. code-block:: yaml
+
+        my-ec2-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/windows-firewall.ps1
+          userdata_template: jinja
+
+    If no ``userdata_template`` is set in the cloud profile, then the master
+    configuration will be checked for a :conf_master:`userdata_template` value.
+    If this is not set, then no templating will be performed on the
+    userdata_file.
+
+    To disable templating in a cloud profile when a
+    :conf_master:`userdata_template` has been set in the master configuration
+    file, simply set ``userdata_template`` to ``False`` in the cloud profile:
+
+    .. code-block:: yaml
+
+        my-ec2-config:
+          # Pass userdata to the instance to be created
+          userdata_file: /etc/salt/windows-firewall.ps1
+          userdata_template: False
+
+If you are using WinRM on EC2, the HTTPS port for the WinRM service must also
+be enabled in your userdata. By default EC2 Windows images only have insecure
+HTTP enabled. To enable HTTPS and the basic authentication required by pywinrm,
+consider the following userdata example:
 
 .. code-block:: powershell
 
@@ -8,10 +8,9 @@ In 0.10.4 the `external_nodes` system was upgraded to allow for modular
 subsystems to be used to generate the top file data for a :ref:`highstate
 <running-highstate>` run on the master.
 
-The old `external_nodes` option has been removed.
-The master tops system contains a number of subsystems that
-are loaded via the Salt loader interfaces like modules, states, returners,
-runners, etc.
+The old `external_nodes` option has been removed. The master tops system
+provides a pluggable and extendable replacement for it, allowing for multiple
+different subsystems to provide top file data.
 
 Using the new `master_tops` option is simple:
 
@@ -38,24 +37,25 @@ for :mod:`Reclass <salt.tops.reclass_adapter>`.
 
 for :mod:`Varstack <salt.tops.varstack>`.
 
-It's also possible to create custom master_tops modules. These modules must go
-in a subdirectory called `tops` in the `extension_modules` directory.
-The `extension_modules` directory is not defined by default (the
-default `/srv/salt/_modules` will NOT work as of this release)
+It's also possible to create custom master_tops modules. Simply place them into
+``salt://_tops`` in the Salt fileserver and use the
+:py:func:`saltutil.sync_tops <salt.runners.saltutil.sync_tops>` runner to sync
+them. If this runner function is not available, they can manually be placed
+into ``extmods/tops``, relative to the master cachedir (in most cases the full
+path will be ``/var/cache/salt/master/extmods/tops``).
 
 Custom tops modules are written like any other execution module, see the source
-for the two modules above for examples of fully functional ones. Below is
-a degenerate example:
+for the two modules above for examples of fully functional ones. Below is a
+bare-bones example:
 
-/etc/salt/master:
+**/etc/salt/master:**
 
 .. code-block:: yaml
 
-    extension_modules: /srv/salt/modules
     master_tops:
       customtop: True
 
-/srv/salt/modules/tops/customtop.py:
+**customtop.py:** (custom master_tops module)
 
 .. code-block:: python
 
@@ -66,6 +66,7 @@ a degenerate example:
 
     log = logging.getLogger(__name__)
 
+
     def __virtual__():
         return __virtualname__
 
@@ -84,3 +85,10 @@ a degenerate example:
     ----------
     base:
       - test
+
+.. note::
+    If a master_tops module returns :ref:`top file <states-top>` data for a
+    given minion, it will be added to the states configured in the top file. It
+    will *not* replace it altogether. The Nitrogen release adds additional
+    functionality allowing a minion to treat master_tops as the single source
+    of truth, irrespective of the top file.
|
|||
=========
|
||||
|
||||
Added module execution support for user and group
|
||||
Added module execution support for timezone
|
||||
Added module execution support for network and status
|
||||
Added module execution support for beacon.status
|
||||
Added module execution support for disk.iostat
|
||||
|
||||
|
||||
Minion Data Cache Fixes
|
||||
=======================
|
||||
|
||||
Added Memcache booster for the minion data cache.
|
||||
Memcache is an additional cache layer that keeps a limited amount of data
|
||||
fetched from the minion data cache for a limited period of time in memory that
|
||||
makes cache operations faster. It doesn't make much sence for the ``localfs``
|
||||
cache driver but helps for more complex drivers like ``consul``.
|
||||
For more details see ``memcache_expire_seconds`` and other ``memcache_*``
|
||||
options in the master config reverence.
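
Putting the new options together, a minimal master configuration enabling the
memcache layer could look like the following sketch (the values shown are
illustrative assumptions, not recommendations from this commit):

.. code-block:: yaml

    cache: consul
    memcache_expire_seconds: 30
    memcache_max_items: 1024
    memcache_full_cleanup: True
    memcache_debug: True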
+
+Salt-Cloud Fixes
+================
+
+2016.11.0 added support for templating userdata files for the :mod:`ec2
+<salt.cloud.clouds.ec2>` driver, using the :conf_master:`renderer` option from
+the master config file. However, as the default renderer first evaluates jinja
+templating, followed by loading the data as a YAML dictionary, this leads to
+unpredictable results when userdata files are comprised of non-YAML data (which
+they generally are).
+
+2016.11.4 fixes this by only templating the userdata_file when it is explicitly
+configured to do so. This is done by adding a new optional parameter to the
+cloud profile called ``userdata_template``. This option is used in the same way
+as the ``template`` argument in :py:func:`file.managed
+<salt.states.file.managed>` states; it is simply set to the desired templating
+renderer:
+
+.. code-block:: yaml
+
+    my-ec2-config:
+      # Pass userdata to the instance to be created
+      userdata_file: /etc/salt/my-userdata-file
+      userdata_template: jinja
+
+If no ``userdata_template`` option is set in the cloud profile, then salt-cloud
+will check for the presence of the master configuration parameter
+:conf_master:`userdata_template`. If this is also not set, then no templating
+will be performed on the userdata_file.
+
+In addition, the other cloud drivers which support setting a ``userdata_file``
+(:mod:`azurearm <salt.cloud.clouds.azurearm>`, :mod:`nova
+<salt.cloud.clouds.nova>`, and :mod:`openstack <salt.cloud.clouds.openstack>`)
+have had templating support added to bring them to feature parity with the ec2
+driver's implementation of the ``userdata_file`` option.
@@ -22,7 +22,10 @@ import logging
 # Import third party libs
 # pylint: disable=import-error
 try:
-    from Crypto.Util import asn1
+    try:
+        from Cryptodome.Util import asn1
+    except ImportError:
+        from Crypto.Util import asn1
     import OpenSSL
     HAS_DEPS = True
 except ImportError:
167 salt/cache/__init__.py vendored
@@ -7,13 +7,31 @@ Loader mechanism for caching data, with data expiration, etc.
 
 # Import Python libs
 from __future__ import absolute_import
 import logging
+import time
 
 # Import Salt libs
 import salt.config
+from salt.ext import six
+from salt.payload import Serial
+from salt.utils.odict import OrderedDict
 import salt.loader
 import salt.syspaths
-from salt.payload import Serial
 
 log = logging.getLogger(__name__)
 
 
+def factory(opts, **kwargs):
+    '''
+    Creates and returns the cache class.
+    If memory caching is enabled in opts, a MemCache instance is returned;
+    otherwise a Cache instance.
+    '''
+    if opts.get('memcache_expire_seconds', 0):
+        cls = MemCache
+    else:
+        cls = Cache
+    return cls(opts, **kwargs)
+
+
 class Cache(object):
@@ -49,7 +67,7 @@ class Cache(object):
     Key name is a string identifier of a data container (like a file inside a
     directory) which will hold the data.
     '''
-    def __init__(self, opts, cachedir=None):
+    def __init__(self, opts, cachedir=None, **kwargs):
         self.opts = opts
         if cachedir is None:
             self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
@@ -58,11 +76,20 @@ class Cache(object):
             self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
         self.serial = Serial(opts)
         self._modules = None
+        self._kwargs = kwargs
+
+    def __lazy_init(self):
+        self._modules = salt.loader.cache(self.opts, self.serial)
+        fun = '{0}.init_kwargs'.format(self.driver)
+        if fun in self.modules:
+            self._kwargs = self.modules[fun](self._kwargs)
+        else:
+            self._kwargs = {}
 
     @property
     def modules(self):
         if self._modules is None:
-            self._modules = salt.loader.cache(self.opts, self.serial)
+            self.__lazy_init()
         return self._modules
 
     def cache(self, bank, key, fun, loop_fun=None, **kwargs):
@@ -124,11 +151,8 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'store')
-        try:
-            return self.modules[fun](bank, key, data, self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank, key, data)
+        fun = '{0}.store'.format(self.driver)
+        return self.modules[fun](bank, key, data, **self._kwargs)
 
     def fetch(self, bank, key):
         '''
@@ -151,11 +175,8 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'fetch')
-        try:
-            return self.modules[fun](bank, key, self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank, key)
+        fun = '{0}.fetch'.format(self.driver)
+        return self.modules[fun](bank, key, **self._kwargs)
 
     def updated(self, bank, key):
         '''
@@ -178,11 +199,8 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'updated')
-        try:
-            return self.modules[fun](bank, key, self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank, key)
+        fun = '{0}.updated'.format(self.driver)
+        return self.modules[fun](bank, key, **self._kwargs)
 
     def flush(self, bank, key=None):
         '''
@@ -202,13 +220,10 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'flush')
-        try:
-            return self.modules[fun](bank, key=key, cachedir=self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank, key=key)
+        fun = '{0}.flush'.format(self.driver)
+        return self.modules[fun](bank, key=key, **self._kwargs)
 
-    def list(self, bank):
+    def ls(self, bank):
         '''
         Lists entries stored in the specified bank.
 
@@ -224,11 +239,10 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'list')
-        try:
-            return self.modules[fun](bank, self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank)
+        fun = '{0}.ls'.format(self.driver)
+        return self.modules[fun](bank, **self._kwargs)
+
+    list = ls
 
     def contains(self, bank, key=None):
         '''
@@ -252,8 +266,93 @@ class Cache(object):
         Raises an exception if cache driver detected an error accessing data
         in the cache backend (auth, permissions, etc).
         '''
-        fun = '{0}.{1}'.format(self.driver, 'contains')
-        try:
-            return self.modules[fun](bank, key, self.cachedir)
-        except TypeError:
-            return self.modules[fun](bank, key)
+        fun = '{0}.contains'.format(self.driver)
+        return self.modules[fun](bank, key, **self._kwargs)
+
+
+class MemCache(Cache):
+    '''
+    Short-lived in-memory cache store keeping values on a time and/or size
+    (count) basis.
+    '''
+    # {<storage_id>: odict({<key>: [atime, data], ...}), ...}
+    data = {}
+
+    def __init__(self, opts, **kwargs):
+        super(MemCache, self).__init__(opts, **kwargs)
+        self.expire = opts.get('memcache_expire_seconds', 10)
+        self.max = opts.get('memcache_max_items', 1024)
+        self.cleanup = opts.get('memcache_full_cleanup', False)
+        self.debug = opts.get('memcache_debug', False)
+        if self.debug:
+            self.call = 0
+            self.hit = 0
+        self._storage = None
+
+    @classmethod
+    def __cleanup(cls, expire):
+        now = time.time()
+        for storage in six.itervalues(cls.data):
+            for key, data in list(storage.items()):
+                if data[0] + expire < now:
+                    del storage[key]
+                else:
+                    break
+
+    def _get_storage_id(self):
+        fun = '{0}.storage_id'.format(self.driver)
+        if fun in self.modules:
+            return self.modules[fun](self._kwargs)
+        else:
+            return self.driver
+
+    @property
+    def storage(self):
+        if self._storage is None:
+            storage_id = self._get_storage_id()
+            if storage_id not in MemCache.data:
+                MemCache.data[storage_id] = OrderedDict()
+            self._storage = MemCache.data[storage_id]
+        return self._storage
+
+    def fetch(self, bank, key):
+        if self.debug:
+            self.call += 1
+        now = time.time()
+        record = self.storage.pop((bank, key), None)
+        # Have a cached value for the key
+        if record is not None and record[0] + self.expire >= now:
+            if self.debug:
+                self.hit += 1
+                log.debug('MemCache stats (call/hit/rate): '
+                          '{0}/{1}/{2}'.format(self.call,
+                                               self.hit,
+                                               float(self.hit) / self.call))
+            # update atime and return
+            record[0] = now
+            self.storage[(bank, key)] = record
+            return record[1]
+
+        # Have no value for the key or value is expired
+        data = super(MemCache, self).fetch(bank, key)
+        if len(self.storage) >= self.max:
+            if self.cleanup:
+                MemCache.__cleanup(self.expire)
+            if len(self.storage) >= self.max:
+                self.storage.popitem(last=False)
+        self.storage[(bank, key)] = [now, data]
+        return data
+
+    def store(self, bank, key, data):
+        self.storage.pop((bank, key), None)
+        super(MemCache, self).store(bank, key, data)
+        if len(self.storage) >= self.max:
+            if self.cleanup:
+                MemCache.__cleanup(self.expire)
+            if len(self.storage) >= self.max:
+                self.storage.popitem(last=False)
+        self.storage[(bank, key)] = [time.time(), data]
+
+    def flush(self, bank, key=None):
+        self.storage.pop((bank, key), None)
+        super(MemCache, self).flush(bank, key)
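
For orientation, a minimal usage sketch of the ``factory`` interface added
above; loading opts via ``salt.config.master_config`` and the concrete values
are illustrative assumptions, not part of this commit:

.. code-block:: python

    import salt.config
    import salt.cache

    # Hypothetical: load master opts and enable the in-memory booster.
    opts = salt.config.master_config('/etc/salt/master')
    opts['memcache_expire_seconds'] = 30  # non-zero, so factory() returns MemCache

    cache = salt.cache.factory(opts)
    cache.store('minions/minion_0', 'data', {'os': 'AIX'})
    # Served from the in-memory storage if fetched again within 30 seconds.
    print(cache.fetch('minions/minion_0', 'data'))
    print(cache.ls('minions/minion_0'))  # 'ls' is the new name for 'list'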
10 salt/cache/consul.py vendored
@@ -54,9 +54,6 @@ except ImportError:
 
 from salt.exceptions import SaltCacheError
 
-# Don't shadow built-ins
-__func_alias__ = {'list_': 'list'}
-
 log = logging.getLogger(__name__)
 api = None
@@ -64,6 +61,8 @@ api = None
 # Define the module's virtual name
 __virtualname__ = 'consul'
 
+__func_alias__ = {'ls': 'list'}
+
 
 def __virtual__():
     '''
@@ -140,7 +139,7 @@ def flush(bank, key=None):
     )
 
 
-def list_(bank):
+def ls(bank):
     '''
     Return an iterable object containing all entries stored in the specified bank.
     '''
@@ -164,9 +163,6 @@ def list_(bank):
     return keys
 
 
-getlist = list_
-
-
 def contains(bank, key):
     '''
     Checks if the specified bank contains the specified key.
26 salt/cache/localfs.py vendored
@@ -21,11 +21,24 @@ from salt.exceptions import SaltCacheError
 import salt.utils
 import salt.utils.atomicfile
 
-# Don't shadow built-ins
-__func_alias__ = {'list_': 'list'}
-
 log = logging.getLogger(__name__)
 
+__func_alias__ = {'ls': 'list'}
+
+
+def __cachedir(kwargs=None):
+    if kwargs and 'cachedir' in kwargs:
+        return kwargs['cachedir']
+    return __opts__.get('cachedir', salt.syspaths.CACHE_DIR)
+
+
+def init_kwargs(kwargs):
+    return {'cachedir': __cachedir(kwargs)}
+
+
+def get_storage_id(kwargs):
+    return ('localfs', __cachedir(kwargs))
+
 
 def store(bank, key, data, cachedir):
     '''
@@ -108,7 +121,7 @@ def flush(bank, key=None, cachedir=None):
     Remove the key from the cache bank with all the key content.
     '''
     if cachedir is None:
-        cachedir = __opts__['cachedir']
+        cachedir = __cachedir()
 
     try:
         if key is None:
@@ -130,7 +143,7 @@ def flush(bank, key=None, cachedir=None):
     return True
 
 
-def list_(bank, cachedir):
+def ls(bank, cachedir):
     '''
     Return an iterable object containing all entries stored in the specified bank.
     '''
@@ -154,9 +167,6 @@ def list_(bank, cachedir):
     return ret
 
 
-getlist = list_
-
-
 def contains(bank, key, cachedir):
     '''
     Checks if the specified bank contains the specified key.
@@ -1575,7 +1575,7 @@ class LocalClient(object):
         if connected_minions is None:
             connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids()
         if self.opts['minion_data_cache'] \
-                and salt.cache.Cache(self.opts).contains('minions/{0}'.format(id_), 'data') \
+                and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \
                 and connected_minions \
                 and id_ not in connected_minions:
@@ -185,10 +185,10 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None):
     if id_ is None:
         id_ = ''
     try:
-        cachedir = os.path.join(opts['cachedir'], 'salt-ssh', id_).rstrip(os.sep)
+        cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
     except AttributeError:
         # Minion ID should always be a str, but don't let an int break this
-        cachedir = os.path.join(opts['cachedir'], 'salt-ssh', str(id_)).rstrip(os.sep)
+        cachedir = os.path.join('salt-ssh', str(id_)).rstrip(os.sep)
 
     for saltenv in file_refs:
         # Location where files in this saltenv will be cached
@@ -41,9 +41,12 @@ from salt.template import compile_template
 
 # Import third party libs
 try:
-    import Crypto.Random
+    import Cryptodome.Random
 except ImportError:
-    pass  # pycrypto < 2.1
+    try:
+        import Crypto.Random
+    except ImportError:
+        pass  # pycrypto < 2.1
 import yaml
 import salt.ext.six as six
 from salt.ext.six.moves import input  # pylint: disable=import-error,redefined-builtin
@@ -953,8 +953,13 @@ def request_instance(call=None, kwargs=None):  # pylint: disable=unused-argument
         with salt.utils.fopen(userdata_file, 'r') as fh_:
             userdata = fh_.read()
 
+    userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
+
     if userdata is not None:
-        os_kwargs['custom_data'] = base64.b64encode(userdata)
+        try:
+            os_kwargs['custom_data'] = base64.b64encode(userdata)
+        except Exception as exc:
+            log.exception('Failed to encode userdata: %s', exc)
 
     iface_data = create_interface(kwargs=vm_)
     vm_['iface_id'] = iface_data['id']
@@ -93,8 +93,6 @@ import salt.utils
 from salt._compat import ElementTree as ET
 import salt.utils.http as http
 import salt.utils.aws as aws
-import salt.loader
-from salt.template import compile_template
 
 # Import salt.cloud libs
 import salt.utils.cloud
@@ -1790,18 +1788,13 @@ def request_instance(vm_=None, call=None):
         with salt.utils.fopen(userdata_file, 'r') as fh_:
             userdata = fh_.read()
 
-    if userdata is not None:
-        render_opts = __opts__.copy()
-        render_opts.update(vm_)
-        renderer = __opts__.get('renderer', 'yaml_jinja')
-        rend = salt.loader.render(render_opts, {})
-        blacklist = __opts__['renderer_blacklist']
-        whitelist = __opts__['renderer_whitelist']
-        userdata = compile_template(
-            ':string:', rend, renderer, blacklist, whitelist, input_data=userdata,
-        )
+    userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
 
-        params[spot_prefix + 'UserData'] = base64.b64encode(userdata)
+    if userdata is not None:
+        try:
+            params[spot_prefix + 'UserData'] = base64.b64encode(userdata)
+        except Exception as exc:
+            log.exception('Failed to encode userdata: %s', exc)
 
     vm_size = config.get_cloud_config_value(
         'size', vm_, __opts__, search_global=False
@@ -645,12 +645,17 @@ def request_instance(vm_=None, call=None):
             kwargs['files'][src_path] = files[src_path]
 
     userdata_file = config.get_cloud_config_value(
-        'userdata_file', vm_, __opts__, search_global=False
+        'userdata_file', vm_, __opts__, search_global=False, default=None
     )
 
     if userdata_file is not None:
-        with salt.utils.fopen(userdata_file, 'r') as fp:
-            kwargs['userdata'] = fp.read()
+        try:
+            with salt.utils.fopen(userdata_file, 'r') as fp_:
+                kwargs['userdata'] = salt.utils.cloud.userdata_template(
+                    __opts__, vm_, fp_.read()
+                )
+        except Exception as exc:
+            log.exception(
+                'Failed to read userdata from %s: %s', userdata_file, exc)
 
     kwargs['config_drive'] = config.get_cloud_config_value(
         'config_drive', vm_, __opts__, search_global=False
@@ -526,12 +526,17 @@ def request_instance(vm_=None, call=None):
             kwargs['ex_files'][src_path] = fp_.read()
 
     userdata_file = config.get_cloud_config_value(
-        'userdata_file', vm_, __opts__, search_global=False
+        'userdata_file', vm_, __opts__, search_global=False, default=None
    )
 
     if userdata_file is not None:
-        with salt.utils.fopen(userdata_file, 'r') as fp:
-            kwargs['ex_userdata'] = fp.read()
+        try:
+            with salt.utils.fopen(userdata_file, 'r') as fp_:
+                kwargs['ex_userdata'] = salt.utils.cloud.userdata_template(
+                    __opts__, vm_, fp_.read()
+                )
+        except Exception as exc:
+            log.exception(
+                'Failed to read userdata from %s: %s', userdata_file, exc)
 
     config_drive = config.get_cloud_config_value(
         'config_drive', vm_, __opts__, default=None, search_global=False
@@ -982,6 +982,14 @@ VALID_OPTS = {
 
     # Minion data cache driver (one of salt.cache.* modules)
     'cache': str,
+
+    # Enables a fast in-memory cache booster and sets the expiration time.
+    'memcache_expire_seconds': int,
+    # Set a memcache limit in items (bank + key) per cache storage (driver + driver_opts).
+    'memcache_max_items': int,
+    # Each time the cache storage gets full, clean up all expired items, not just the oldest one.
+    'memcache_full_cleanup': bool,
+    # Enable collecting memcache stats and logging them at the `debug` log level.
+    'memcache_debug': bool,
 
     # Thin and minimal Salt extra modules
     'thin_extra_mods': str,
@@ -1562,6 +1570,10 @@ DEFAULT_MASTER_OPTS = {
     'python2_bin': 'python2',
     'python3_bin': 'python3',
     'cache': 'localfs',
+    'memcache_expire_seconds': 0,
+    'memcache_max_items': 1024,
+    'memcache_full_cleanup': False,
+    'memcache_debug': False,
     'thin_extra_mods': '',
     'min_extra_mods': '',
     'ssl': None,
@@ -24,15 +24,25 @@ import getpass
 import salt.ext.six as six
 from salt.ext.six.moves import zip  # pylint: disable=import-error,redefined-builtin
 try:
-    from Crypto.Cipher import AES, PKCS1_OAEP
-    from Crypto.Hash import SHA
-    from Crypto.PublicKey import RSA
-    from Crypto.Signature import PKCS1_v1_5
-    # let this be imported, if possible
-    import Crypto.Random  # pylint: disable=W0611
+    from Cryptodome.Cipher import AES, PKCS1_OAEP
+    from Cryptodome.Hash import SHA
+    from Cryptodome.PublicKey import RSA
+    from Cryptodome.Signature import PKCS1_v1_5
+    import Cryptodome.Random  # pylint: disable=W0611
+    CDOME = True
 except ImportError:
-    # No need for crypt in local mode
-    pass
+    CDOME = False
+if not CDOME:
+    try:
+        from Crypto.Cipher import AES, PKCS1_OAEP
+        from Crypto.Hash import SHA
+        from Crypto.PublicKey import RSA
+        from Crypto.Signature import PKCS1_v1_5
+        # let this be imported, if possible
+        import Crypto.Random  # pylint: disable=W0611
+    except ImportError:
+        # No need for crypt in local mode
+        pass
 
 # Import salt libs
 import salt.defaults.exitcodes
@@ -457,7 +457,7 @@ class RemoteFuncs(object):
                 states=False,
                 rend=False)
         self.__setup_fileserver()
-        self.cache = salt.cache.Cache(opts)
+        self.cache = salt.cache.factory(opts)
 
     def __setup_fileserver(self):
         '''
@@ -883,6 +883,8 @@ def _ps(osdata):
             '/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
             '| awk \'{ $7=\"\"; print }\''
         )
+    elif osdata['os_family'] == 'AIX':
+        grains['ps'] = '/usr/bin/ps auxww'
     else:
        grains['ps'] = 'ps -efHww'
     return grains
12 salt/key.py
@@ -495,10 +495,10 @@ class Key(object):
             for minion in os.listdir(m_cache):
                 if minion not in minions and minion not in preserve_minions:
                     shutil.rmtree(os.path.join(m_cache, minion))
-        cache = salt.cache.Cache(self.opts)
-        clist = cache.list(self.ACC)
+        cache = salt.cache.factory(self.opts)
+        clist = cache.ls(self.ACC)
         if clist:
-            for minion in cache.list(self.ACC):
+            for minion in clist:
                 if minion not in minions and minion not in preserve_minions:
                     cache.flush('{0}/{1}'.format(self.ACC, minion))
@@ -973,10 +973,10 @@ class RaetKey(Key):
             for minion in os.listdir(m_cache):
                 if minion not in minions:
                     shutil.rmtree(os.path.join(m_cache, minion))
-        cache = salt.cache.Cache(self.opts)
-        clist = cache.list(self.ACC)
+        cache = salt.cache.factory(self.opts)
+        clist = cache.ls(self.ACC)
         if clist:
-            for minion in cache.list(self.ACC):
+            for minion in clist:
                 if minion not in minions and minion not in preserve_minions:
                     cache.flush('{0}/{1}'.format(self.ACC, minion))
@@ -19,7 +19,11 @@ import logging
 import multiprocessing
 
 # Import third party libs
-from Crypto.PublicKey import RSA
+try:
+    from Cryptodome.PublicKey import RSA
+except ImportError:
+    # Fall back to pycrypto
+    from Crypto.PublicKey import RSA
 # pylint: disable=import-error,no-name-in-module,redefined-builtin
 import salt.ext.six as six
 from salt.ext.six.moves import range
@@ -14,6 +14,7 @@ import salt.utils
 import salt.ext.six.moves.http_client  # pylint: disable=import-error,redefined-builtin,no-name-in-module
 from salt.ext.six.moves import urllib  # pylint: disable=no-name-in-module
 from salt.ext.six.moves.urllib.error import HTTPError, URLError  # pylint: disable=no-name-in-module
+from salt.exceptions import CommandExecutionError
 
 # Import 3rd party libs
 try:
@@ -295,13 +296,23 @@ def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id
 
 
 def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers):
-    artifact_metadata_url = _get_artifact_metadata_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id)
+
+    artifact_metadata_url = _get_artifact_metadata_url(
+        artifactory_url=artifactory_url,
+        repository=repository,
+        group_id=group_id,
+        artifact_id=artifact_id
+    )
+
     try:
         request = urllib.request.Request(artifact_metadata_url, None, headers)
         artifact_metadata_xml = urllib.request.urlopen(request).read()
-    except HTTPError as http_error:
-        message = 'Could not fetch data from url: {url}, HTTPError: {error}'
-        raise Exception(message.format(url=artifact_metadata_url, error=http_error))
+    except (HTTPError, URLError) as err:
+        message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
+            artifact_metadata_url,
+            err
+        )
+        raise CommandExecutionError(message)
 
     log.debug('artifact_metadata_xml=%s', artifact_metadata_xml)
     return artifact_metadata_xml
@@ -334,13 +345,25 @@ def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, ar
 
 
 def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers):
-    snapshot_version_metadata_url = _get_snapshot_version_metadata_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version)
+
+    snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
+        artifactory_url=artifactory_url,
+        repository=repository,
+        group_id=group_id,
+        artifact_id=artifact_id,
+        version=version
+    )
+
     try:
         request = urllib.request.Request(snapshot_version_metadata_url, None, headers)
         snapshot_version_metadata_xml = urllib.request.urlopen(request).read()
-    except HTTPError as http_error:
-        message = 'Could not fetch data from url: {url}, HTTPError: {error}'
-        raise Exception(message.format(url=snapshot_version_metadata_url, error=http_error))
+    except (HTTPError, URLError) as err:
+        message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
+            snapshot_version_metadata_url,
+            err
+        )
+        raise CommandExecutionError(message)
 
     log.debug('snapshot_version_metadata_xml=%s', snapshot_version_metadata_xml)
     return snapshot_version_metadata_xml
@@ -378,13 +401,23 @@ def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id)
 
 
 def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers):
-    latest_version_url = __get_latest_version_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id)
+
+    latest_version_url = __get_latest_version_url(
+        artifactory_url=artifactory_url,
+        repository=repository,
+        group_id=group_id,
+        artifact_id=artifact_id
+    )
+
     try:
         request = urllib.request.Request(latest_version_url, None, headers)
         version = urllib.request.urlopen(request).read()
-    except HTTPError as http_error:
-        message = 'Could not fetch data from url: {url}, HTTPError: {error}'
-        raise Exception(message.format(url=latest_version_url, error=http_error))
+    except (HTTPError, URLError) as err:
+        message = 'Could not fetch data from url: {0}. ERROR: {1}'.format(
+            latest_version_url,
+            err
+        )
+        raise CommandExecutionError(message)
 
     log.debug("Response of: %s", version)
@@ -37,6 +37,27 @@ def __virtual__():
     return True
 
 
+def _parse_numbers(text):
+    '''
+    Convert a string to a number, allowing for a K|M|G|T postfix, 32.8K.
+    Returns a decimal number if the string is a real number,
+    or the string unchanged otherwise.
+    '''
+    if text.isdigit():
+        return decimal.Decimal(text)
+
+    try:
+        # SI multipliers; e.g. '32.8K' -> 32.8 * 1E3
+        postPrefixes = {'K': '1E3', 'M': '1E6', 'G': '1E9', 'T': '1E12',
+                        'P': '1E15', 'E': '1E18', 'Z': '1E21', 'Y': '1E24'}
+        if text[-1] in postPrefixes.keys():
+            v = decimal.Decimal(text[:-1])
+            v = v * decimal.Decimal(postPrefixes[text[-1]])
+            return v
+        else:
+            return decimal.Decimal(text)
+    except (ValueError, decimal.InvalidOperation):
+        # Decimal() raises InvalidOperation (not ValueError) on bad input
+        return text
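
A quick sketch of the expected behavior of ``_parse_numbers`` (a hypothetical
interactive session, assuming the prefix table above):

.. code-block:: python

    >>> _parse_numbers('42')      # plain digits
    Decimal('42')
    >>> _parse_numbers('16.4K')   # 16.4 * 1E3
    Decimal('1.64E+4')
    >>> _parse_numbers('n/a')     # not a number, returned unchanged
    'n/a'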
+
+
 def _clean_flags(args, caller):
     '''
     Sanitize flags passed into df
@@ -723,6 +744,9 @@ def iostat(interval=1, count=5, disks=None):
 
     .. versionadded:: 2016.3.0
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -733,6 +757,8 @@ def iostat(interval=1, count=5, disks=None):
         return _iostat_linux(interval, count, disks)
     elif salt.utils.is_freebsd():
         return _iostat_fbsd(interval, count, disks)
+    elif salt.utils.is_aix():
+        return _iostat_aix(interval, count, disks)
 
 
 def _iostats_dict(header, stats):
@@ -843,3 +869,103 @@ def _iostat_linux(interval, count, disks):
         iostats[disk] = _iostats_dict(dev_header, stats)
 
     return iostats
+
+
+def _iostat_aix(interval, count, disks):
+    '''
+    AIX support to gather and return (averaged) IO stats.
+    '''
+    if disks is None:
+        iostat_cmd = 'iostat -dD {0} {1} '.format(interval, count)
+    elif isinstance(disks, six.string_types):
+        iostat_cmd = 'iostat -dD {0} {1} {2}'.format(disks, interval, count)
+    else:
+        iostat_cmd = 'iostat -dD {0} {1} {2}'.format(' '.join(disks), interval, count)
+
+    ret = {}
+    procn = None
+    fields = []
+    disk_name = ''
+    disk_mode = ''
+    dev_stats = collections.defaultdict(list)
+    for line in __salt__['cmd.run'](iostat_cmd).splitlines():
+        # Note: iostat -dD is per-system, e.g.:
+        #
+        #root@l490vp031_pub:~/devtest# iostat -dD hdisk6 1 3
+        #
+        #System configuration: lcpu=8 drives=1 paths=2 vdisks=2
+        #
+        #hdisk6    xfer:  %tm_act    bps     tps    bread    bwrtn
+        #                     9.6  16.4K     4.0    16.4K      0.0
+        #          read:      rps  avgserv minserv maxserv  timeouts  fails
+        #                     4.0      4.9     0.3     9.9         0      0
+        #         write:      wps  avgserv minserv maxserv  timeouts  fails
+        #                     0.0      0.0     0.0     0.0         0      0
+        #         queue:  avgtime  mintime maxtime avgwqsz   avgsqsz sqfull
+        #                     0.0      0.0     0.0     0.0       0.0    0.0
+        #--------------------------------------------------------------------------------
+        # (one such block is printed per interval)
+        if not line or line.startswith('System') or line.startswith('-----------'):
+            continue
+
+        if not re.match(r'\s', line):
+            # seen disk name
+            dsk_comps = line.split(':')
+            dsk_firsts = dsk_comps[0].split()
+            disk_name = dsk_firsts[0]
+            disk_mode = dsk_firsts[1]
+            fields = dsk_comps[1].split()
+            if disk_name not in dev_stats.keys():
+                dev_stats[disk_name] = []
+            procn = len(dev_stats[disk_name])
+            dev_stats[disk_name].append({})
+            dev_stats[disk_name][procn][disk_mode] = {}
+            dev_stats[disk_name][procn][disk_mode]['fields'] = fields
+            dev_stats[disk_name][procn][disk_mode]['stats'] = []
+            continue
+
+        if ':' in line:
+            comps = line.split(':')
+            fields = comps[1].split()
+            disk_mode = comps[0].lstrip()
+            if disk_mode not in dev_stats[disk_name][0].keys():
+                dev_stats[disk_name][0][disk_mode] = {}
+                dev_stats[disk_name][0][disk_mode]['fields'] = fields
+                dev_stats[disk_name][0][disk_mode]['stats'] = []
+        else:
+            line = line.split()
+            stats = [_parse_numbers(x) for x in line[:]]
+            dev_stats[disk_name][0][disk_mode]['stats'].append(stats)
+
+    iostats = {}
+
+    for disk, list_modes in dev_stats.items():
+        iostats[disk] = {}
+        for modes in list_modes:
+            for disk_mode in modes.keys():
+                fields = modes[disk_mode]['fields']
+                stats = modes[disk_mode]['stats']
+                iostats[disk][disk_mode] = _iostats_dict(fields, stats)
+
+    return iostats
@@ -652,7 +652,6 @@ def active_tcp():
         # lets use netstat to mimic linux as close as possible
         ret = {}
         for connection in _netstat_aix():
-            ## TBD need to deliver AIX output in consumable fashion
             if not connection['proto'].startswith('tcp'):
                 continue
             if connection['state'] != 'ESTABLISHED':
@@ -9,6 +9,7 @@ from __future__ import absolute_import
 import datetime
 import os
+import re
 import logging
 import fnmatch
 import collections
 import copy
@@ -40,6 +41,9 @@ __func_alias__ = {
 }
 
 
+log = logging.getLogger(__name__)
+
+
 def __virtual__():
     '''
     Not all functions supported by Windows
@@ -65,10 +69,57 @@ def _number(text):
     return text
 
 
+def _get_boot_time_aix():
+    '''
+    Return the number of seconds since boot time on AIX
+
+    t=$(LC_ALL=POSIX ps -o etime= -p 1)
+    d=0 h=0
+    case $t in *-*) d=${t%%-*}; t=${t#*-};; esac
+    case $t in *:*:*) h=${t%%:*}; t=${t#*:};; esac
+    s=$((d*86400 + h*3600 + ${t%%:*}*60 + ${t#*:}))
+
+    t is 7-20:46:46
+    '''
+    boot_secs = 0
+    res = __salt__['cmd.run_all']('ps -o etime= -p 1')
+    if res['retcode'] > 0:
+        raise CommandExecutionError('Unable to find boot_time for pid 1.')
+    bt_time = res['stdout']
+    days = bt_time.split('-')
+    hms = days[1].split(':')
+    boot_secs = _number(days[0]) * 86400 + _number(hms[0]) * 3600 + _number(hms[1]) * 60 + _number(hms[2])
+    return boot_secs
+
+
+def _aix_loadavg():
+    '''
+    Return the load average on AIX
+    '''
+    # 03:42PM   up 9 days,  20:41,  2 users,  load average: 0.28, 0.47, 0.69
+    uptime = __salt__['cmd.run']('uptime')
+    ldavg = uptime.split('load average')
+    load_avg = ldavg[1].split()
+    return {'1-min': load_avg[1].strip(','),
+            '5-min': load_avg[2].strip(','),
+            '15-min': load_avg[3]}
+
+
+def _aix_nproc():
+    '''
+    Return the maximum number of PROCESSES allowed per user on AIX
+    '''
+    nprocs = __salt__['cmd.run']('lsattr -E -l sys0 | grep maxuproc', python_shell=True).split()
+    return _number(nprocs[1])
+
+
 def procs():
     '''
     Return the process data
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -148,6 +199,9 @@ def uptime():
     .. versionchanged:: 2016.11.0
         Support for OpenBSD, FreeBSD, NetBSD, MacOS, and Solaris
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -183,6 +237,8 @@ def uptime():
         data = bt_data.split("{")[-1].split("}")[0].strip().replace(' ', '')
         uptime = dict([(k, int(v,)) for k, v in [p.strip().split('=') for p in data.split(',')]])
         seconds = int(curr_seconds - uptime['sec'])
+    elif salt.utils.is_aix():
+        seconds = _get_boot_time_aix()
     else:
         return __salt__['cmd.run']('uptime')
@@ -211,6 +267,9 @@ def loadavg():
     '''
     Return the load averages for this minion
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -219,6 +278,9 @@ def loadavg():
 
     :raises CommandExecutionError: If the system cannot report loadaverages to Python
     '''
+    if __grains__['kernel'] == 'AIX':
+        return _aix_loadavg()
+
     try:
         load_avg = os.getloadavg()
     except AttributeError:
@@ -233,6 +295,9 @@ def cpustats():
     '''
     Return the CPU stats for this minion
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -308,11 +373,45 @@ def cpustats():
                 ret[_number(cpu[0])][fields[i]] = _number(cpu[i])
         return ret
 
+    def aix_cpustats():
+        '''
+        AIX specific implementation of cpustats
+        '''
+        ret = {}
+        ret['mpstat'] = []
+        procn = None
+        fields = []
+        for line in __salt__['cmd.run']('mpstat -a').splitlines():
+            if not line:
+                continue
+            procn = len(ret['mpstat'])
+            if line.startswith('System'):
+                comps = line.split(':')
+                ret['mpstat'].append({})
+                ret['mpstat'][procn]['system'] = {}
+                cpu_comps = comps[1].split()
+                for i in range(0, len(cpu_comps)):
+                    cpu_vals = cpu_comps[i].split('=')
+                    ret['mpstat'][procn]['system'][cpu_vals[0]] = cpu_vals[1]
+
+            if line.startswith('cpu'):
+                fields = line.split()
+                continue
+
+            if fields:
+                cpustat = line.split()
+                ret[_number(cpustat[0])] = {}
+                for i in range(1, len(fields)-1):
+                    ret[_number(cpustat[0])][fields[i]] = _number(cpustat[i])
+
+        return ret
+
     # dict that returns a function that does the right thing per platform
     get_version = {
         'Linux': linux_cpustats,
         'FreeBSD': freebsd_cpustats,
         'SunOS': sunos_cpustats,
+        'AIX': aix_cpustats,
     }
 
     errmsg = 'This method is unsupported on the current operating system!'
@@ -323,6 +422,9 @@ def meminfo():
     '''
     Return the memory info for this minion
 
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
+
     CLI Example:
 
     .. code-block:: bash
@@ -370,10 +472,91 @@ def meminfo():
         sysctlvmtot = [x for x in sysctlvmtot if x]
         ret['vm.vmtotal'] = sysctlvmtot
         return ret
 
+    def aix_meminfo():
+        '''
+        AIX specific implementation of meminfo
+        '''
+        ret = {}
+        ret['svmon'] = []
+        ret['vmstat'] = []
+        procn = None
+        fields = []
+        pagesize_flag = False
+        for line in __salt__['cmd.run']('svmon -G').splitlines():
+            # Note: svmon is per-system
+            #               size     inuse      free       pin   virtual  mmode
+            #memory      1048576   1039740      8836    285078    474993    Ded
+            #pg space     917504      2574
+            #
+            #               work      pers      clnt     other
+            #pin          248379         0      2107     34592
+            #in use       474993         0    564747
+            #
+            #PageSize   PoolSize     inuse      pgsp       pin   virtual
+            #s    4 KB        -     666956      2574     60726    102209
+            #m   64 KB        -      23299         0     14022     23299
+            if not line:
+                continue
+
+            if re.match(r'\s', line):
+                # assume fields line
+                fields = line.split()
+                continue
+
+            if line.startswith('memory') or line.startswith('pin'):
+                procn = len(ret['svmon'])
+                ret['svmon'].append({})
+                comps = line.split()
+                ret['svmon'][procn][comps[0]] = {}
+                for i in range(0, len(fields)):
+                    if len(comps) > i + 1:
+                        ret['svmon'][procn][comps[0]][fields[i]] = comps[i+1]
+                continue
+
+            if line.startswith('pg space') or line.startswith('in use'):
+                procn = len(ret['svmon'])
+                ret['svmon'].append({})
+                comps = line.split()
+                pg_space = '{0} {1}'.format(comps[0], comps[1])
+                ret['svmon'][procn][pg_space] = {}
+                for i in range(0, len(fields)):
+                    if len(comps) > i + 2:
+                        ret['svmon'][procn][pg_space][fields[i]] = comps[i+2]
+                continue
+
+            if line.startswith('PageSize'):
+                fields = line.split()
+                pagesize_flag = True
+                continue
+
+            if pagesize_flag:
+                procn = len(ret['svmon'])
+                ret['svmon'].append({})
+                comps = line.split()
+                ret['svmon'][procn][comps[0]] = {}
+                for i in range(0, len(fields)):
+                    if len(comps) > i:
+                        ret['svmon'][procn][comps[0]][fields[i]] = comps[i]
+                continue
+
+        for line in __salt__['cmd.run']('vmstat -v').splitlines():
+            # Note: vmstat -v is per-system; each line is '<number> <description>'
+            if not line:
+                continue
+
+            procn = len(ret['vmstat'])
+            ret['vmstat'].append({})
+            comps = line.lstrip().split(' ', 1)
+            ret['vmstat'][procn][comps[1]] = comps[0]
+
+        return ret
+
     # dict that returns a function that does the right thing per platform
     get_version = {
         'Linux': linux_meminfo,
         'FreeBSD': freebsd_meminfo,
+        'AIX': aix_meminfo,
     }
 
     errmsg = 'This method is unsupported on the current operating system!'
@@ -382,8 +565,11 @@ def meminfo():
 
 def cpuinfo():
     '''
-    ..versionchanged:: 2016.3.2
-        Return the CPU info for this minion
+    .. versionchanged:: 2016.3.2
+        Return the CPU info for this minion
+
+    .. versionchanged:: 2016.11.4
+        Added support for AIX
 
     CLI Example:
 
@@ -494,12 +680,82 @@ def cpuinfo():
            ret['psrinfo'][procn]['clock'] = "{0} {1}".format(line[10], line[11][:-1])
        return ret

    def aix_cpuinfo():
        '''
        AIX specific cpuinfo implementation
        '''
        ret = {}
        ret['prtconf'] = []
        ret['lparstat'] = []
        procn = None
        for line in __salt__['cmd.run']('prtconf | grep -i "Processor"', python_shell=True).splitlines():
            # Note: prtconf is per-system and not per-cpu
            # Output Example:
            #prtconf | grep -i "Processor"
            #Processor Type: PowerPC_POWER7
            #Processor Implementation Mode: POWER 7
            #Processor Version: PV_7_Compat
            #Number Of Processors: 2
            #Processor Clock Speed: 3000 MHz
            # Model Implementation: Multiple Processor, PCI bus
            # + proc0 Processor
            # + proc4 Processor
            if not line:
                continue
            procn = len(ret['prtconf'])
            if line.startswith('Processor') or line.startswith('Number'):
                ret['prtconf'].append({})
                comps = line.split(':')
                comps[0] = comps[0].rstrip()
                ret['prtconf'][procn][comps[0]] = comps[1]
            else:
                continue

        for line in __salt__['cmd.run']('prtconf | grep "CPU"', python_shell=True).splitlines():
            # Note: prtconf is per-system and not per-cpu
            # Output Example:
            #CPU Type: 64-bit
            if not line:
                continue
            procn = len(ret['prtconf'])
            if line.startswith('CPU'):
                ret['prtconf'].append({})
                comps = line.split(':')
                comps[0] = comps[0].rstrip()
                ret['prtconf'][procn][comps[0]] = comps[1]
            else:
                continue

        for line in __salt__['cmd.run']('lparstat -i | grep CPU', python_shell=True).splitlines():
            # Note: lparstat is per-system and not per-cpu
            # Output Example:
            #Online Virtual CPUs : 2
            #Maximum Virtual CPUs : 2
            #Minimum Virtual CPUs : 1
            #Maximum Physical CPUs in system : 32
            #Active Physical CPUs in system : 32
            #Active CPUs in Pool : 32
            #Shared Physical CPUs in system : 32
            #Physical CPU Percentage : 25.00%
            #Desired Virtual CPUs : 2
            if not line:
                continue

            procn = len(ret['lparstat'])
            ret['lparstat'].append({})
            comps = line.split(':')
            comps[0] = comps[0].rstrip()
            ret['lparstat'][procn][comps[0]] = comps[1]

        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_cpuinfo,
        'FreeBSD': bsd_cpuinfo,
        'OpenBSD': bsd_cpuinfo,
        'SunOS': sunos_cpuinfo,
        'AIX': aix_cpuinfo,
    }

    errmsg = 'This method is unsupported on the current operating system!'
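The three loops above all reduce to the same colon-split parse of "Key : Value" report lines. A standalone illustration, fed with the sample lparstat output from the comments (the .strip() calls are a slight tidying of the comps[0].rstrip() used above):

    sample = 'Online Virtual CPUs : 2\nMaximum Physical CPUs in system : 32\nPhysical CPU Percentage : 25.00%'

    parsed = []
    for line in sample.splitlines():
        key, value = (part.strip() for part in line.split(':', 1))
        parsed.append({key: value})
    print(parsed)
    # [{'Online Virtual CPUs': '2'}, {'Maximum Physical CPUs in system': '32'},
    #  {'Physical CPU Percentage': '25.00%'}]
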
@@ -508,8 +764,11 @@ def cpuinfo():

def diskstats():
    '''
    ..versionchanged:: 2016.3.2
    Return the disk stats for this minion
    .. versionchanged:: 2016.3.2
    Return the disk stats for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

@@ -565,11 +824,67 @@ def diskstats():
            ret[comps[0]][metric] = _number(value)
        return ret

    def aix_diskstats():
        '''
        AIX specific implementation of diskstats
        '''
        ret = {}
        procn = None
        fields = []
        disk_name = ''
        disk_mode = ''
        for line in __salt__['cmd.run']('iostat -dDV').splitlines():
            # Note: iostat -dDV is per-system
            #
            #System configuration: lcpu=8 drives=1 paths=2 vdisks=2
            #
            #hdisk0 xfer: %tm_act bps tps bread bwrtn
            # 0.0 0.8 0.0 0.0 0.8
            # read: rps avgserv minserv maxserv timeouts fails
            # 0.0 2.5 0.3 12.4 0 0
            # write: wps avgserv minserv maxserv timeouts fails
            # 0.0 0.3 0.2 0.7 0 0
            # queue: avgtime mintime maxtime avgwqsz avgsqsz sqfull
            # 0.3 0.0 5.3 0.0 0.0 0.0
            #--------------------------------------------------------------------------------
            if not line or line.startswith('System') or line.startswith('-----------'):
                continue

            if not re.match(r'\s', line):
                # have new disk
                dsk_comps = line.split(':')
                dsk_firsts = dsk_comps[0].split()
                disk_name = dsk_firsts[0]
                disk_mode = dsk_firsts[1]
                fields = dsk_comps[1].split()
                ret[disk_name] = []

                procn = len(ret[disk_name])
                ret[disk_name].append({})
                ret[disk_name][procn][disk_mode] = {}
                continue

            if ':' in line:
                comps = line.split(':')
                fields = comps[1].split()
                disk_mode = comps[0].lstrip()
                procn = len(ret[disk_name])
                ret[disk_name].append({})
                ret[disk_name][procn][disk_mode] = {}
            else:
                comps = line.split()
                for i in range(0, len(fields)):
                    if len(comps) > i:
                        ret[disk_name][procn][disk_mode][fields[i]] = comps[i]

        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_diskstats,
        'FreeBSD': generic_diskstats,
        'SunOS': generic_diskstats,
        'AIX': aix_diskstats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
@@ -654,8 +969,11 @@ def diskusage(*args):

def vmstats():
    '''
    ..versionchanged:: 2016.3.2
    Return the virtual memory stats for this minion
    .. versionchanged:: 2016.3.2
    Return the virtual memory stats for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

@@ -692,12 +1010,14 @@ def vmstats():
            if comps[0].isdigit():
                ret[' '.join(comps[1:])] = _number(comps[0].strip())
        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_vmstats,
        'FreeBSD': generic_vmstats,
        'OpenBSD': generic_vmstats,
        'SunOS': generic_vmstats,
        'AIX': generic_vmstats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
@@ -708,12 +1028,18 @@ def nproc():
    '''
    Return the number of processing units available on this system

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.nproc
    '''
    if __grains__['kernel'] == 'AIX':
        return _aix_nproc()

    try:
        return _number(__salt__['cmd.run']('nproc').strip())
    except ValueError:
@@ -724,6 +1050,9 @@ def netstats():
    '''
    Return the network stats for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

@@ -732,7 +1061,7 @@ def netstats():
    '''
    def linux_netstats():
        '''
        freebsd specific netstats implementation
        linux specific netstats implementation
        '''
        ret = {}
        try:
@@ -797,11 +1126,41 @@ def netstats():
                    ret[line[3]] = line[5]
        return ret

    def aix_netstats():
        '''
        AIX specific netstats implementation
        '''
        ret = {}
        fields = []
        procn = None
        proto_name = None
        for line in __salt__['cmd.run']('netstat -s').splitlines():
            if not line:
                continue

            if not re.match(r'\s', line) and ':' in line:
                comps = line.split(':')
                proto_name = comps[0]
                ret[proto_name] = []
                procn = len(ret[proto_name])
                ret[proto_name].append({})
                continue
            else:
                comps = line.split()
                comps[0] = comps[0].strip()
                if comps[0].isdigit():
                    ret[proto_name][procn][' '.join(comps[1:])] = _number(comps[0])
                else:
                    continue

        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_netstats,
        'FreeBSD': freebsd_netstats,
        'SunOS': sunos_netstats,
        'AIX': aix_netstats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
@@ -810,8 +1169,11 @@ def netstats():

def netdev():
    '''
    ..versionchanged:: 2016.3.2
    Return the network device stats for this minion
    .. versionchanged:: 2016.3.2
    Return the network device stats for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

@@ -910,11 +1272,75 @@ def netdev():

        return ret

    def aix_netdev():
        '''
        AIX specific implementation of netdev
        '''
        ret = {}
        fields = []
        procn = None
        for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces'].keys():
            # fetch device info
            #root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
            #Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
            #en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0
            #en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0
            #root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6
            #Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
            #en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0

            netstat_ipv4 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet'.format(dev=dev)).splitlines()
            netstat_ipv6 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet6'.format(dev=dev)).splitlines()

            # add data
            ret[dev] = []

            for line in netstat_ipv4:
                if line.startswith('Name'):
                    fields = line.split()
                    continue

                comps = line.split()
                if len(comps) < 3:
                    raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))

                if comps[2].startswith('link'):
                    continue

                procn = len(ret[dev])
                ret[dev].append({})
                ret[dev][procn]['ipv4'] = {}
                for i in range(1, len(fields)):
                    if len(comps) > i:
                        ret[dev][procn]['ipv4'][fields[i]] = comps[i]

            for line in netstat_ipv6:
                if line.startswith('Name'):
                    fields = line.split()
                    continue

                comps = line.split()
                if len(comps) < 3:
                    raise CommandExecutionError('Insufficient data returned by command to process \'{0}\''.format(line))

                if comps[2].startswith('link'):
                    continue

                procn = len(ret[dev])
                ret[dev].append({})
                ret[dev][procn]['ipv6'] = {}
                for i in range(1, len(fields)):
                    if len(comps) > i:
                        ret[dev][procn]['ipv6'][fields[i]] = comps[i]

        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_netdev,
        'FreeBSD': freebsd_netdev,
        'SunOS': sunos_netdev,
        'AIX': aix_netdev,
    }

    errmsg = 'This method is unsupported on the current operating system!'
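The ipv4/ipv6 loops above use the same header-driven mapping: the "Name Mtu ..." header row supplies the keys for every following data row. Standalone, with the sample row from the comment block (documented example, not live output):

    header = 'Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll'.split()
    row = 'en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0'.split()
    stats = {header[i]: row[i] for i in range(1, len(header)) if len(row) > i}
    print(stats)
    # {'Mtu': '1500', 'Network': '172.29.128', 'Address': '172.29.149.95',
    #  'Ipkts': '10029668', 'Ierrs': '0', 'Opkts': '446490', 'Oerrs': '0', 'Coll': '0'}
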
@@ -979,6 +1405,9 @@ def pid(sig):
        a Python-compatible regular expression to return all pids of
        processes matching the regexp.

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

@@ -1005,6 +1434,9 @@ def version():
    '''
    Return the system version for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

@@ -1025,6 +1457,7 @@ def version():
    get_version = {
        'Linux': linux_version,
        'FreeBSD': lambda: __salt__['cmd.run']('sysctl -n kern.version'),
        'AIX': lambda: __salt__['cmd.run']('oslevel -s'),
    }

    errmsg = 'This method is unsupported on the current operating system!'
@@ -1040,6 +1473,9 @@ def master(master=None, connected=True):
    run via a scheduled job from the minion. If master_ip is an FQDN/Hostname,
    it must be resolvable to a valid IPv4 address.

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

@@ -56,6 +56,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    ret['utils'] = sync_utils(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
    ret['sdb'] = sync_sdb(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
    ret['cache'] = sync_cache(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
    ret['tops'] = sync_tops(saltenv=saltenv)
    return ret

|
@ -399,6 +400,25 @@ def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
|
|||
extmod_blacklist=extmod_blacklist)[0]
|
||||
|
||||
|
||||
def sync_tops(saltenv='base'):
|
||||
'''
|
||||
.. versionadded:: 2016.3.7,2016.11.4,Nitrogen
|
||||
|
||||
Sync master_tops modules from ``salt://_tops`` to the master
|
||||
|
||||
saltenv : base
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run saltutil.sync_tops
|
||||
'''
|
||||
return salt.utils.extmods.sync(__opts__, 'tops', saltenv=saltenv)[0]
|
||||
|
||||
|
||||
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
|
||||
'''
|
||||
.. versionadded:: Nitrogen
|
||||
|
|
|
@@ -2008,8 +2008,7 @@ class State(object):
        tag = _gen_tag(low)
        if self.opts.get('test', False):
            return False
        if (low.get('failhard', False) or self.opts['failhard']
                and tag in running):
        if (low.get('failhard', False) or self.opts['failhard']) and tag in running:
            if running[tag]['result'] is None:
                return False
            return not running[tag]['result']
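The one-line rewrite above fixes a pure operator-precedence bug: Python binds `and` tighter than `or`, so the old condition grouped as failhard_low or (failhard_opt and tag in running) and could be true for a tag that is not in running at all, after which running[tag] raised. A standalone illustration:

    # Precedence bug fixed above: 'and' binds tighter than 'or'.
    low_failhard, opts_failhard = True, False
    running = {}  # the tag has not produced a result yet
    tag = 'test_|-b_|-b_|-fail_with_changes'

    old = low_failhard or opts_failhard and tag in running    # True (wrong grouping)
    new = (low_failhard or opts_failhard) and tag in running  # False (intended)
    print(old, new)  # True False; the old form went on to index running[tag]
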
@@ -2700,22 +2699,17 @@ class BaseHighState(object):
            envs.extend([x for x in list(self.opts['file_roots'])
                         if x not in envs])
        env_order = self.opts.get('env_order', [])
        # Remove duplicates while preserving the order
        members = set()
        env_order = [env for env in env_order if not (env in members or members.add(env))]
        client_envs = self.client.envs()
        if env_order and client_envs:
            client_env_list = self.client.envs()
            env_intersection = set(env_order).intersection(client_env_list)
            final_list = []
            for ord_env in env_order:
                if ord_env in env_intersection and ord_env not in final_list:
                    final_list.append(ord_env)
            return final_list
            return [env for env in env_order if env in client_envs]

        elif env_order:
            return env_order
        else:
            for cenv in client_envs:
                if cenv not in envs:
                    envs.append(cenv)
            envs.extend([env for env in client_envs if env not in envs])
            return envs

    def get_tops(self):
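The replacement code leans on an order-preserving dedup idiom: set.add() returns None, so the `or` arm records the element while keeping the filter expression falsy for first occurrences. A quick check:

    env_order = ['base', 'dev', 'base', 'qa', 'dev']
    members = set()
    deduped = [env for env in env_order if not (env in members or members.add(env))]
    print(deduped)  # ['base', 'dev', 'qa']
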
|
@ -2784,7 +2778,7 @@ class BaseHighState(object):
|
|||
tops[saltenv].append({})
|
||||
log.debug('No contents loaded for env: {0}'.format(saltenv))
|
||||
|
||||
if found > 1 and merging_strategy == 'merge':
|
||||
if found > 1 and merging_strategy == 'merge' and not self.opts.get('env_order', None):
|
||||
log.warning(
|
||||
'top_file_merging_strategy is set to \'%s\' and '
|
||||
'multiple top files were found. Merging order is not '
|
||||
|
|
|
@@ -85,19 +85,20 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None):

    try:
        fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file)
        log.debug("fetch_result=%s", str(fetch_result))

        ret['result'] = fetch_result['status']
        ret['comment'] = fetch_result['comment']
        ret['changes'] = fetch_result['changes']
        log.debug("ret=%s", str(ret))

        return ret
    except Exception as exc:
        ret['result'] = False
        ret['comment'] = exc
        ret['comment'] = str(exc)
        return ret

    log.debug("fetch_result=%s", str(fetch_result))

    ret['result'] = fetch_result['status']
    ret['comment'] = fetch_result['comment']
    ret['changes'] = fetch_result['changes']
    log.debug("ret=%s", str(ret))

    return ret


def __fetch_from_artifactory(artifact, target_dir, target_file):
    if ('latest_snapshot' in artifact and artifact['latest_snapshot']) or artifact['version'] == 'latest_snapshot':
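The str(exc) coercion above matters because a state's comment is expected to be a plain string that serializes cleanly in job returns; the raw exception object does not. A minimal demonstration, with json as a stand-in serializer:

    import json

    try:
        raise RuntimeError('error')
    except Exception as exc:  # pylint: disable=broad-except
        comment = str(exc)

    print(json.dumps({'result': False, 'comment': comment}))
    # {"result": false, "comment": "error"} -- dumping the raw exception raises TypeError
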

@@ -15,10 +15,9 @@ import logging
import salt.utils
import salt.utils.files
import salt.utils.stringio
from salt._compat import string_io

# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import StringIO

log = logging.getLogger(__name__)

@@ -82,7 +81,9 @@ def compile_template(template,
    # Get the list of render funcs in the render pipe line.
    render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)

    input_data = string_io(input_data)
    windows_newline = '\r\n' in input_data

    input_data = StringIO(input_data)
    for render, argline in render_pipe:
        if salt.utils.stringio.is_readable(input_data):
            input_data.seek(0)  # pylint: disable=no-member
@@ -113,6 +114,23 @@ def compile_template(template,
                        template,
                        ret.read()))  # pylint: disable=no-member
                ret.seek(0)  # pylint: disable=no-member

    # Preserve newlines from original template
    if windows_newline:
        if salt.utils.stringio.is_readable(ret):
            is_stringio = True
            contents = ret.read()
        else:
            is_stringio = False
            contents = ret

        if isinstance(contents, six.string_types):
            if '\r\n' not in contents:
                contents = contents.replace('\n', '\r\n')
                ret = StringIO(contents) if is_stringio else contents
            else:
                if is_stringio:
                    ret.seek(0)
    return ret

@@ -45,7 +45,7 @@ class ThorState(salt.state.HighState):
        opts['file_client'] = 'local'
        self.opts = opts
        if opts.get('minion_data_cache'):
            self.cache = salt.cache.Cache(opts)
            self.cache = salt.cache.factory(opts)
        salt.state.HighState.__init__(self, self.opts, loader='thorium')

        self.returners = salt.loader.returners(self.opts, {})
@@ -69,11 +69,11 @@ class ThorState(salt.state.HighState):
        cache = {'grains': {}, 'pillar': {}}
        if self.grains or self.pillar:
            if self.opts.get('minion_data_cache'):
                minions = self.cache.list('minions')
                minions = self.cache.ls('minions')
                if not minions:
                    return cache
                for minion in minions:
                    total = salt.cache.fetch('minions/{0}'.format(minion), 'data')
                    total = self.cache.fetch('minions/{0}'.format(minion), 'data')

                    if 'pillar' in total:
                        if self.pillar_keys:

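These call sites track a rename in the cache subsystem: instances now come from salt.cache.factory() rather than direct Cache() construction, and bank listing is ls() rather than list(). A hedged usage sketch of the new spelling (helper name is illustrative):

    import salt.cache

    def cached_minion_data(opts):
        cache = salt.cache.factory(opts)          # was: salt.cache.Cache(opts)
        for minion in cache.ls('minions') or []:  # was: cache.list('minions')
            data = cache.fetch('minions/{0}'.format(minion), 'data')
            if data is not None:
                yield minion, data
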
@@ -21,8 +21,12 @@ from salt.utils.cache import CacheCli

# Import Third Party Libs
import tornado.gen
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
try:
    from Cryptodome.Cipher import PKCS1_OAEP
    from Cryptodome.PublicKey import RSA
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP
    from Crypto.PublicKey import RSA


log = logging.getLogger(__name__)

@@ -50,7 +50,10 @@ else:
    # pylint: enable=import-error,no-name-in-module

# Import third party libs
from Crypto.Cipher import PKCS1_OAEP
try:
    from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP

if six.PY3 and salt.utils.is_windows():
    USE_LOAD_BALANCER = True

@@ -47,7 +47,10 @@ import tornado.concurrent

# Import third party libs
import salt.ext.six as six
from Crypto.Cipher import PKCS1_OAEP
try:
    from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP

log = logging.getLogger(__name__)

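The same import shim is repeated across these modules: prefer the maintained pycryptodome(x) package, fall back to legacy PyCrypto, with an API-compatible result either way. A standalone round trip through the shim (both libraries expose this PKCS1_OAEP/RSA interface):

    try:
        from Cryptodome.Cipher import PKCS1_OAEP
        from Cryptodome.PublicKey import RSA
    except ImportError:
        from Crypto.Cipher import PKCS1_OAEP
        from Crypto.PublicKey import RSA

    key = RSA.generate(2048)
    ciphertext = PKCS1_OAEP.new(key.publickey()).encrypt(b'attack at dawn')
    print(PKCS1_OAEP.new(key).decrypt(ciphertext))  # b'attack at dawn'
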
@@ -17,6 +17,7 @@ import json
import logging
import numbers
import os
import posixpath
import random
import re
import shlex
@@ -40,7 +41,6 @@ from salt.ext.six.moves.urllib.parse import urlparse  # pylint: disable=no-name-
# pylint: disable=redefined-builtin
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
from salt.ext.six.moves import map
from stat import S_IMODE
# pylint: enable=import-error,redefined-builtin

@@ -912,21 +912,32 @@ def backup_minion(path, bkroot):
        os.chmod(bkpath, fstat.st_mode)


def path_join(*parts):
def path_join(*parts, **kwargs):
    '''
    This function tries to solve some issues when joining multiple absolute
    paths on both *nix and windows platforms.

    See tests/unit/utils/path_join_test.py for some examples on what's being
    talked about here.

    The "use_posixpath" kwarg can be used to force joining using posixpath,
    which is useful for Salt fileserver paths on Windows masters.
    '''
    if six.PY3:
        new_parts = []
        for part in parts:
            new_parts.append(to_str(part))
        parts = new_parts

    kwargs = salt.utils.clean_kwargs(**kwargs)
    use_posixpath = kwargs.pop('use_posixpath', False)
    if kwargs:
        invalid_kwargs(kwargs)

    pathlib = posixpath if use_posixpath else os.path

    # Normalize path converting any os.sep as needed
    parts = [os.path.normpath(p) for p in parts]
    parts = [pathlib.normpath(p) for p in parts]

    try:
        root = parts.pop(0)
@@ -937,14 +948,9 @@ def path_join(*parts):
    if not parts:
        ret = root
    else:
        if is_windows():
            if len(root) == 1:
                root += ':'
            root = root.rstrip(os.sep) + os.sep

        stripped = [p.lstrip(os.sep) for p in parts]
        try:
            ret = os.path.join(root, *stripped)
            ret = pathlib.join(root, *stripped)
        except UnicodeDecodeError:
            # This is probably Python 2 and one of the parts contains unicode
            # characters in a bytestring. First try to decode to the system
@@ -954,13 +960,13 @@ def path_join(*parts):
        except NameError:
            enc = sys.stdin.encoding or sys.getdefaultencoding()
        try:
            ret = os.path.join(root.decode(enc),
            ret = pathlib.join(root.decode(enc),
                               *[x.decode(enc) for x in stripped])
        except UnicodeDecodeError:
            # Last resort, try UTF-8
            ret = os.path.join(root.decode('UTF-8'),
            ret = pathlib.join(root.decode('UTF-8'),
                               *[x.decode('UTF-8') for x in stripped])
    return os.path.normpath(ret)
    return pathlib.normpath(ret)


def pem_finger(path=None, key=None, sum_type='sha256'):

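The point of use_posixpath: os.path on a Windows master is ntpath, which joins with backslashes, while fileserver (and gitfs) paths must stay forward-slashed. The difference in one line each:

    import ntpath
    import posixpath

    print(ntpath.join('webserver', 'files/apache.conf'))     # webserver\files/apache.conf
    print(posixpath.join('webserver', 'files/apache.conf'))  # webserver/files/apache.conf
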
@@ -55,6 +55,8 @@ except ImportError:
import salt.crypt
import salt.client
import salt.config
import salt.loader
import salt.template
import salt.utils
import salt.utils.event
from salt.utils import vt
@@ -3248,3 +3250,51 @@ def check_key_path_and_mode(provider, key_path):
        return False

    return True


def userdata_template(opts, vm_, userdata):
    '''
    Use the configured templating engine to template the userdata file
    '''
    # No userdata, no need to template anything
    if userdata is None:
        return userdata

    userdata_template = salt.config.get_cloud_config_value(
        'userdata_template', vm_, opts, search_global=False, default=None
    )
    if userdata_template is False:
        return userdata
    # Use the cloud profile's userdata_template, otherwise get it from the
    # master configuration file.
    renderer = opts.get('userdata_template') \
        if userdata_template is None \
        else userdata_template
    if renderer is None:
        return userdata
    else:
        render_opts = opts.copy()
        render_opts.update(vm_)
        rend = salt.loader.render(render_opts, {})
        blacklist = opts['renderer_blacklist']
        whitelist = opts['renderer_whitelist']
        templated = salt.template.compile_template(
            ':string:',
            rend,
            renderer,
            blacklist,
            whitelist,
            input_data=userdata,
        )
        if not isinstance(templated, six.string_types):
            # template renderers like "jinja" should return a StringIO
            try:
                templated = ''.join(templated.readlines())
            except AttributeError:
                log.warning(
                    'Templated userdata resulted in non-string result (%s), '
                    'converting to string', templated
                )
                templated = str(templated)

        return templated

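A hedged usage sketch for the new helper (the option names come from the function body above; the profile layout and template context are illustrative, and the call is left commented because it needs a loaded renderer environment):

    opts = {
        'userdata_template': 'jinja',  # master-config fallback renderer
        'renderer_blacklist': [],
        'renderer_whitelist': [],
    }
    vm_ = {'name': 'web01'}  # profile values; a per-profile userdata_template wins
    userdata = '#!/bin/bash\necho web01'
    # rendered = salt.utils.cloud.userdata_template(opts, vm_, userdata)
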
@@ -988,7 +988,8 @@ class GitPython(GitProvider):
            relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
        else:
            relpath = lambda path: path
        add_mountpoint = lambda path: salt.utils.path_join(self.mountpoint(tgt_env), path)
        add_mountpoint = lambda path: salt.utils.path_join(
            self.mountpoint(tgt_env), path, use_posixpath=True)
        for blob in tree.traverse():
            if isinstance(blob, git.Tree):
                ret.add(add_mountpoint(relpath(blob.path)))

@@ -1060,7 +1061,8 @@ class GitPython(GitProvider):
            relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
        else:
            relpath = lambda path: path
        add_mountpoint = lambda path: salt.utils.path_join(self.mountpoint(tgt_env), path)
        add_mountpoint = lambda path: salt.utils.path_join(
            self.mountpoint(tgt_env), path, use_posixpath=True)
        for file_blob in tree.traverse():
            if not isinstance(file_blob, git.Blob):
                continue

@@ -1102,7 +1104,8 @@ class GitPython(GitProvider):
                    stream.seek(0)
                    link_tgt = stream.read()
                    stream.close()
                    path = salt.utils.path_join(os.path.dirname(path), link_tgt)
                    path = salt.utils.path_join(
                        os.path.dirname(path), link_tgt, use_posixpath=True)
                else:
                    blob = file_blob
            if isinstance(blob, git.Tree):

@@ -1457,9 +1460,14 @@ class Pygit2(GitProvider):
                blob = self.repo[entry.oid]
                if not isinstance(blob, pygit2.Tree):
                    continue
                blobs.append(salt.utils.path_join(prefix, entry.name))
                blobs.append(
                    salt.utils.path_join(prefix, entry.name, use_posixpath=True)
                )
                if len(blob):
                    _traverse(blob, blobs, salt.utils.path_join(prefix, entry.name))
                    _traverse(
                        blob, blobs, salt.utils.path_join(
                            prefix, entry.name, use_posixpath=True)
                    )

        ret = set()
        tree = self.get_tree(tgt_env)

@@ -1479,7 +1487,8 @@ class Pygit2(GitProvider):
        blobs = []
        if len(tree):
            _traverse(tree, blobs, self.root(tgt_env))
        add_mountpoint = lambda path: salt.utils.path_join(self.mountpoint(tgt_env), path)
        add_mountpoint = lambda path: salt.utils.path_join(
            self.mountpoint(tgt_env), path, use_posixpath=True)
        for blob in blobs:
            ret.add(add_mountpoint(relpath(blob)))
        if self.mountpoint(tgt_env):

@@ -1567,13 +1576,17 @@ class Pygit2(GitProvider):
                    continue
                obj = self.repo[entry.oid]
                if isinstance(obj, pygit2.Blob):
                    repo_path = salt.utils.path_join(prefix, entry.name)
                    repo_path = salt.utils.path_join(
                        prefix, entry.name, use_posixpath=True)
                    blobs.setdefault('files', []).append(repo_path)
                    if stat.S_ISLNK(tree[entry.name].filemode):
                        link_tgt = self.repo[tree[entry.name].oid].data
                        blobs.setdefault('symlinks', {})[repo_path] = link_tgt
                elif isinstance(obj, pygit2.Tree):
                    _traverse(obj, blobs, salt.utils.path_join(prefix, entry.name))
                    _traverse(
                        obj, blobs, salt.utils.path_join(
                            prefix, entry.name, use_posixpath=True)
                    )

        files = set()
        symlinks = {}

@@ -1597,7 +1610,8 @@ class Pygit2(GitProvider):
        blobs = {}
        if len(tree):
            _traverse(tree, blobs, self.root(tgt_env))
        add_mountpoint = lambda path: salt.utils.path_join(self.mountpoint(tgt_env), path)
        add_mountpoint = lambda path: salt.utils.path_join(
            self.mountpoint(tgt_env), path, use_posixpath=True)
        for repo_path in blobs.get('files', []):
            files.add(add_mountpoint(relpath(repo_path)))
        for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})):

@@ -1629,7 +1643,8 @@ class Pygit2(GitProvider):
                        # the symlink and set path to the location indicated
                        # in the blob data.
                        link_tgt = self.repo[entry.oid].data
                        path = salt.utils.path_join(os.path.dirname(path), link_tgt)
                        path = salt.utils.path_join(
                            os.path.dirname(path), link_tgt, use_posixpath=True)
                    else:
                        blob = self.repo[entry.oid]
                    if isinstance(blob, pygit2.Tree):

@@ -1829,8 +1844,7 @@ class GitBase(object):
                self.role)
        self.remote_root = salt.utils.path_join(self.cache_root, 'remotes')
        self.env_cache = salt.utils.path_join(self.cache_root, 'envs.p')
        self.hash_cachedir = salt.utils.path_join(
            self.cache_root, 'hash')
        self.hash_cachedir = salt.utils.path_join(self.cache_root, 'hash')
        self.file_list_cachedir = salt.utils.path_join(
            self.opts['cachedir'], 'file_lists', self.role)

@@ -103,7 +103,7 @@ class MasterPillarUtil(object):
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        self.cache = salt.cache.Cache(opts)
        self.cache = salt.cache.factory(opts)
        log.debug(
            'Init settings: tgt: \'{0}\', tgt_type: \'{1}\', saltenv: \'{2}\', '
            'use_cached_grains: {3}, use_cached_pillar: {4}, '

@@ -122,7 +122,7 @@ class MasterPillarUtil(object):
                      'and enfore_mine_cache are both disabled.')
            return mine_data
        if not minion_ids:
            minion_ids = self.cache.list('minions')
            minion_ids = self.cache.ls('minions')
        for minion_id in minion_ids:
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue

@@ -141,7 +141,7 @@ class MasterPillarUtil(object):
                      'enabled.')
            return grains, pillars
        if not minion_ids:
            minion_ids = self.cache.list('minions')
            minion_ids = self.cache.ls('minions')
        for minion_id in minion_ids:
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue

@@ -364,7 +364,7 @@ class MasterPillarUtil(object):
        # in the same file, 'data.p'
        grains, pillars = self._get_cached_minion_data(*minion_ids)
        try:
            c_minions = self.cache.list('minions')
            c_minions = self.cache.ls('minions')
            for minion_id in minion_ids:
                if not salt.utils.verify.valid_id(self.opts, minion_id):
                    continue

@@ -73,9 +73,9 @@ def get_minion_data(minion, opts):
    grains = None
    pillar = None
    if opts.get('minion_data_cache', False):
        cache = salt.cache.Cache(opts)
        cache = salt.cache.factory(opts)
        if minion is None:
            for id_ in cache.list('minions'):
            for id_ in cache.ls('minions'):
                data = cache.fetch('minions/{0}'.format(id_), 'data')
                if data is None:
                    continue

@@ -180,7 +180,7 @@ class CkMinions(object):
    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(opts)
        self.cache = salt.cache.Cache(opts)
        self.cache = salt.cache.factory(opts)
        # TODO: this is actually an *auth* check
        if self.opts.get('transport', 'zeromq') in ('zeromq', 'tcp'):
            self.acc = 'minions'

@@ -344,13 +344,13 @@ class CkMinions(object):
        if greedy:
            minions = self._pki_minions()
        elif cache_enabled:
            minions = self.cache.list('minions')
            minions = self.cache.ls('minions')
        else:
            return []

        if cache_enabled:
            if greedy:
                cminions = self.cache.list('minions')
                cminions = self.cache.ls('minions')
            else:
                cminions = minions
            if cminions is None:

@@ -414,7 +414,7 @@ class CkMinions(object):
                    mlist.append(fn_)
            return mlist
        elif cache_enabled:
            return self.cache.list('minions')
            return self.cache.ls('minions')
        else:
            return list()

@@ -576,7 +576,7 @@ class CkMinions(object):
        '''
        minions = set()
        if self.opts.get('minion_data_cache', False):
            search = self.cache.list('minions')
            search = self.cache.ls('minions')
            if search is None:
                return minions
            addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port']))

@@ -1095,7 +1095,7 @@ def mine_get(tgt, fun, tgt_type='glob', opts=None):
    minions = checker.check_minions(
        tgt,
        tgt_type)
    cache = salt.cache.Cache(opts)
    cache = salt.cache.factory(opts)
    for minion in minions:
        mdata = cache.fetch('minions/{0}'.format(minion), 'mine')
        if mdata is None:

@@ -1250,6 +1250,8 @@ def _remotes_on(port, which_end):
        return _openbsd_remotes_on(port, which_end)
    if salt.utils.is_windows():
        return _windows_remotes_on(port, which_end)
    if salt.utils.is_aix():
        return _aix_remotes_on(port, which_end)

    return _linux_remotes_on(port, which_end)

@@ -1560,3 +1562,53 @@ def _linux_remotes_on(port, which_end):
            remotes.add(rhost.strip("[]"))

    return remotes


def _aix_remotes_on(port, which_end):
    '''
    AIX specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n
    Active Internet connections
    Proto Recv-Q Send-Q Local Address Foreign Address (state)
    tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED
    tcp4 0 0 127.0.0.1.9514 *.* LISTEN
    tcp4 0 0 127.0.0.1.9515 *.* LISTEN
    tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED
    tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED
    tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED
    tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED
    tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED
    tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED
    tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED
    tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED
    tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED
    tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED
    tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED
    tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-f', 'inet', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    lines = salt.utils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes

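AIX netstat joins address and port with a dot rather than a colon, which is why the parser peels the port off with rsplit('.', 1). Checked against the first sample line from the docstring:

    line = 'tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED'
    chunks = line.split()
    local_host, local_port = chunks[3].rsplit('.', 1)
    remote_host, remote_port = chunks[4].rsplit('.', 1)
    print(local_host, local_port)    # 172.29.149.95 50093
    print(remote_host, remote_port)  # 209.41.78.13 4505
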
@@ -12,7 +12,10 @@ import random

# Import 3rd-party libs
try:
    import Crypto.Random  # pylint: disable=E0611
    try:
        import Cryptodome.Random as CRand  # pylint: disable=E0611
    except ImportError:
        import Crypto.Random as CRand  # pylint: disable=E0611
    HAS_RANDOM = True
except ImportError:
    HAS_RANDOM = False
@@ -40,7 +43,7 @@ def secure_password(length=20, use_random=True):
            pw += re.sub(
                r'\W',
                '',
                salt.utils.to_str(Crypto.Random.get_random_bytes(1))
                salt.utils.to_str(CRand.get_random_bytes(1))
            )
        else:
            pw += random.SystemRandom().choice(string.ascii_letters + string.digits)

@@ -134,6 +134,7 @@ class RSAX931Verifier(object):
        :param str pubdata: The RSA public key in PEM format
        '''
        pubdata = salt.utils.to_bytes(pubdata, 'ascii')
        pubdata = pubdata.replace('RSA ', '')
        self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata))
        self._rsa = c_void_p(libcrypto.RSA_new())
        if not libcrypto.PEM_read_bio_RSA_PUBKEY(self._bio, pointer(self._rsa), None, None):

@@ -66,7 +66,7 @@ class SaltStackVersion(object):
        r'\.(?P<minor>[\d]{1,2})'
        r'(?:\.(?P<bugfix>[\d]{0,2}))?'
        r'(?:\.(?P<mbugfix>[\d]{0,2}))?'
        r'(?:(?P<pre_type>rc|a|b|alpha|beta)(?P<pre_num>[\d]{1}))?'
        r'(?:(?P<pre_type>rc|a|b|alpha|beta|nb)(?P<pre_num>[\d]{1}))?'
        r'(?:(?:.*)-(?P<noc>(?:[\d]+|n/a))-(?P<sha>[a-z0-9]{8}))?'
    )
    git_sha_regex = re.compile(r'(?P<sha>[a-z0-9]{7})')
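The regex change only widens the accepted pre-release tags with `nb`. A quick check of the extended alternation against a version string using the new tag (the leading major group is reconstructed here, since the excerpt starts at the minor group):

    import re

    version_regex = re.compile(
        r'^(?P<major>[\d]{1,4})'
        r'\.(?P<minor>[\d]{1,2})'
        r'(?:\.(?P<bugfix>[\d]{0,2}))?'
        r'(?:\.(?P<mbugfix>[\d]{0,2}))?'
        r'(?:(?P<pre_type>rc|a|b|alpha|beta|nb)(?P<pre_num>[\d]{1}))?'
    )
    match = version_regex.match('2016.11.4nb1')
    print(match.group('pre_type'), match.group('pre_num'))  # nb 1
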
@@ -580,6 +580,7 @@ def dependency_information(include_salt_cloud=False):
        ('msgpack-python', 'msgpack', 'version'),
        ('msgpack-pure', 'msgpack_pure', 'version'),
        ('pycrypto', 'Crypto', '__version__'),
        ('pycryptodome', 'Cryptodome', 'version_info'),
        ('libnacl', 'libnacl', '__version__'),
        ('PyYAML', 'yaml', '__version__'),
        ('ioflo', 'ioflo', '__version__'),

@@ -0,0 +1,13 @@
a:
  test.show_notification:
    - name: a
    - text: message
    - require:
      - test: b
    - order: 1
    - failhard: True

b:
  test.fail_with_changes:
    - name: b
    - failhard: True

@@ -1254,3 +1254,23 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
        self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
        self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
        self.assertEqual(state_run[retry_state]['result'], True)

    def test_issue_38683_require_order_failhard_combination(self):
        '''
        This tests the case where require, order, and failhard are all used together in a state definition.

        Previously, the order option, when used in tandem with require and failhard, would cause the state
        compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
        compiler. With the logic error resolved, this test should now pass.

        See https://github.com/saltstack/salt/issues/38683 for more information.
        '''
        state_run = self.run_function(
            'state.sls',
            mods='requisites.require_order_failhard_combo'
        )
        state_id = 'test_|-b_|-b_|-fail_with_changes'

        self.assertIn(state_id, state_run)
        self.assertEqual(state_run[state_id]['comment'], 'Failure!')
        self.assertFalse(state_run[state_id]['result'])

tests/unit/cache/test_localfs.py

@@ -209,28 +209,28 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin):
        '''
        self.assertRaises(SaltCacheError, localfs.flush, bank='', key='key', cachedir='/var/cache/salt')

    # 'list' function tests: 3
    # 'ls' function tests: 3

    @patch('os.path.isdir', MagicMock(return_value=False))
    def test_list_no_base_dir(self):
    def test_ls_no_base_dir(self):
        '''
        Tests that the list function returns an empty list if the bank directory
        Tests that the ls function returns an empty list if the bank directory
        doesn't exist.
        '''
        self.assertEqual(localfs.list_(bank='', cachedir=''), [])
        self.assertEqual(localfs.ls(bank='', cachedir=''), [])

    @patch('os.path.isdir', MagicMock(return_value=True))
    @patch('os.listdir', MagicMock(side_effect=OSError))
    def test_list_error_raised_no_bank_directory_access(self):
    def test_ls_error_raised_no_bank_directory_access(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem accessing the
        cache bank directory.
        '''
        self.assertRaises(SaltCacheError, localfs.list_, bank='', cachedir='')
        self.assertRaises(SaltCacheError, localfs.ls, bank='', cachedir='')

    def test_list_success(self):
    def test_ls_success(self):
        '''
        Tests the return of the list function containing bank entries.
        Tests the return of the ls function containing bank entries.
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)

@@ -238,9 +238,9 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin):
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))

        # Now test the return of the list function
        # Now test the return of the ls function
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            self.assertEqual(localfs.list_(bank='bank', cachedir=tmp_dir), ['key'])
            self.assertEqual(localfs.ls(bank='bank', cachedir=tmp_dir), ['key'])

    # 'contains' function tests: 1

@@ -2,18 +2,24 @@

# import Python Libs
from __future__ import absolute_import
import tempfile

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
    MagicMock,
    NO_MOCK,
    NO_MOCK_REASON
    NO_MOCK_REASON,
    patch
)

# Import Salt Libs
import salt.modules.ssh as ssh
from salt.exceptions import CommandExecutionError
import salt.utils

ssh.__salt__ = {}


@skipIf(NO_MOCK, NO_MOCK_REASON)
@@ -69,3 +75,61 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
        # Inserting invalid public key should be rejected
        invalid_key = 'AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY'  # missing padding
        self.assertEqual(ssh.set_auth_key('user', invalid_key), 'Invalid public key')

    def test_replace_auth_key(self):
        '''
        Test the _replace_auth_key with some different authorized_keys examples
        '''
        # First test a known working example, gathered from the authorized_keys file
        # in the integration test files.
        enc = 'ssh-rsa'
        key = 'AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+' \
              'PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNl' \
              'GEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWp' \
              'XLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal' \
              '72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi' \
              '/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=='
        options = 'command="/usr/local/lib/ssh-helper"'
        email = 'github.com'

        # Write out the authorized key to a temporary file
        temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
        temp_file.write('{0} {1} {2} {3}'.format(options, enc, key, email))
        temp_file.close()

        with patch.dict(ssh.__salt__, {'user.info': MagicMock(return_value={})}):
            with patch('salt.modules.ssh._get_config_file', MagicMock(return_value=temp_file.name)):
                ssh._replace_auth_key('foo', key, config=temp_file.name)

        # The previous authorized key should have been replaced by the simpler one
        with salt.utils.fopen(temp_file.name) as _fh:
            file_txt = _fh.read()
        self.assertIn(enc, file_txt)
        self.assertIn(key, file_txt)
        self.assertNotIn(options, file_txt)
        self.assertNotIn(email, file_txt)

        # Now test a very simple key using ecdsa instead of ssh-rsa and with multiple options
        enc = 'ecdsa-sha2-nistp256'
        key = 'abcxyz'

        with salt.utils.fopen(temp_file.name, 'a') as _fh:
            _fh.write('{0} {1}'.format(enc, key))

        # Replace the simple key from before with the more complicated options + new email
        # Option example is taken from Pull Request #39855
        options = ['no-port-forwarding', 'no-agent-forwarding', 'no-X11-forwarding',
                   'command="echo \'Please login as the user \"ubuntu\" rather than the user \"root\".\'']
        email = 'foo@example.com'

        with patch.dict(ssh.__salt__, {'user.info': MagicMock(return_value={})}):
            with patch('salt.modules.ssh._get_config_file', MagicMock(return_value=temp_file.name)):
                ssh._replace_auth_key('foo', key, enc=enc, comment=email, options=options, config=temp_file.name)

        # Assert that the new line was added as-is to the file
        with salt.utils.fopen(temp_file.name) as _fh:
            file_txt = _fh.read()
        self.assertIn(enc, file_txt)
        self.assertIn(key, file_txt)
        self.assertIn('{0} '.format(','.join(options)), file_txt)
        self.assertIn(email, file_txt)

@@ -54,4 +54,4 @@ class ArtifactoryTestCase(TestCase, LoaderModuleMockMixin):
                       MagicMock(side_effect=Exception('error'))):
            ret = artifactory.downloaded(name, artifact)
            self.assertEqual(ret['result'], False)
            self.assertEqual(repr(ret['comment']), repr(Exception('error')))
            self.assertEqual(ret['comment'], 'error')

@@ -7,10 +7,12 @@
from __future__ import absolute_import

# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock

# Import Salt libs
from salt import template
from salt.ext.six.moves import StringIO


class TemplateTestCase(TestCase):
@@ -26,6 +28,57 @@ class TemplateTestCase(TestCase):
        ret = template.compile_template(['1', '2', '3'], None, None, None, None)
        self.assertDictEqual(ret, {})

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    def test_compile_template_preserves_windows_newlines(self):
        '''
        Test to ensure that a file with Windows newlines, when rendered by a
        template renderer, does not eat the CR character.
        '''
        def _get_rend(renderer, value):
            '''
            We need a new MagicMock each time since we're dealing with StringIO
            objects which are read like files.
            '''
            return {renderer: MagicMock(return_value=StringIO(value))}

        input_data_windows = 'foo\r\nbar\r\nbaz\r\n'
        input_data_non_windows = input_data_windows.replace('\r\n', '\n')
        renderer = 'test'
        blacklist = whitelist = []

        ret = template.compile_template(
            ':string:',
            _get_rend(renderer, input_data_non_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_windows).read()
        # Even though the mocked renderer returned a string without the windows
        # newlines, the compiled template should still have them.
        self.assertEqual(ret, input_data_windows)

        # Now test that we aren't adding them in unnecessarily.
        ret = template.compile_template(
            ':string:',
            _get_rend(renderer, input_data_non_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_non_windows).read()
        self.assertEqual(ret, input_data_non_windows)

        # Finally, ensure that we're not unnecessarily replacing the \n with
        # \r\n in the event that the renderer returned a string with the
        # windows newlines intact.
        ret = template.compile_template(
            ':string:',
            _get_rend(renderer, input_data_windows),
            renderer,
            blacklist,
            whitelist,
            input_data=input_data_windows).read()
        self.assertEqual(ret, input_data_windows)

    def test_check_render_pipe_str(self):
        '''
        Check that all renderers specified in the pipe string are available.

@@ -46,7 +46,6 @@ class PathJoinTestCase(TestCase):
        ((r'c:\\', r'\temp', r'\foo'), 'c:\\temp\\foo'),
        (('c:', r'\temp', r'\foo', 'bar'), 'c:\\temp\\foo\\bar'),
        (('c:', r'\temp', r'\foo\bar'), 'c:\\temp\\foo\\bar'),
        (('c', r'\temp', r'\foo\bar'), 'c:\\temp\\foo\\bar')
    )

    @skipIf(True, 'Skipped until properly mocked')