Mirror of https://github.com/saltstack/salt.git
Merge pull request #37773 from rallytime/merge-2016.11
[2016.11] Merge forward from 2016.3 to 2016.11
Commit 3835f91d99
4 changed files with 118 additions and 37 deletions
@@ -8,7 +8,7 @@ The OpenNebula cloud module is used to control access to an OpenNebula cloud.
 .. versionadded:: 2014.7.0
 
 :depends: lxml
-:depends: OpenNebula installation running version ``4.14``.
+:depends: OpenNebula installation running version ``4.14`` or later.
 
 Use of this module requires the ``xml_rpc``, ``user``, and ``password``
 parameters to be set.
@@ -86,6 +86,7 @@ try:
 except ImportError:
     HAS_XML_LIBS = False
 
+
 # Get Logging Started
 log = logging.getLogger(__name__)
 
@@ -147,7 +148,8 @@ def avail_images(call=None):
 
     server, user, password = _get_xml_rpc()
     auth = ':'.join([user, password])
-    image_pool = server.one.imagepool.info(auth, -1, -1, -1)[1]
+
+    image_pool = server.one.imagepool.info(auth, -2, -1, -1)[1]
 
     images = {}
     for image in _get_xml(image_pool):
@@ -358,7 +360,7 @@ def list_security_groups(call=None):
 
     server, user, password = _get_xml_rpc()
     auth = ':'.join([user, password])
-    secgroup_pool = server.one.secgrouppool.info(auth, -1, -1, -1)[1]
+    secgroup_pool = server.one.secgrouppool.info(auth, -2, -1, -1)[1]
 
     groups = {}
     for group in _get_xml(secgroup_pool):
@@ -386,7 +388,7 @@ def list_templates(call=None):
 
     server, user, password = _get_xml_rpc()
     auth = ':'.join([user, password])
-    template_pool = server.one.templatepool.info(auth, -1, -1, -1)[1]
+    template_pool = server.one.templatepool.info(auth, -2, -1, -1)[1]
 
     templates = {}
     for template in _get_xml(template_pool):
@@ -414,7 +416,7 @@ def list_vns(call=None):
 
     server, user, password = _get_xml_rpc()
     auth = ':'.join([user, password])
-    vn_pool = server.one.vnpool.info(auth, -1, -1, -1)[1]
+    vn_pool = server.one.vnpool.info(auth, -2, -1, -1)[1]
 
     vns = {}
     for v_network in _get_xml(vn_pool):
@@ -498,6 +500,33 @@ def stop(name, call=None):
     return vm_action(name, kwargs={'action': 'stop'}, call=call)
 
 
+def get_one_version(kwargs=None, call=None):
+    '''
+    Returns the OpenNebula version.
+
+    .. versionadded:: 2016.3.5
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f get_one_version one_provider_name
+    '''
+
+    if call == 'action':
+        raise SaltCloudSystemExit(
+            'The get_cluster_id function must be called with -f or --function.'
+        )
+
+    if kwargs is None:
+        kwargs = {}
+
+    server, user, password = _get_xml_rpc()
+    auth = ':'.join([user, password])
+
+    return server.one.system.version(auth)[1]
+
+
 def get_cluster_id(kwargs=None, call=None):
     '''
     Returns a cluster's ID from the given cluster name.
@@ -4421,7 +4450,7 @@ def _list_nodes(full=False):
     server, user, password = _get_xml_rpc()
     auth = ':'.join([user, password])
 
-    vm_pool = server.one.vmpool.info(auth, -1, -1, -1, -1)[1]
+    vm_pool = server.one.vmpool.info(auth, -2, -1, -1, -1)[1]
 
     vms = {}
     for vm in _get_xml(vm_pool):
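The recurring -1 to -2 change in these hunks (which appear to come from the OpenNebula salt-cloud driver, salt/cloud/clouds/opennebula.py) is OpenNebula's XML-RPC pool filter flag: -2 asks for every resource the session is allowed to see, while -1 restricts the listing to resources owned by the calling user and its groups. The RPC calls answer with a [success, payload, errcode] triple, which is why the driver indexes [1]. A minimal standalone sketch of the same call pattern, with a placeholder endpoint and credentials rather than anything taken from the diff:

    # Sketch of the XML-RPC calls the driver wraps; endpoint and credentials
    # below are placeholders, not values from the diff.
    import xmlrpclib  # xmlrpc.client on Python 3

    ONE_ENDPOINT = 'http://opennebula.example.com:2633/RPC2'  # assumed endpoint
    auth = ':'.join(['oneadmin', 'secret'])  # 'user:password', as in _get_xml_rpc()

    server = xmlrpclib.ServerProxy(ONE_ENDPOINT)

    # one.imagepool.info(session, filter_flag, start_id, end_id):
    # filter_flag -2 lists all resources visible to the session,
    # -1 only those owned by the user and its groups.
    resp = server.one.imagepool.info(auth, -2, -1, -1)
    if not resp[0]:
        raise RuntimeError(resp[1])  # on failure the payload is the error string

    # resp[1] is an XML description of the pool; the driver parses it with
    # lxml via _get_xml(), here we just show the raw payload.
    print(resp[1][:200])

    # one.system.version() has the same shape, which is what the new
    # get_one_version() function returns via [1].
    print(server.one.system.version(auth)[1])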
@@ -21,7 +21,11 @@ from salt.exceptions import CommandExecutionError
 __proxyenabled__ = ['*']
 
 
-def get(key, default=KeyError, merge=False, delimiter=DEFAULT_TARGET_DELIM):
+def get(key,
+        default=KeyError,
+        merge=False,
+        delimiter=DEFAULT_TARGET_DELIM,
+        saltenv=None):
     '''
     .. versionadded:: 0.14
 
@@ -54,6 +58,22 @@ def get(key, default=KeyError, merge=False, delimiter=DEFAULT_TARGET_DELIM):
 
         .. versionadded:: 2014.7.0
 
+    saltenv
+        If specified, this function will query the master to generate fresh
+        pillar data on the fly, specifically from the requested pillar
+        environment. Note that this can produce different pillar data than
+        executing this function without an environment, as its normal behavior
+        is just to return a value from minion's pillar data in memory (which
+        can be sourced from more than one pillar environment).
+
+        Using this argument will not affect the pillar data in memory. It will
+        however be slightly slower and use more resources on the master due to
+        the need for the master to generate and send the minion fresh pillar
+        data. This tradeoff in performance however allows for the use case
+        where pillar data is desired only from a single environment.
+
+        .. versionadded:: Nitrogen
+
     CLI Example:
 
     .. code-block:: bash
@@ -64,13 +84,15 @@ def get(key, default=KeyError, merge=False, delimiter=DEFAULT_TARGET_DELIM):
     if default is KeyError:
         default = ''
     opt_merge_lists = __opts__.get('pillar_merge_lists', False)
+    pillar_dict = __pillar__ if saltenv is None else items(saltenv=saltenv)
+
     if merge:
-        ret = salt.utils.traverse_dict_and_list(__pillar__, key, {}, delimiter)
+        ret = salt.utils.traverse_dict_and_list(pillar_dict, key, {}, delimiter)
         if isinstance(ret, collections.Mapping) and \
                 isinstance(default, collections.Mapping):
             return salt.utils.dictupdate.update(default, ret, merge_lists=opt_merge_lists)
 
-    ret = salt.utils.traverse_dict_and_list(__pillar__,
+    ret = salt.utils.traverse_dict_and_list(pillar_dict,
                                             key,
                                             default,
                                             delimiter)
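The new saltenv argument only changes where the data comes from: the in-memory __pillar__ dictionary versus freshly compiled pillar from items(saltenv=...). The lookup itself still walks the nested dictionary along a ':'-delimited key and falls back to the default. A simplified, self-contained sketch of that lookup, not Salt's actual salt.utils.traverse_dict_and_list (which also descends into lists and handles a sentinel default):

    def simple_traverse(data, key, default='', delimiter=':'):
        '''
        Walk a nested dict along a delimited key, e.g. 'apache:sites:port'.
        Simplified stand-in for salt.utils.traverse_dict_and_list.
        '''
        ptr = data
        for part in key.split(delimiter):
            if isinstance(ptr, dict) and part in ptr:
                ptr = ptr[part]
            else:
                return default
        return ptr


    # Hypothetical pillar data, standing in for __pillar__ or items(saltenv=...)
    pillar_dict = {'apache': {'sites': {'port': 8080}}}

    print(simple_traverse(pillar_dict, 'apache:sites:port'))      # 8080
    print(simple_traverse(pillar_dict, 'apache:missing', 'n/a'))  # n/a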
@@ -88,7 +110,7 @@ def items(*args, **kwargs):
     Contrast with :py:func:`raw` which returns the pillar data that is
     currently loaded into the minion.
 
-    pillar : none
+    pillar
         if specified, allows for a dictionary of pillar data to be made
         available to pillar and ext_pillar rendering. these pillar variables
         will also override any variables of the same name in pillar or
@@ -96,6 +118,15 @@ def items(*args, **kwargs):
 
         .. versionadded:: 2015.5.0
 
+    saltenv
+        Pass a specific pillar environment from which to compile pillar data.
+        If unspecified, the minion's :conf_minion:`environment` option is used,
+        and if that also is not specified then all configured pillar
+        environments will be merged into a single pillar dictionary and
+        returned.
+
+        .. versionadded:: Nitrogen
+
     CLI Example:
 
     .. code-block:: bash
@@ -110,7 +141,7 @@ def items(*args, **kwargs):
         __opts__,
         __grains__,
         __opts__['id'],
-        __opts__['environment'],
+        kwargs.get('saltenv') or __opts__['environment'],
         pillar=kwargs.get('pillar'))
 
     return pillar.compile_pillar()
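In items(), kwargs.get('saltenv') or __opts__['environment'] means a per-call saltenv wins only when it is truthy; a missing key, None or an empty string all fall back to the minion's configured environment. A tiny sketch of that fallback with hypothetical values:

    # Hypothetical minion options; only 'environment' matters here.
    __opts__ = {'environment': 'base'}

    def pick_saltenv(kwargs):
        # Mirrors the expression in the diff: a falsy saltenv
        # (absent, None or '') falls back to the configured environment.
        return kwargs.get('saltenv') or __opts__['environment']

    print(pick_saltenv({'saltenv': 'dev'}))  # dev
    print(pick_saltenv({'saltenv': ''}))     # base
    print(pick_saltenv({}))                  # base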
@@ -191,7 +222,7 @@ def item(*args, **kwargs):
 
     Return one or more pillar entries
 
-    pillar : none
+    pillar
         if specified, allows for a dictionary of pillar data to be made
         available to pillar and ext_pillar rendering. these pillar variables
         will also override any variables of the same name in pillar or
@@ -3,15 +3,37 @@
 Simple returner for Couchbase. Optional configuration
 settings are listed below, along with sane defaults.
 
-couchbase.host: 'salt'
-couchbase.port: 8091
-couchbase.bucket: 'salt'
-couchbase.skip_verify_views: False
+.. code-block:: yaml
 
-To use the couchbase returner, append '--return couchbase' to the salt command. ex:
+    couchbase.host: 'salt'
+    couchbase.port: 8091
+    couchbase.bucket: 'salt'
+    couchbase.ttl: 24
+    couchbase.password: 'password'
+    couchbase.skip_verify_views: False
 
-    salt '*' test.ping --return couchbase
+To use the couchbase returner, append '--return couchbase' to the salt command. ex:
+
+.. code-block:: bash
+
+    salt '*' test.ping --return couchbase
+
+To use the alternative configuration, append '--return_config alternative' to the salt command.
+
+.. versionadded:: 2015.5.0
+
+.. code-block:: bash
+
+    salt '*' test.ping --return couchbase --return_config alternative
+
+To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
+
+.. versionadded:: 2016.3.0
+
+.. code-block:: bash
+
+    salt '*' test.ping --return couchbase --return_kwargs '{"bucket": "another-salt"}'
 
 
 All of the return data will be stored in documents as follows:
 
@@ -24,10 +46,11 @@ nocache: should we not cache the return data
 JID/MINION_ID
 =============
 return: return_data
-out: out_data
+full_ret: full load of job return
 '''
 from __future__ import absolute_import
 
+import json
 import logging
 
 try:
@@ -70,7 +93,8 @@ def _get_options():
     '''
     return {'host': __opts__.get('couchbase.host', 'salt'),
             'port': __opts__.get('couchbase.port', 8091),
-            'bucket': __opts__.get('couchbase.bucket', 'salt')}
+            'bucket': __opts__.get('couchbase.bucket', 'salt'),
+            'password': __opts__.get('couchbase.password', '')}
 
 
 def _get_connection():
@@ -80,9 +104,16 @@ def _get_connection():
     global COUCHBASE_CONN
     if COUCHBASE_CONN is None:
         opts = _get_options()
-        COUCHBASE_CONN = couchbase.Couchbase.connect(host=opts['host'],
-                                                     port=opts['port'],
-                                                     bucket=opts['bucket'])
+        if opts['password']:
+            COUCHBASE_CONN = couchbase.Couchbase.connect(host=opts['host'],
+                                                         port=opts['port'],
+                                                         bucket=opts['bucket'],
+                                                         password=opts['password'])
+        else:
+            COUCHBASE_CONN = couchbase.Couchbase.connect(host=opts['host'],
+                                                         port=opts['port'],
+                                                         bucket=opts['bucket'])
+
     return COUCHBASE_CONN
 
 
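The connection helper caches a single module-level handle and only passes password= when one is configured. The sketch below shows the same lazy-singleton shape against the legacy couchbase.Couchbase.connect() call used in the diff, but collapses the if/else into keyword arguments to avoid repeating the call; newer Couchbase SDKs expose a different API, so treat this as an illustration of the pattern rather than a drop-in:

    import couchbase  # legacy Couchbase Python SDK, as used by this returner

    COUCHBASE_CONN = None


    def get_connection(opts):
        '''
        Lazily create and cache one bucket connection, adding password=
        only when the configuration supplies one.
        '''
        global COUCHBASE_CONN
        if COUCHBASE_CONN is None:
            kwargs = {'host': opts.get('couchbase.host', 'salt'),
                      'port': opts.get('couchbase.port', 8091),
                      'bucket': opts.get('couchbase.bucket', 'salt')}
            if opts.get('couchbase.password'):
                kwargs['password'] = opts.get('couchbase.password')
            COUCHBASE_CONN = couchbase.Couchbase.connect(**kwargs)
        return COUCHBASE_CONN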
@@ -117,7 +148,7 @@ def _get_ttl():
    '''
    Return the TTL that we should store our objects with
    '''
-    return __opts__['keep_jobs'] * 60 * 60  # keep_jobs is in hours
+    return __opts__.get('couchbase.ttl', 24) * 60 * 60  # keep_jobs is in hours
 
 
 #TODO: add to returner docs-- this is a new one
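The TTL option couchbase.ttl is expressed in hours and converted to the seconds Couchbase expects, so the default of 24 works out to 86400 seconds. A one-line check of that conversion:

    ttl_hours = 24                # default from __opts__.get('couchbase.ttl', 24)
    ttl_seconds = ttl_hours * 60 * 60
    assert ttl_seconds == 86400   # the value _get_ttl() returns with the default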
@@ -150,25 +181,14 @@ def prep_jid(nocache=False, passed_jid=None):
 
 def returner(load):
     '''
-    Return data to the local job cache
+    Return data to couchbase bucket
     '''
     cb_ = _get_connection()
-    try:
-        jid_doc = cb_.get(load['jid'])
-        if jid_doc.value['nocache'] is True:
-            return
-    except couchbase.exceptions.NotFoundError:
-        log.error(
-            'An inconsistency occurred, a job was received with a job id '
-            'that is not present in the local cache: {jid}'.format(**load)
-        )
-        return False
 
     hn_key = '{0}/{1}'.format(load['jid'], load['id'])
     try:
-        ret_doc = {'return': load['return']}
-        if 'out' in load:
-            ret_doc['out'] = load['out']
+        ret_doc = {'return': load['return'],
+                   'full_ret': json.dumps(load)}
 
         cb_.add(hn_key,
                 ret_doc,
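With this change each job return is stored under a JID/MINION_ID key holding the raw return plus a JSON dump of the whole payload, matching the full_ret line added to the module docstring above. A small sketch of the resulting document, using a made-up load:

    import json

    # Hypothetical payload, shaped like the 'load' dict a returner receives.
    load = {'jid': '20161117183049397385',
            'id': 'minion01',
            'fun': 'test.ping',
            'return': True}

    hn_key = '{0}/{1}'.format(load['jid'], load['id'])  # '20161117183049397385/minion01'
    ret_doc = {'return': load['return'],
               'full_ret': json.dumps(load)}

    # cb_.add(hn_key, ret_doc, ...) would persist this document; here we
    # just show what it contains.
    print(hn_key)
    print(json.dumps(ret_doc, indent=2))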
@@ -132,6 +132,7 @@ __all__ = [
     'BASE_FILE_ROOTS_DIR',
     'BASE_PILLAR_ROOTS_DIR',
     'BASE_MASTER_ROOTS_DIR',
+    'BASE_THORIUM_ROOTS_DIR',
     'LOGS_DIR',
     'PIDFILE_DIR',
     'INSTALL_DIR',