Merge remote-tracking branch 'upstream/2015.2' into merge-forward-develop

Conflicts:
	salt/cloud/clouds/linodepy.py
	salt/master.py
	salt/modules/boto_route53.py
	salt/modules/virt.py
	salt/renderers/gpg.py
	salt/runners/doc.py
	salt/runners/jobs.py
	salt/runners/manage.py
	salt/runners/virt.py
	salt/utils/event.py
	salt/utils/http.py
Colton Myers 2015-03-16 15:12:02 -06:00
commit fd93491a5f
45 changed files with 1404 additions and 1475 deletions


@ -1832,7 +1832,7 @@ There are additional details at :ref:`salt-pillars`
``ext_pillar_first``
--------------------
.. versionadded:: 2015.2
.. versionadded:: 2015.2.0
The ext_pillar_first option allows for external pillar sources to populate
before file system pillar. This allows for targeting file system pillar from
@ -1885,7 +1885,7 @@ strategy between different sources. It accepts 3 values:
* aggregate:
instructs aggregation of elements between sources that use the #!yamlex rendered.
instructs aggregation of elements between sources that use the #!yamlex renderer.
For example, these two documents:


@ -6,8 +6,27 @@ Linode is a public cloud provider with a focus on Linux instances.
Dependencies
============
* linode-python >= 1.1.1
OR
* Libcloud >= 0.13.2
This driver supports accessing Linode via linode-python or Apache Libcloud.
Linode-python is recommended, as it is more full-featured than Libcloud. In
particular, using linode-python enables stopping, starting, and cloning
machines.
Driver selection is automatic. If linode-python is present it will be used.
If it is absent, salt-cloud will fall back to Libcloud. If neither is
present, salt-cloud will abort.
NOTE: linode-python 1.1.1 or later is recommended. Earlier versions of linode-python
should work but leak sensitive information into the debug logs.
Linode-python can be downloaded from
https://github.com/tjfontaine/linode-python or installed via pip.
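The fallback described above amounts to a pair of guarded imports. A minimal
sketch of that selection logic (illustrative only; the real salt-cloud loader
is more involved):

.. code-block:: python

    # Sketch of the automatic driver selection described above
    # (illustrative only; the actual salt-cloud loader logic differs).
    try:
        import linode.api  # prefer linode-python when it is importable
        HAS_LINODEPY = True
    except ImportError:
        HAS_LINODEPY = False

    if not HAS_LINODEPY:
        try:
            import libcloud.compute.providers  # otherwise fall back to Libcloud
        except ImportError:
            raise ImportError(
                'salt-cloud requires linode-python or Apache Libcloud '
                'to drive Linode')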
Configuration
=============
Linode requires a single API key, but the default root password for new
@ -100,3 +119,34 @@ command:
uuid:
8457f92eaffc92b7666b6734a96ad7abe1a8a6dd
...SNIP...
Cloning
=======
When salt-cloud accesses Linode via linode-python it can clone machines.
It is safest to clone a stopped machine. To stop a machine, run:
.. code-block:: bash
salt-cloud -a stop machine_to_clone
To create a new machine based on another machine, add an entry to your linode
cloud profile that looks like this:
.. code-block:: yaml
li-clone:
provider: linode
clonefrom: machine_to_clone
script_args: -C
Then run salt-cloud as normal, specifying `-p li-clone`. The profile name can
be anything--it doesn't have to be `li-clone`.
`Clonefrom:` is the name of an existing machine in Linode from which to clone.
`Script_args: -C` is necessary to avoid re-deploying Salt via salt-bootstrap.
`-C` will just re-deploy keys so the new minion will not have a duplicate key
or minion_id on the master.
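With a profile like the one above in place, cloning is just a normal profile
run; the new machine name here is only an example:

.. code-block:: bash

    salt-cloud -p li-clone my-new-clone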


@ -36,6 +36,9 @@ Misc Fixes/Additions
the :py:mod:`cmd module <salt.cmd.cmdmod>`. (:issue:`21122`)
- Fixed bug where TCP keepalive was not being sent on the defined interval on
the return port (4506) from minion to master. (:issue:`21465`)
- LocalClient may now optionally raise SaltClientError exceptions. If using
this class directly, checking for and handling this exception is recommended.
(:issue:`21501`)
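For callers using LocalClient directly, the recommended handling is a plain
try/except around the call; a minimal sketch (the target and function here
are arbitrary examples):

.. code-block:: python

    import salt.client
    from salt.exceptions import SaltClientError

    try:
        local = salt.client.LocalClient()
        ret = local.cmd('*', 'test.ping')
    except SaltClientError as exc:
        # e.g. the master is not running or is unreachable
        print('Salt client error: {0}'.format(exc))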
Deprecations
============


@ -204,7 +204,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
if retcodes.count(0) < len(retcodes):
sys.exit(11)
except (SaltInvocationError, EauthAuthenticationError) as exc:
except (SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc:
ret = str(exc)
out = ''
self._output_ret(ret, out)


@ -21,6 +21,7 @@ The data structure needs to be:
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import copy
import errno
@ -45,7 +46,8 @@ import salt.utils.verify
import salt.utils.jid
import salt.syspaths as syspaths
from salt.exceptions import (
EauthAuthenticationError, SaltInvocationError, SaltReqTimeoutError, SaltClientError
EauthAuthenticationError, SaltInvocationError, SaltReqTimeoutError,
SaltClientError, PublishError
)
# Import third party libs
@ -213,7 +215,7 @@ class LocalClient(object):
if not pub_data:
# Failed to authenticate, this could be a bunch of things
raise EauthAuthenticationError(
'Failed to authenticate! This is most likely because this '
'Failed to authenticate! This is most likely because this '
'user is not permitted to execute commands, but there is a '
'small possibility that a disk error occurred (check '
'disk/inode usage).'
@ -268,15 +270,24 @@ class LocalClient(object):
# Subscribe to all events and subscribe as early as possible
self.event.subscribe(jid)
pub_data = self.pub(
tgt,
fun,
arg,
expr_form,
ret,
jid=jid,
timeout=self._get_timeout(timeout),
**kwargs)
try:
pub_data = self.pub(
tgt,
fun,
arg,
expr_form,
ret,
jid=jid,
timeout=self._get_timeout(timeout),
**kwargs)
except SaltClientError:
# Re-raise error with specific message
raise SaltClientError(
'The salt master could not be contacted. Is master running?'
)
except Exception as general_exception:
# Convert to a generic client error and pass along the message
raise SaltClientError(general_exception)
return self._check_pub_data(pub_data)
@ -1417,7 +1428,7 @@ class LocalClient(object):
'Unable to connect to the salt master publisher at '
'{0}'.format(self.opts['sock_dir'])
)
return {'jid': '0', 'minions': []}
raise SaltClientError
payload_kwargs = self._prep_pub(
tgt,
@ -1438,11 +1449,11 @@ class LocalClient(object):
try:
payload = channel.send(payload_kwargs, timeout=timeout)
except SaltReqTimeoutError:
log.error(
'Salt request timed out. If this error persists, '
raise SaltReqTimeoutError(
'Salt request timed out. The master is not responding. '
'If this error persists after verifying the master is up, '
'worker_threads may need to be increased.'
)
return {}
if not payload:
# The master key could have changed out from under us! Regen
@ -1453,8 +1464,13 @@ class LocalClient(object):
self.key = key
payload_kwargs['key'] = self.key
payload = channel.send(payload_kwargs)
if not payload:
return payload
error = payload.pop('error', None)
if error is not None:
raise PublishError(error)
if not payload:
return payload
# We have the payload, let's get rid of the channel fast(GC'ed faster)
del channel


@ -435,6 +435,9 @@ class AsyncClientMixin(object):
'''
Print all of the events with the prefix 'tag'
'''
if not isinstance(event, dict):
return
# if we are "quiet", don't print
if self.opts.get('quiet', False):
return
@ -443,14 +446,18 @@ class AsyncClientMixin(object):
if suffix in ('new',):
return
# Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict with two keys: 'data' and 'outputter'
if isinstance(event['return'], dict) \
and set(event['return']) == set(('data', 'outputter')):
event_data = event['return']['data']
outputter = event['return']['outputter']
outputter = self.opts.get('output', event.get('outputter', None))
# if this is a ret, we have our own set of rules
if suffix == 'ret':
# Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict with two keys: 'data' and 'outputter'
if isinstance(event.get('return'), dict) \
and set(event['return']) == set(('data', 'outputter')):
event_data = event['return']['data']
outputter = event['return']['outputter']
else:
event_data = event['return']
else:
event_data = event['return']
outputter = None
event_data = {'suffix': suffix, 'event': event}
salt.output.display_output(event_data, outputter, self.opts)

File diff suppressed because it is too large.


@ -1,942 +0,0 @@
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using linode-python bindings
=================================================
The Linode cloud module is used to control access to the Linode VPS system
Use of this module only requires the ``apikey`` parameter.
:depends: linode-python >= 1.0
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/linodepy.conf``:
.. code-block:: yaml
my-linode-config:
# Linode account api key
apikey: JVkbSJDGHSDKUKSDJfhsdklfjgsjdkflhjlsdfffhgdgjkenrtuinv
provider: linodepy
This provider supports cloning existing Linodes. To clone,
add a profile with a ``clonefrom`` key, and a ``script_args: -C``.
``Clonefrom`` should be the name of the Linode that is the source for the clone.
``script_args: -C`` passes a -C to the bootstrap script, which only configures
the minion and doesn't try to install a new copy of salt-minion. This way the
minion gets new keys and the keys get pre-seeded on the master, and the
/etc/salt/minion file has the right 'id:' declaration.
Cloning requires a post 2015-02-01 salt-bootstrap.
'''
from __future__ import absolute_import
# pylint: disable=E0102
# Import python libs
import copy
import pprint
import time
from os.path import exists, expanduser
# Import Salt Libs
import salt.ext.six as six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module
# Import linode-python
try:
import linode
import linode.api
HAS_LINODEPY = True
except ImportError:
HAS_LINODEPY = False
# Import salt cloud libs
from salt.cloud.exceptions import SaltCloudConfigError
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.utils import namespaced_function
# Get logging started
log = logging.getLogger(__name__)
# Human-readable status fields
LINODE_STATUS = {
'-2': 'Boot Failed (not in use)',
'-1': 'Being Created',
'0': 'Brand New',
'1': 'Running',
'2': 'Powered Off',
'3': 'Shutting Down (not in use)',
'4': 'Saved to Disk (not in use)',
}
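# Example: LINODE_STATUS[str(node['STATUS'])] -> 'Running' (see linodes() below)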
# Redirect linode functions to this module namespace
#get_size = namespaced_function(get_size, globals())
#get_image = namespaced_function(get_image, globals())
# avail_locations = namespaced_function(avail_locations, globals())
# avail_images = namespaced_function(avail_distributions, globals())
# avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
# destroy = namespaced_function(destroy, globals())
# list_nodes = namespaced_function(list_nodes, globals())
# list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# get_node = namespaced_function(get_node, globals())
# Borrowed from Apache Libcloud
class NodeAuthSSHKey(object):
"""
An SSH key to be installed for authentication to a node.
This is the actual contents of the users ssh public key which will
normally be installed as root's public key on the node.
>>> pubkey = '...' # read from file
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> k = NodeAuthSSHKey(pubkey)
>>> k
<NodeAuthSSHKey>
"""
def __init__(self, pubkey):
"""
:param pubkey: Public key material.
:type pubkey: ``str``
"""
self.pubkey = pubkey
def __repr__(self):
return '<NodeAuthSSHKey>'
class NodeAuthPassword(object):
"""
A password to be used for authentication to a node.
"""
def __init__(self, password, generated=False):
"""
:param password: Password.
:type password: ``str``
:param generated: ``True`` if this password was automatically generated,
``False`` otherwise.
"""
self.password = password
self.generated = generated
def __repr__(self):
return '<NodeAuthPassword>'
# Only load in this module if the LINODE configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for Linode configurations.
'''
if not HAS_LINODEPY:
return False
if get_configured_provider() is False:
return False
return True
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'linodepy',
('apikey',)
)
def get_conn():
'''
Return a conn object for the passed VM data
'''
return linode.api.Api(key=config.get_cloud_config_value(
'apikey',
get_configured_provider(),
__opts__, search_global=False)
)
def get_image(conn, vm_):
'''
Return a single image from the Linode API
'''
images = avail_images(conn)
return images[vm_['image']]['id']
def get_size(conn, vm_):
'''
Return available size from Linode (Linode calls them "plans")
'''
sizes = avail_sizes(conn)
return sizes[vm_['size']]
def avail_sizes(conn=None):
'''
Return available sizes ("plans" in LinodeSpeak)
'''
if not conn:
conn = get_conn()
sizes = {}
for plan in conn.avail_linodeplans():
key = plan['LABEL']
sizes[key] = {}
sizes[key]['id'] = plan['PLANID']
sizes[key]['extra'] = plan
sizes[key]['bandwidth'] = plan['XFER']
sizes[key]['disk'] = plan['DISK']
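# Approximate the monthly price from Linode's hourly rate (24 hours * 30 days)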
sizes[key]['price'] = plan['HOURLY']*24*30
sizes[key]['ram'] = plan['RAM']
return sizes
def avail_locations(conn=None):
'''
return available datacenter locations
'''
if not conn:
conn = get_conn()
locations = {}
for dc in conn.avail_datacenters():
key = dc['LOCATION']
locations[key] = {}
locations[key]['id'] = dc['DATACENTERID']
locations[key]['abbreviation'] = dc['ABBR']
return locations
def avail_images(conn=None):
'''
Return available images
'''
if not conn:
conn = get_conn()
images = {}
for d in conn.avail_distributions():
images[d['LABEL']] = {}
images[d['LABEL']]['id'] = d['DISTRIBUTIONID']
images[d['LABEL']]['extra'] = d
return images
def get_ips(conn=None, LinodeID=None):
'''
Return IP addresses, both public and private
'''
if not conn:
conn = get_conn()
ips = conn.linode_ip_list(LinodeID=LinodeID)
all_ips = {'public_ips': [], 'private_ips': []}
for i in ips:
if i['ISPUBLIC']:
key = 'public_ips'
else:
key = 'private_ips'
all_ips[key].append(i['IPADDRESS'])
return all_ips
def linodes(full=False, include_ips=False, conn=None):
'''
Return data on all nodes
'''
if not conn:
conn = get_conn()
nodes = conn.linode_list()
results = {}
for n in nodes:
thisnode = {}
thisnode['id'] = n['LINODEID']
thisnode['image'] = None
thisnode['name'] = n['LABEL']
thisnode['size'] = n['TOTALRAM']
thisnode['state'] = n['STATUS']
thisnode['private_ips'] = []
thisnode['public_ips'] = []
thisnode['state'] = LINODE_STATUS[str(n['STATUS'])]
if include_ips:
thisnode = dict(list(thisnode.items()) +
list(get_ips(conn, n['LINODEID']).items()))
if full:
thisnode['extra'] = n
results[n['LABEL']] = thisnode
return results
def stop(*args, **kwargs):
'''
Execute a "stop" action on a VM in Linode.
'''
conn = get_conn()
node = get_node(name=args[0])
if not node:
node = get_node(LinodeID=args[0])
if node['state'] == 'Powered Off':
return {'success': True, 'state': 'Stopped',
'msg': 'Machine already stopped'}
result = conn.linode_shutdown(LinodeID=node['id'])
if waitfor_job(LinodeID=node['id'], JobID=result['JobID']):
return {'state': 'Stopped',
'action': 'stop',
'success': True}
else:
return {'action': 'stop',
'success': False}
def start(*args, **kwargs):
'''
Execute a "start" action on a VM in Linode.
'''
conn = get_conn()
node = get_node(name=args[0])
if not node:
node = get_node(LinodeID=args[0])
if not node:
return False
if node['state'] == 'Running':
return {'success': True,
'action': 'start',
'state': 'Running',
'msg': 'Machine already running'}
result = conn.linode_boot(LinodeID=node['id'])
if waitfor_job(LinodeID=node['id'], JobID=result['JobID']):
return {'state': 'Running',
'action': 'start',
'success': True}
else:
return {'action': 'start',
'success': False}
def clone(*args, **kwargs):
'''
Clone an existing Linode
'''
conn = get_conn()
node = get_node(name=args[0], full=True)
if not node:
node = get_node(LinodeID=args[0], full=True)
if len(args) > 1:
actionargs = args[1]
if 'target' not in actionargs:
log.debug('Tried to clone but target not specified.')
return False
result = conn.linode_clone(LinodeID=node['id'],
DatacenterID=node['extra']['DATACENTERID'],
PlanID=node['extra']['PLANID'])
conn.linode_update(LinodeID=result['LinodeID'],
Label=actionargs['target'])
# Boot!
if 'boot' not in actionargs:
bootit = True
else:
bootit = actionargs['boot']
if bootit:
bootjob_status = conn.linode_boot(LinodeID=result['LinodeID'])
waitfor_job(LinodeID=result['LinodeID'], JobID=bootjob_status['JobID'])
node_data = get_node(name=actionargs['target'], full=True)
log.info('Cloned Cloud VM {0} to {1}'.format(args[0], actionargs['target']))
log.debug(
'{0!r} VM creation details:\n{1}'.format(
args[0], pprint.pformat(node_data)
)
)
return node_data
def list_nodes():
'''
Return basic data on nodes
'''
return linodes(full=False, include_ips=True)
def list_nodes_full():
'''
Return all data on nodes
'''
return linodes(full=True, include_ips=True)
def get_node(LinodeID=None, name=None, full=False):
'''
Return information on a single node
'''
c = get_conn()
linode_list = linodes(full=full, conn=c)
for l, d in six.iteritems(linode_list):
if LinodeID:
if d['id'] == LinodeID:
d = dict(list(d.items()) + list(get_ips(conn=c, LinodeID=d['id']).items()))
return d
if name:
if d['name'] == name:
d = dict(list(d.items()) + list(get_ips(conn=c, LinodeID=d['id']).items()))
return d
return None
def get_disk_size(vm_, size, swap):
'''
Return the size of the root disk in MB
'''
conn = get_conn()
vmsize = get_size(conn, vm_)
disksize = int(vmsize['disk']) * 1024
return config.get_cloud_config_value(
'disk_size', vm_, __opts__, default=disksize - swap
)
def destroy(vm_):
conn = get_conn()
machines = linodes(full=False, include_ips=False)
return conn.linode_delete(LinodeID=machines[vm_]['id'], skipChecks=True)
def get_location(conn, vm_):
'''
Return the node location to use.
Linode wants a location id, which is an integer, when creating a new VM.
To be flexible, let the user specify any of the location id, abbreviation,
or full name of the location ("Fremont, CA, USA") in the config file.
'''
locations = avail_locations(conn)
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
# Was this an id that matches something in locations?
if str(loc) not in [locations[k]['id'] for k in locations]:
# No, let's try to match it against the full name and the abbreviation and return the id
for key in locations:
if str(loc).lower() in (key,
str(locations[key]['id']).lower(),
str(locations[key]['abbreviation']).lower()):
return locations[key]['id']
else:
return loc
# No match. Return None, cloud provider will use a default or throw an exception
return None
def get_password(vm_):
'''
Return the password to use
'''
return config.get_cloud_config_value(
'password', vm_, __opts__, default=config.get_cloud_config_value(
'passwd', vm_, __opts__, search_global=False
), search_global=False
)
def get_pubkey(vm_):
'''
Return the SSH pubkey to use
'''
return config.get_cloud_config_value(
'ssh_pubkey', vm_, __opts__, search_global=False)
def get_auth(vm_):
'''
Return either NodeAuthSSHKey or NodeAuthPassword, preferring
NodeAuthSSHKey if both are provided.
'''
if get_pubkey(vm_) is not None:
return NodeAuthSSHKey(get_pubkey(vm_))
elif get_password(vm_) is not None:
return NodeAuthPassword(get_password(vm_))
else:
raise SaltCloudConfigError(
'The Linode driver requires either a password or ssh_pubkey with '
'corresponding ssh_private_key.')
def get_ssh_key_filename(vm_):
'''
Return path to filename if get_auth() returns a NodeAuthSSHKey.
'''
key_filename = config.get_cloud_config_value(
'ssh_key_file', vm_, __opts__,
default=config.get_cloud_config_value(
'ssh_pubkey', vm_, __opts__, search_global=False
), search_global=False)
if key_filename is not None and exists(expanduser(key_filename)):
return expanduser(key_filename)
return None
def get_private_ip(vm_):
'''
Return True if a private ip address is requested
'''
return config.get_cloud_config_value(
'private_ip', vm_, __opts__, default=False
)
def get_swap(vm_):
'''
Return the amount of swap space to use in MB
'''
return config.get_cloud_config_value(
'swap', vm_, __opts__, default=128
)
def get_kernels(conn=None):
'''
Get Linode's list of kernels available
'''
if not conn:
conn = get_conn()
kernel_response = conn.avail_kernels()
if len(kernel_response['ERRORARRAY']) == 0:
kernels = {}
for k in kernel_response['DATA']:
key = k['LABEL']
kernels[key] = {}
kernels[key]['id'] = k['KERNELID']
kernels[key]['name'] = k['LABEL']
kernels[key]['isvops'] = k['ISVOPS']
kernels[key]['isxen'] = k['ISXEN']
return kernels
else:
log.error("Linode avail_kernels returned {0}".format(kernel_response['ERRORARRAY']))
return None
def get_one_kernel(conn=None, name=None):
'''
Return data on one kernel
name=None returns latest kernel
'''
if not conn:
conn = get_conn()
kernels = get_kernels(conn)
if not name:
name = 'latest 64 bit'
else:
name = name.lower()
for k, v in six.iteritems(kernels):
if name in k.lower():
return v
log.error('Did not find a kernel matching {0}'.format(name))
return None
def waitfor_status(conn=None, LinodeID=None, status=None, timeout=300, quiet=True):
'''
Wait for a certain status
'''
if not conn:
conn = get_conn()
if status is None:
status = 'Brand New'
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
result = get_node(LinodeID)
if result['state'] == status:
return True
time.sleep(interval)
if not quiet:
log.info('Status for {0} is {1}'.format(LinodeID, result['state']))
else:
log.debug('Status for {0} is {1}'.format(LinodeID, result))
return False
def waitfor_job(conn=None, LinodeID=None, JobID=None, timeout=300, quiet=True):
if not conn:
conn = get_conn()
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
try:
result = conn.linode_job_list(LinodeID=LinodeID, JobID=JobID)
except linode.ApiError as exc:
log.info('Waiting for job {0} on host {1} returned {2}'.format(JobID, LinodeID, exc))
return False
if result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
if not quiet:
log.info('Still waiting on Job {0} for {1}'.format(JobID, LinodeID))
else:
log.debug('Still waiting on Job {0} for {1}'.format(JobID, LinodeID))
return False
def boot(LinodeID=None, configid=None):
'''
Execute a boot sequence on a linode
'''
conn = get_conn()
return conn.linode_boot(LinodeID=LinodeID, ConfigID=configid)
def create_swap_disk(vm_=None, LinodeID=None, swapsize=None):
'''
Create the disk for the linode
'''
conn = get_conn()
if not swapsize:
swapsize = get_swap(vm_)
result = conn.linode_disk_create(LinodeID=LinodeID,
Label='swap',
Size=swapsize,
Type='swap')
return result
def create_disk_from_distro(vm_=None, LinodeID=None, swapsize=None):
'''
Create the disk for the linode
'''
conn = get_conn()
result = conn.linode_disk_createfromdistribution(
LinodeID=LinodeID,
DistributionID=get_image(conn, vm_),
Label='root',
Size=get_disk_size(vm_, get_size(conn, vm_)['disk'], get_swap(vm_)),
rootPass=get_password(vm_),
rootSSHKey=get_pubkey(vm_)
)
return result
def create_config(vm_, LinodeID=None, root_disk_id=None, swap_disk_id=None):
'''
Create a Linode Config
'''
conn = get_conn()
# 138 appears to always be the latest 64-bit kernel for Linux
kernelid = 138
result = conn.linode_config_create(LinodeID=LinodeID,
Label=vm_['name'],
Disklist='{0},{1}'.format(root_disk_id,
swap_disk_id),
KernelID=kernelid,
RootDeviceNum=1,
RootDeviceRO=True,
RunLevel='default',
helper_disableUpdateDB=True,
helper_xen=True,
helper_depmod=True)
return result
def create(vm_):
'''
Create a single VM from a data dict
'''
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
},
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
conn = get_conn()
if 'clonefrom' in vm_:
kwargs = {
'name': vm_['name'],
'clonefrom': vm_['clonefrom'],
'auth': get_auth(vm_),
'ex_private': get_private_ip(vm_),
}
node_data = clone(vm_['clonefrom'], {'target': vm_['name']})
else:
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_)['id'],
'location': get_location(conn, vm_),
'auth': get_auth(vm_),
'ex_private': get_private_ip(vm_),
'ex_rsize': get_disk_size(vm_, get_size(conn, vm_)['disk'], get_swap(vm_)),
'ex_swap': get_swap(vm_)
}
# if 'libcloud_args' in vm_:
# kwargs.update(vm_['libcloud_args'])
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': {'name': kwargs['name'],
'image': kwargs['image'],
'size': kwargs['size'],
'location': kwargs['location'],
'ex_private': kwargs['ex_private'],
'ex_rsize': kwargs['ex_rsize'],
'ex_swap': kwargs['ex_swap']}},
transport=__opts__['transport']
)
try:
node_data = conn.linode_create(DatacenterID=get_location(conn, vm_),
PlanID=kwargs['size'], PaymentTerm=1)
except Exception as exc:
log.error(
'Error creating {0} on LINODE\n\n'
'The following exception was thrown by linode-python when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
if not waitfor_status(conn=conn, LinodeID=node_data['LinodeID'], status='Brand New'):
log.error('Error creating {0} on LINODE\n\n'
'while waiting for initial ready status'.format(
vm_['name']
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Set linode name
set_name_result = conn.linode_update(LinodeID=node_data['LinodeID'],
Label=vm_['name'])
log.debug('Set name action for {0} was {1}'.format(vm_['name'],
set_name_result))
# Create disks
log.debug('Creating disks for {0}'.format(node_data['LinodeID']))
swap_result = create_swap_disk(LinodeID=node_data['LinodeID'], swapsize=get_swap(vm_))
root_result = create_disk_from_distro(vm_, LinodeID=node_data['LinodeID'],
swapsize=get_swap(vm_))
# Create config
config_result = create_config(vm_, LinodeID=node_data['LinodeID'],
root_disk_id=root_result['DiskID'],
swap_disk_id=swap_result['DiskID'])
# Boot!
boot_result = boot(LinodeID=node_data['LinodeID'],
configid=config_result['ConfigID'])
if not waitfor_job(conn, LinodeID=node_data['LinodeID'],
JobID=boot_result['JobID']):
log.error('Boot failed for {0}.'.format(node_data))
return False
node_data.update(get_node(node_data['LinodeID']))
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
ret = {}
if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
deploy_script = script(vm_)
deploy_kwargs = {
'opts': __opts__,
'host': node_data['public_ips'][0],
'username': ssh_username,
'password': get_password(vm_),
'script': deploy_script.script,
'name': vm_['name'],
'tmp_dir': config.get_cloud_config_value(
'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
),
'deploy_command': config.get_cloud_config_value(
'deploy_command', vm_, __opts__,
default='/tmp/.saltcloud/deploy.sh',
),
'start_action': __opts__['start_action'],
'parallel': __opts__['parallel'],
'sock_dir': __opts__['sock_dir'],
'conf_file': __opts__['conf_file'],
'minion_pem': vm_['priv_key'],
'minion_pub': vm_['pub_key'],
'keep_tmp': __opts__['keep_tmp'],
'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
'sudo': config.get_cloud_config_value(
'sudo', vm_, __opts__, default=(ssh_username != 'root')
),
'sudo_password': config.get_cloud_config_value(
'sudo_password', vm_, __opts__, default=None
),
'tty': config.get_cloud_config_value(
'tty', vm_, __opts__, default=False
),
'display_ssh_output': config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
),
'script_args': config.get_cloud_config_value(
'script_args', vm_, __opts__
),
'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
'has_ssh_agent': False
}
if get_ssh_key_filename(vm_) is not None and get_pubkey(vm_) is not None:
deploy_kwargs['key_filename'] = get_ssh_key_filename(vm_)
# Deploy salt-master files, if necessary
if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
deploy_kwargs['make_master'] = True
deploy_kwargs['master_pub'] = vm_['master_pub']
deploy_kwargs['master_pem'] = vm_['master_pem']
master_conf = salt.utils.cloud.master_config(__opts__, vm_)
deploy_kwargs['master_conf'] = master_conf
if master_conf.get('syndic_master', None):
deploy_kwargs['make_syndic'] = True
deploy_kwargs['make_minion'] = config.get_cloud_config_value(
'make_minion', vm_, __opts__, default=True
)
# Check for Windows install params
win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
if win_installer:
deploy_kwargs['win_installer'] = win_installer
minion = salt.utils.cloud.minion_config(__opts__, vm_)
deploy_kwargs['master'] = minion['master']
deploy_kwargs['username'] = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
deploy_kwargs['password'] = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
# Store what was used to deploy the VM
event_kwargs = copy.deepcopy(deploy_kwargs)
del event_kwargs['minion_pem']
del event_kwargs['minion_pub']
del event_kwargs['sudo_password']
if 'password' in event_kwargs:
del event_kwargs['password']
ret['deploy_kwargs'] = event_kwargs
salt.utils.cloud.fire_event(
'event',
'executing deploy script',
'salt/cloud/{0}/deploying'.format(vm_['name']),
{'kwargs': event_kwargs},
transport=__opts__['transport']
)
deployed = False
if win_installer:
deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
else:
deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
if deployed:
log.info('Salt installed on {0}'.format(vm_['name']))
else:
log.error(
'Failed to start Salt on Cloud VM {0}'.format(
vm_['name']
)
)
ret.update(node_data)
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
log.debug(
'{0[name]!r} VM creation details:\n{1}'.format(
vm_, pprint.pformat(node_data)
)
)
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
},
transport=__opts__['transport']
)
return ret


@ -152,7 +152,6 @@ VALID_OPTS = {
'publish_port': int,
'auth_mode': int,
'pub_hwm': int,
'rep_hwm': int,
'worker_threads': int,
'ret_port': int,
'keep_jobs': int,
@ -446,7 +445,6 @@ DEFAULT_MASTER_OPTS = {
'interface': '0.0.0.0',
'publish_port': '4505',
'pub_hwm': 1000,
'rep_hwm': 50000,
'auth_mode': 1,
'user': 'root',
'worker_threads': 5,


@ -80,6 +80,12 @@ class LoaderError(SaltException):
'''
class PublishError(SaltException):
'''
Problems encountered when trying to publish a command
'''
class MinionError(SaltException):
'''
Minion problems reading uris such as salt:// or http://


@ -640,56 +640,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
Goals here:
- lazy loading
- minimize disk usage
- singletons (per tag)
# TODO:
- move modules_max_memory into here
- re-enable singletons (TODO: tests)
- singletons (per tag)
'''
instances = {}
def __new__(cls,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
singleton=False,
):
def new_object():
ret = object.__new__(cls)
ret.__singleton_init__(module_dirs,
opts=opts,
tag=tag,
loaded_base_name=loaded_base_name,
mod_type_check=mod_type_check,
pack=pack,
whitelist=whitelist,
virtual_enable=virtual_enable,
singleton=singleton,
)
return ret
if not singleton:
return new_object()
key = (tag,
virtual_enable,
'proxy' in opts,
opts.get('id'),
)
if key not in LazyLoader.instances:
log.debug('Initializing new LazyLoader for {0}'.format(key))
LazyLoader.instances[key] = new_object()
else:
log.debug('Re-using LazyLoader for {0}'.format(key))
return LazyLoader.instances[key]
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self,
module_dirs,
opts=None,
@ -699,23 +654,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
pack=None,
whitelist=None,
virtual_enable=True,
singleton=True,
): # pylint: disable=W0231
self.opts = self.__prep_mod_opts(opts)
self.singleton = singleton
# an init for the singleton instance to call
def __singleton_init__(self,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
singleton=True,
):
super(LazyLoader, self).__init__() # init the lazy loader
self.opts = self.__prep_mod_opts(opts)
@ -869,30 +808,6 @@ class LazyLoader(salt.utils.lazy.LazyDict):
mod_opts[key] = val
return mod_opts
# TODO: stop this? seems bad...
def __getitem__(self, key):
'''
When you get a module, make sure to pack the most recent opts/grains/pillar
This might be problematic, since this means they change after __virtual__,
but it's been doing this for a while... so it must be fine :/
'''
mod = super(LazyLoader, self).__getitem__(key)
# if we aren't a singleton, we don't have to worry about these changing
if not self.singleton:
return mod
if '__opts__' in mod.__globals__:
mod.__globals__['__opts__'].update(self.opts)
else:
mod.__globals__['__opts__'] = self.opts
mod.__globals__['__grains__'] = self.grains
mod.__globals__['__pillar__'] = self.pillar
return mod
def _iter_files(self, mod_name):
'''
Iterate over all file_mapping files in order of closeness to mod_name


@ -6,6 +6,7 @@ involves preparing the three listeners and the workers needed by the master.
# Import python libs
from __future__ import absolute_import
from pprint import pformat
import os
import re
import sys
@ -58,7 +59,9 @@ import salt.utils.zeromq
import salt.utils.jid
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import FileserverConfigError
from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.master import ConnectedCache
from salt.utils.cache import CacheCli
@ -543,7 +546,23 @@ class Publisher(multiprocessing.Process):
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
try:
payload = unpacked_package['payload']
except (KeyError,) as exc:
# somehow not packaged !?
if 'enc' in unpacked_package and 'load' in unpacked_package:
payload = package
else:
try:
log.error(
"Invalid payload: {0}".format(
pformat(unpacked_package)), exc_info=True)
except Exception:
# don't fail on a format error here,
# but log something as it is hard to track down
log.error("Received invalid payload", exc_info=True)
raise exc
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
@ -608,12 +627,6 @@ class ReqServer(object):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
try:
self.clients.setsockopt(zmq.HWM, self.opts['rep_hwm'])
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
self.clients.setsockopt(zmq.SNDHWM, self.opts['rep_hwm'])
self.clients.setsockopt(zmq.RCVHWM, self.opts['rep_hwm'])
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
@ -2174,27 +2187,36 @@ class ClearFuncs(object):
)
return ''
try:
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
group_perm_keys = [item for item in self.opts['external_auth'][extra['eauth']] if item.endswith('%')] # The configured auth groups
# The username with which we are attempting to auth
name = self.loadauth.load_name(extra)
# The groups to which this user belongs
groups = self.loadauth.get_groups(extra)
# The configured auth groups
group_perm_keys = [
item for item in self.opts['external_auth'][extra['eauth']]
if item.endswith('%')
]
# First we need to know if the user is allowed to proceed via any of their group memberships.
# First we need to know if the user is allowed to proceed via
# any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
for group in groups:
if group == group_config:
group_auth_match = True
# If a group_auth_match is set it means only that we have a user which matches at least one or more
# of the groups defined in the configuration file.
# If a group_auth_match is set it means only that we have a
# user which matches at least one or more of the groups defined
# in the configuration file.
external_auth_in_db = False
for d in self.opts['external_auth'][extra['eauth']]:
if d.startswith('^'):
external_auth_in_db = True
# If neither a catchall, a named membership or a group membership is found, there is no need
# to continue. Simply deny the user access.
# If neither a catchall, a named membership or a group
# membership is found, there is no need to continue. Simply
# deny the user access.
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']]) |
group_auth_match | external_auth_in_db):
@ -2207,7 +2229,8 @@ class ClearFuncs(object):
)
return ''
# Perform the actual authentication. If we fail here, do not continue.
# Perform the actual authentication. If we fail here, do not
# continue.
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'


@ -1051,7 +1051,7 @@ class Minion(MinionBase):
'''
pass
def _handle_clear(self, load):
def _handle_clear(self, load, sig=None):
'''
Handle un-encrypted transmissions
'''
@ -2052,15 +2052,18 @@ class Syndic(Minion):
if field in data:
kwargs[field] = data[field]
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _setsockopts(self):
# no filters for syndication masters, unless we want to maintain a
@ -2398,10 +2401,10 @@ class MultiSyndic(MinionBase):
minion['auth_wait'] += self.opts['acceptance_wait_time']
return False
# TODO: Move to an async framework of some type-- channel (the event thing underneath)
# doesn't handle failures well, and will retry 3 times at 60s timeouts-- which all block
# the main thread's execution. For now we just cause failures to kick off threads to look
# for the master to come back up
# TODO: Move to an async framework of some type-- channel (the event thing
# underneath) doesn't handle failures well, and will retry 3 times at 60s
# timeouts-- which all block the main thread's execution. For now we just
# cause failures to kick off threads to look for the master to come back up
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
@ -2496,8 +2499,8 @@ class MultiSyndic(MinionBase):
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if (self.event_forward_timeout is not None and
self.event_forward_timeout < time.time()):
if self.event_forward_timeout is not None \
and self.event_forward_timeout < time.time():
self._forward_events()
# We don't handle ZMQErrors like the other minions
# I've put explicit handling around the receive calls


@ -53,7 +53,9 @@ def tune(device, **kwargs):
switch = kwarg_map[key]
if key != 'read-write':
args.append(switch.replace('set', 'get'))
if kwargs[key] == 'True':
else:
args.append('getro')
if kwargs[key] == 'True' or kwargs[key] is True:
opts += '--{0} '.format(key)
else:
opts += '--{0} {1} '.format(switch, kwargs[key])


@ -218,7 +218,7 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
def _wait_for_sync(status, conn, wait_for_sync):
if not wait_for_sync:
return True
retry = 10
retry = 30
i = 0
while i < retry:
log.info('Getting route53 status (attempt {0})'.format(i + 1))


@ -12,7 +12,13 @@ import logging
# Import salt libs
import salt.utils
import salt.utils.cloud
try:
# Gated for salt-ssh (salt.utils.cloud imports msgpack)
import salt.utils.cloud
HAS_CLOUD = True
except ImportError:
HAS_CLOUD = False
import salt._compat
import salt.syspaths as syspaths
import salt.utils.sdb as sdb
@ -399,6 +405,8 @@ def gather_bootstrap_script(bootstrap=None):
salt '*' config.gather_bootstrap_script
'''
if not HAS_CLOUD:
return False, 'config.gather_bootstrap_script is unavailable'
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
if 'Success' in ret and len(ret['Success']['Files updated']) > 0:
return ret['Success']['Files updated'][0]


@ -646,7 +646,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
result['enable_ipv6'] = opts['enable_ipv6']
valid = _CONFIG_TRUE + _CONFIG_FALSE
for opt in ['onparent', 'peerdns', 'slave', 'vlan', 'defroute']:
for opt in ['onparent', 'peerdns', 'slave', 'vlan', 'defroute', 'stp']:
if opt in opts:
if opts[opt] in _CONFIG_TRUE:
result[opt] = 'yes'


@ -24,6 +24,7 @@ import jinja2.exceptions
from xml.dom import minidom
import salt.ext.six as six
from salt.ext.six.moves import StringIO as _StringIO # pylint: disable=import-error
from xml.dom import minidom
try:
import libvirt # pylint: disable=import-error
HAS_LIBVIRT = True


@ -70,7 +70,7 @@ def get_servers():
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
try:
if 'NtpServer' in line:
if line.startswith('NtpServer:'):
_, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
return sorted(ntpsvrs.split())
except ValueError as e:


@ -303,7 +303,7 @@ def disable(name, **kwargs):
salt '*' service.disable <service name>
'''
cmd = ['sc', 'config', name, 'start=', 'demand']
cmd = ['sc', 'config', name, 'start=', 'disabled']
return not __salt__['cmd.retcode'](cmd, python_shell=False)


@ -124,6 +124,30 @@ def lock_holders(path,
ephemeral_lease=False):
'''
Return an un-ordered list of lock holders
path
The path in zookeeper where the lock is
zk_hosts
zookeeper connect string
identifier
Name to identify this minion
max_concurrency
Maximum number of lock holders
timeout
timeout to wait for the lock. A None timeout will block forever
ephemeral_lease
Whether the locks in zookeeper should be ephemeral
Example:
.. code-block:: bash
salt minion zk_concurrency.lock_holders /lock/path host1:1234,host2:1234
'''
zk = _get_zk_conn(zk_hosts)
@ -146,6 +170,33 @@ def lock(path,
):
'''
Get lock (with optional timeout)
path
The path in zookeeper where the lock is
zk_hosts
zookeeper connect string
identifier
Name to identify this minion
max_concurrency
Maximum number of lock holders
timeout
timeout to wait for the lock. A None timeout will block forever
ephemeral_lease
Whether the locks in zookeeper should be ephemeral
force
Forcibly acquire the lock regardless of available slots
Example:
.. code-block:: bash
salt minion zk_concurrency.lock /lock/path host1:1234,host2:1234
'''
zk = _get_zk_conn(zk_hosts)
if path not in SEMAPHORE_MAP:
@ -178,6 +229,30 @@ def unlock(path,
):
'''
Remove lease from semaphore
path
The path in zookeeper where the lock is
zk_hosts
zookeeper connect string
identifier
Name to identify this minion
max_concurrency
Maximum number of lock holders
timeout
timeout to wait for the lock. A None timeout will block forever
ephemeral_lease
Whether the locks in zookeeper should be ephemeral
Example:
.. code-block:: bash
salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234
'''
# if someone passed in zk_hosts, and the path isn't in SEMAPHORE_MAP, lets
# see if we can find it


@ -16,7 +16,7 @@ to look for Pillar files (such as ``top.sls``).
.. versionchanged:: 2014.7.0
The optional ``root`` parameter will be added.
.. versionchanged:: @TBD
.. versionchanged:: 2015.2.0
The special branch name '__env__' will be replaced by the
environment ({{env}})
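As a sketch of that substitution (the repository URL here is hypothetical),
an ext_pillar entry using the special branch name might look like:

.. code-block:: yaml

    ext_pillar:
      - git: __env__ https://github.com/example/pillar.git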


@ -2,6 +2,9 @@
'''
Retrieve Pillar data by doing a MySQL query
MariaDB provides Python support through the MySQL Python package.
Therefore, you may use this module with either MySQL or MariaDB.
:maturity: new
:depends: python-mysqldb
:platform: all
@ -90,7 +93,7 @@ Depth of 0 translates to the largest depth needed, so 3 in this case.
The legacy compatibility translates to depth 1.
Then they are merged the in a similar way to plain pillar data, in the order
Then they are merged in a similar way to plain pillar data, in the order
returned by MySQL.
Thus subsequent results overwrite previous ones when they collide.
@ -141,9 +144,9 @@ These columns define list grouping
The range for with_lists is 1 to number_of_fields, inclusive.
Numbers outside this range are ignored.
Finally, if you use pass the queries in via a mapping, the key will be the
Finally, if you pass the queries in via a mapping, the key will be the
first level name, whereas passing them in as a list will place them in the
root. This isolates the query results in to their own subtrees.
root. This isolates the query results into their own subtrees.
This may be a help or hindrance to your aims and can be used as such.
You can basically use any SELECT query that gets you the information, you
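As a hedged illustration of the mapping form described above (the database,
table, and key names are invented), a master configuration might be:

.. code-block:: yaml

    ext_pillar:
      - mysql:
          fromdb:
            query: 'SELECT pillar_key, pillar_value FROM pillars WHERE minion_id = %s'
            depth: 2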
@ -265,7 +268,7 @@ class Merger(object):
def extract_queries(self, args, kwargs):
'''
This function normalizes the config block in to a set of queries we
This function normalizes the config block into a set of queries we
can use. The return is a list of consistently laid out dicts.
'''
# Please note the function signature is NOT an error. Neither args, nor
@ -298,7 +301,7 @@ class Merger(object):
(isinstance(x[1], dict) and 'query' in x[1] and len(x[1]['query']))
)]
# Next, turn the whole buffer in to full dicts.
# Next, turn the whole buffer into full dicts.
for qb in qbuffer:
defaults = {'query': '',
'depth': 0,
@ -351,7 +354,7 @@ class Merger(object):
def process_results(self, rows):
'''
This function takes a list of database results and iterates over,
merging them in to a dict form.
merging them into a dict form.
'''
listify = OrderedDict()
listify_dicts = OrderedDict()
@ -424,7 +427,7 @@ class Merger(object):
if ret[self.depth-1] not in listify[id(crd)]:
listify[id(crd)].append(ret[self.depth-1])
crd = crd[ret[self.depth-1]]
# Now for the remaining keys, we put them in to the dict
# Now for the remaining keys, we put them into the dict
for i in range(self.depth, self.num_fields):
nk = self.field_names[i]
# Listify


@ -14,6 +14,7 @@ import salt.wheel
# Import 3rd-party libs
import salt.ext.six as six
from salt.exceptions import SaltClientError
def __virtual__():
@ -66,9 +67,13 @@ def execution():
client = salt.client.get_local_client(__opts__['conf_file'])
docs = {}
for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']):
for val in six.itervalues(ret):
docs.update(val)
try:
for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']):
for v in six.itervalues(ret):
docs.update(v)
except SaltClientError as exc:
print(exc)
return []
i = itertools.chain.from_iterable([six.iteritems(i) for i in six.itervalues(docs)])
ret = dict(list(i))


@ -18,6 +18,7 @@ import salt.minion
# Import 3rd-party libs
import salt.ext.six as six
from salt.exceptions import SaltClientError
log = logging.getLogger(__name__)
@ -35,7 +36,12 @@ def active(outputter=None, display_progress=False):
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
active_ = client.cmd('*', 'saltutil.running', timeout=__opts__['timeout'])
try:
active_ = client.cmd('*', 'saltutil.running', timeout=__opts__['timeout'])
except SaltClientError as client_error:
print(client_error)
return ret
if display_progress:
__jid_event__.fire_event({
'message': 'Attempting to contact minions: {0}'.format(list(active_.keys()))


@ -25,6 +25,7 @@ import salt.utils.minions
import salt.wheel
import salt.version
from salt.utils.event import tagify
from salt.exceptions import SaltClientError
FINGERPRINT_REGEX = re.compile(r'^([a-f0-9]{2}:){15}([a-f0-9]{2})$')
@ -39,13 +40,17 @@ def status(output=True):
salt-run manage.status
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
minions = client.cmd('*', 'test.ping', timeout=__opts__['timeout'])
try:
minions = client.cmd('*', 'test.ping', timeout=__opts__['timeout'])
except SaltClientError as client_error:
print(client_error)
return ret
key = salt.key.Key(__opts__)
keys = key.list_keys()
ret = {}
ret['up'] = sorted(minions)
ret['down'] = sorted(set(keys['minions']) - set(minions))
return ret
@ -77,7 +82,11 @@ def key_regen():
salt-run manage.key_regen
'''
client = salt.client.get_local_client(__opts__['conf_file'])
client.cmd('*', 'saltutil.regen_keys')
try:
client.cmd('*', 'saltutil.regen_keys')
except SaltClientError as client_error:
print(client_error)
return False
for root, _, files in os.walk(__opts__['pki_dir']):
for fn_ in files:
@ -562,8 +571,13 @@ def versions():
salt-run manage.versions
'''
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
minions = client.cmd('*', 'test.version', timeout=__opts__['timeout'])
try:
minions = client.cmd('*', 'test.version', timeout=__opts__['timeout'])
except SaltClientError as client_error:
print(client_error)
return ret
labels = {
-1: 'Minion requires update',
@ -587,7 +601,6 @@ def versions():
# Add version of Master to output
version_status[2] = master_version.string
ret = {}
for key in version_status:
if key == 2:
ret[labels[key]] = version_status[2]


@ -18,6 +18,7 @@ from __future__ import absolute_import
import salt.client
from salt.ext.six.moves import range
from salt.exceptions import SaltClientError
def hash(*args, **kwargs):
@ -157,13 +158,18 @@ def _get_pool_results(*args, **kwargs):
tgt = args[0]
cmd = args[1]
ret = {}
sort = kwargs.get('survey_sort', 'down')
direction = sort != 'up'
client = salt.client.get_local_client(__opts__['conf_file'])
minions = client.cmd(tgt, cmd, args[2:], timeout=__opts__['timeout'])
ret = {}
try:
minions = client.cmd(tgt, cmd, args[2:], timeout=__opts__['timeout'])
except SaltClientError as client_error:
print(client_error)
return ret
# hash minion return values as a string
for minion in sorted(minions):
digest = hashlib.sha256(str(minions[minion])).hexdigest()


@ -11,6 +11,7 @@ import logging
import salt.client
import salt.utils.virt
import salt.key
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import salt.ext.six as six
@ -64,28 +65,30 @@ def query(hyper=None, quiet=False):
log.warn('\'quiet\' is deprecated. Please migrate to --quiet')
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
for info in client.cmd_iter('virtual:physical',
'virt.full_info', expr_form='grain'):
if not info:
continue
if not isinstance(info, dict):
continue
chunk = {}
id_ = next(six.iterkeys(info))
if hyper:
if hyper != id_:
try:
for info in client.cmd_iter('virtual:physical',
'virt.full_info', expr_form='grain'):
if not info:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
if not isinstance(info[id_]['ret'], dict):
continue
chunk[id_] = info[id_]['ret']
ret.update(chunk)
if not quiet:
__jid_event__.fire_event({'data': chunk, 'outputter': 'virt_query'}, 'progress')
if not isinstance(info, dict):
continue
chunk = {}
id_ = next(six.iterkeys(info))
if hyper:
if hyper != id_:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
if not isinstance(info[id_]['ret'], dict):
continue
chunk[id_] = info[id_]['ret']
ret.update(chunk)
if not quiet:
__jid_event__.fire_event({'data': chunk, 'outputter': 'virt_query'}, 'progress')
except SaltClientError as client_error:
print(client_error)
return ret
@ -223,19 +226,23 @@ def init(
client = salt.client.get_local_client(__opts__['conf_file'])
__jid_event__.fire_event({'message': 'Creating VM {0} on hypervisor {1}'.format(name, hyper)}, 'progress')
cmd_ret = client.cmd_iter(
hyper,
'virt.init',
[
name,
cpu,
mem,
image,
'seed={0}'.format(seed),
'nic={0}'.format(nic),
'install={0}'.format(install),
],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.init',
[
name,
cpu,
mem,
image,
'seed={0}'.format(seed),
'nic={0}'.format(nic),
'install={0}'.format(install),
],
timeout=600)
except SaltClientError as client_error:
# Fall through to ret error handling below
print(client_error)
ret = next(cmd_ret)
if not ret:
@ -265,14 +272,17 @@ def reset(name):
__jid_event__.fire_event({'message': 'Failed to find vm {0} to reset'.format(name)}, 'progress')
return 'fail'
hyper = next(six.iterkeys(data))
cmd_ret = client.cmd_iter(
hyper,
'virt.reset',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.reset',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
except SaltClientError as client_error:
print(client_error)
return ret
@ -290,11 +300,14 @@ def start(name):
if data[hyper][name]['state'] == 'running':
print('VM {0} is already running'.format(name))
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.start',
[name],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.start',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} not started: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Started VM {0}'.format(name)}, 'progress')
@ -315,11 +328,14 @@ def force_off(name):
if data[hyper][name]['state'] == 'shutdown':
print('VM {0} is already shutdown'.format(name))
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.destroy',
[name],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.destroy',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be forced off: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Powered off VM {0}'.format(name)}, 'progress')
@ -337,11 +353,15 @@ def purge(name, delete_key=True):
__jid_event__.fire_event({'error': 'Failed to find vm {0} to purge'.format(name)}, 'progress')
return 'fail'
hyper = next(six.iterkeys(data))
cmd_ret = client.cmd_iter(
hyper,
'virt.purge',
[name, True],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.purge',
[name, True],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be purged: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
@ -367,11 +387,14 @@ def pause(name):
if data[hyper][name]['state'] == 'paused':
__jid_event__.fire_event({'error': 'VM {0} is already paused'.format(name)}, 'progress')
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.pause',
[name],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.pause',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be paused: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Paused VM {0}'.format(name)}, 'progress')
@ -392,11 +415,14 @@ def resume(name):
if data[hyper][name]['state'] != 'paused':
__jid_event__.fire_event({'error': 'VM {0} is not paused'.format(name)}, 'progress')
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.resume',
[name],
timeout=600)
try:
cmd_ret = client.cmd_iter(
hyper,
'virt.resume',
[name],
timeout=600)
except SaltClientError as client_error:
return 'Virtual machine {0} could not be resumed: {1}'.format(name, client_error)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Resumed VM {0}'.format(name)}, 'progress')
@ -425,10 +451,13 @@ def migrate(name, target=''):
if target not in data:
__jid_event__.fire_event({'error': 'Target hypervisor {0} not found'.format(origin_data)}, 'progress')
return ''
client.cmd(target, 'virt.seed_non_shared_migrate', [disks, True])
jid = client.cmd_async(origin_hyper,
'virt.migrate_non_shared',
[name, target])
try:
client.cmd(target, 'virt.seed_non_shared_migrate', [disks, True])
jid = client.cmd_async(origin_hyper,
'virt.migrate_non_shared',
[name, target])
except SaltClientError as client_error:
return 'Virtual machine {0} could not be migrated: {1}'.format(name, client_error)
msg = ('The migration of virtual machine {0} to hypervisor {1} has begun, '
'and can be tracked via jid {2}. The ``salt-run virt.query`` '


@ -2830,7 +2830,7 @@ class BaseHighState(object):
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if 'is not available on the salt master' in error:
if 'is not available' in error:
# match SLS foobar in environment
this_sls = 'SLS {0} in saltenv'.format(
sls_match)


@ -223,18 +223,23 @@ def extracted(name,
tar_cmd = ['tar']
tar_shortopts = 'x'
tar_longopts = []
tar_afterfile = []
for opt in tar_opts:
if not opt.startswith('-'):
for shortopt in opt:
if shortopt not in ['x', 'f']:
tar_shortopts = tar_shortopts + shortopt
else:
for position, opt in enumerate(tar_opts):
if opt.startswith('-'):
tar_longopts.append(opt)
else:
if position > 0:
tar_afterfile.append(opt)
else:
append_opt = opt
append_opt = append_opt.replace('x', '').replace('f', '')
tar_shortopts = tar_shortopts + append_opt
tar_cmd.append(tar_shortopts)
tar_cmd.extend(tar_longopts)
tar_cmd.extend(['-f', filename])
tar_cmd.extend(tar_afterfile)
results = __salt__['cmd.run_all'](tar_cmd, cwd=name, python_shell=False)
if results['retcode'] != 0:
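The rewritten loop above classifies each user-supplied option by shape:
dash-prefixed words pass through as long options, a leading bare word is folded
into the short-option cluster (with ``x`` and ``f`` stripped, since both are
supplied explicitly), and any later bare words are appended after the archive
name. The same classification as a standalone sketch, not the state function
itself:

.. code-block:: python

    def parse_tar_opts(tar_opts, filename):
        '''Sketch of the option handling added above.'''
        tar_cmd = ['tar']
        tar_shortopts = 'x'
        tar_longopts = []
        tar_afterfile = []
        for position, opt in enumerate(tar_opts):
            if opt.startswith('-'):
                tar_longopts.append(opt)
            elif position > 0:
                # bare words after the first belong after the archive name
                tar_afterfile.append(opt)
            else:
                # strip 'x' and 'f'; they are added explicitly below
                tar_shortopts += opt.replace('x', '').replace('f', '')
        tar_cmd.append(tar_shortopts)
        tar_cmd.extend(tar_longopts)
        tar_cmd.extend(['-f', filename])
        tar_cmd.extend(tar_afterfile)
        return tar_cmd

    print(parse_tar_opts('xvzf --wildcards etc/*'.split(), 'site.tar.gz'))
    # ['tar', 'xvz', '--wildcards', '-f', 'site.tar.gz', 'etc/*']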

View file

@ -65,6 +65,11 @@ def tuned(name, **kwargs):
'name': name,
'result': True}
kwarg_map = {'read-ahead': 'getra',
'filesystem-read-ahead': 'getfra',
'read-only': 'getro',
'read-write': 'getro'}
if not __salt__['file.is_blkdev'](name):
ret['comment'] = ('Changes to {0} cannot be applied. '
'Not a block device. ').format(name)
@ -73,11 +78,30 @@ def tuned(name, **kwargs):
ret['result'] = None
return ret
else:
current = __salt__['blockdev.dump'](name)
changes = __salt__['blockdev.tune'](name, **kwargs)
changeset = {}
for key in kwargs:
if key in kwarg_map:
switch = kwarg_map[key]
if current[switch] != changes[switch]:
if isinstance(kwargs[key], bool):
old = (current[switch] == '1')
new = (changes[switch] == '1')
else:
old = current[switch]
new = changes[switch]
if key == 'read-write':
old = not old
new = not new
changeset[key] = 'Changed from {0} to {1}'.format(old, new)
if changes:
ret['comment'] = ('Block device {0} '
'successfully modified ').format(name)
ret['changes'] = changes
if changeset:
ret['comment'] = ('Block device {0} '
'successfully modified ').format(name)
ret['changes'] = changeset
else:
ret['comment'] = 'Block device {0} already in correct state'.format(name)
else:
ret['comment'] = 'Failed to modify block device {0}'.format(name)
ret['result'] = False
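The new logic above reports only the settings that actually moved: it diffs a
``blockdev.dump`` taken before the tune against the post-tune values, renders
boolean kwargs as ``True``/``False``, and inverts ``read-write`` because it is
probed through the same ``getro`` flag as ``read-only``. A standalone sketch of
the changeset construction, with hypothetical before/after dumps:

.. code-block:: python

    kwarg_map = {'read-ahead': 'getra',
                 'filesystem-read-ahead': 'getfra',
                 'read-only': 'getro',
                 'read-write': 'getro'}

    def build_changeset(current, changes, kwargs):
        '''Report only the settings that actually changed (sketch).'''
        changeset = {}
        for key, value in kwargs.items():
            switch = kwarg_map.get(key)
            if switch is None or current[switch] == changes[switch]:
                continue
            if isinstance(value, bool):
                old = current[switch] == '1'
                new = changes[switch] == '1'
            else:
                old, new = current[switch], changes[switch]
            if key == 'read-write':
                # getro reports read-only state, so invert for read-write
                old, new = not old, not new
            changeset[key] = 'Changed from {0} to {1}'.format(old, new)
        return changeset

    print(build_changeset({'getra': '256', 'getro': '1'},
                          {'getra': '512', 'getro': '0'},
                          {'read-ahead': 512, 'read-write': True}))
    # {'read-ahead': 'Changed from 256 to 512',
    #  'read-write': 'Changed from False to True'}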

View file

@ -298,11 +298,14 @@ def present(
else:
# Split keyline to get key type, value and comment
keyline = keyline.split(' ')
key_type = keyline[0]
key_value = keyline[1]
key_comment = keyline[2] if len(keyline) > 2 else ''
data = __salt__['ssh.set_auth_key'](
user,
keyline[1],
keyline[0],
keyline[2],
key_value,
key_type,
key_comment,
options or [],
config)
else:
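The fix above gives the split fields names and, more importantly, stops
indexing ``keyline[2]`` unconditionally, so entries without a trailing comment
no longer raise ``IndexError``. The same split as a standalone sketch (key
material is illustrative):

.. code-block:: python

    def split_authorized_key(keyline):
        '''Split an authorized_keys entry into (type, key, comment);
        the comment is optional (sketch).'''
        parts = keyline.split(' ')
        key_type = parts[0]
        key_value = parts[1]
        key_comment = parts[2] if len(parts) > 2 else ''
        return key_type, key_value, key_comment

    print(split_authorized_key('ssh-rsa AAAAB3Nza...'))
    # ('ssh-rsa', 'AAAAB3Nza...', '')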

View file

@ -45,7 +45,7 @@ The following example installs all driver updates that don't require a reboot:
.. code-block:: yaml
gryffindor:
win_update.install:
win_update.installed:
- includes:
- driver: True
- software: False

View file

@ -19,10 +19,11 @@ DEVICE={{name}}
{%endif%}{%endif%}{% if srcaddr %}SRCADDR={{srcaddr}}
{%endif%}{% if peerdns %}PEERDNS={{peerdns}}
{%endif%}{% if bridge %}BRIDGE={{bridge}}
{%endif%}{% if delay %}DELAY={{delay}}
{%endif%}{% if stp %}STP={{stp}}
{%endif%}{% if delay or delay == 0 %}DELAY={{delay}}
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}

View file

@ -24,10 +24,11 @@
{%endif%}{% if peerdns %}PEERDNS="{{peerdns}}"
{%endif%}{% if defroute %}DEFROUTE="{{defroute}}"
{%endif%}{% if bridge %}BRIDGE="{{bridge}}"
{%endif%}{% if delay %}DELAY="{{delay}}"
{%endif%}{% if stp %}STP="{{stp}}"
{%endif%}{% if delay or delay == 0 %}DELAY="{{delay}}"
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}

View file

@ -20,11 +20,12 @@ DEVICE="{{name}}"
{%endif%}{% if peerdns %}PEERDNS="{{peerdns}}"
{%endif%}{% if defroute %}DEFROUTE="{{defroute}}"
{%endif%}{% if bridge %}BRIDGE="{{bridge}}"
{%endif%}{% if delay %}DELAY="{{delay}}"
{%endif%}{% if stp %}STP="{{stp}}"
{%endif%}{% if delay or delay == 0 %}DELAY="{{delay}}"
{%endif%}{% if mtu %}MTU="{{mtu}}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}
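All three template hunks replace ``{% if delay %}`` with
``{% if delay or delay == 0 %}``: Jinja treats the integer ``0`` as falsey, so
an explicit ``DELAY=0`` (no bridge forwarding delay) was silently dropped from
the generated ifcfg file. A minimal reproduction, assuming only that jinja2 is
installed:

.. code-block:: python

    from jinja2 import Template

    old = Template('{% if delay %}DELAY={{delay}}{% endif %}')
    new = Template('{% if delay or delay == 0 %}DELAY={{delay}}{% endif %}')

    print(old.render(delay=0))     # '' -- the setting is lost
    print(new.render(delay=0))     # 'DELAY=0'
    print(new.render(delay=None))  # '' -- still omitted when unset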

View file

@ -589,7 +589,7 @@ def wait_for_port(host, port=22, timeout=900, gateway=None):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(30)
sock.connect((test_ssh_host, test_ssh_port))
sock.connect((test_ssh_host, int(test_ssh_port)))
# Stop any remaining reads/writes on the socket
sock.shutdown(socket.SHUT_RDWR)
# Close it!
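The one-character fix above matters because ``socket.connect`` requires an
integer port; when the port arrives as a string (for example from a YAML
profile), Python raises ``TypeError`` and the wait loop dies. A trimmed-down
sketch of the probe:

.. code-block:: python

    import socket

    def port_open(host, port, timeout=30):
        '''True if a TCP connect succeeds (sketch of the probe above).'''
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(timeout)
        try:
            # coerce to int: a string port raises TypeError in connect()
            sock.connect((host, int(port)))
            sock.shutdown(socket.SHUT_RDWR)
            return True
        except socket.error:
            return False
        finally:
            sock.close()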

View file

@ -2,24 +2,26 @@
'''
Manage events
Events are all fired off via a zeromq 'pub' socket, and listened to with
local zeromq 'sub' sockets
Events are all fired off via a zeromq 'pub' socket, and listened to with local
zeromq 'sub' sockets
All of the formatting is self contained in the event module, so
we should be able to modify the structure in the future since the same module
used to read events is the same module used to fire off events.
All of the formatting is self contained in the event module, so we should be
able to modify the structure in the future since the same module used to read
events is the same module used to fire off events.
Old style event messages were comprised of two parts delimited
at the 20 char point. The first 20 characters are used for the zeromq
subscriber to match publications and 20 characters was chosen because it was at
the time a few more characters than the length of a jid (Job ID).
Any tags of length less than 20 characters were padded with "|" chars out to
20 characters. Although not explicit, the data for an event comprised a
python dict that was serialized by msgpack.
Old style event messages were comprised of two parts delimited at the 20 char
point. The first 20 characters are used for the zeromq subscriber to match
publications and 20 characters was chosen because it was at the time a few more
characters than the length of a jid (Job ID). Any tags of length less than 20
characters were padded with "|" chars out to 20 characters.
Although not explicit, the data for an event comprised a python dict that was
serialized by msgpack.
New style event messages support event tags longer than 20 characters while
still being backwards compatible with old style tags.
The longer tags better enable name spaced event tags which tend to be longer.
Moreover, the constraint that the event data be a python dict is now an
explicit constraint and fire-event will now raise a ValueError if not. Tags
@ -29,15 +31,14 @@ Since the msgpack dict (map) indicators have values greater than or equal to
0x80 it can be unambiguously determined if the start of data is at char 21
or not.
In the new style:
When the tag is longer than 20 characters, an end of tag string is appended to
the tag given by the string constant TAGEND, that is, two line feeds '\n\n'.
When the tag is less than 20 characters then the tag is padded with pipes
"|" out to 20 characters as before.
When the tag is exactly 20 characters no padding is done.
In the new style, when the tag is longer than 20 characters, an end of tag
string is appended to the tag given by the string constant TAGEND, that is, two
line feeds '\n\n'. When the tag is less than 20 characters then the tag is
padded with pipes "|" out to 20 characters as before. When the tag is exactly
20 characters no padding is done.
The get_event method intelligently figures out if the tag is longer than
20 characters.
The get_event method intelligently figures out if the tag is longer than 20
characters.
The convention for namespacing is to use dot characters "." as the name space
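The rewrapped docstring pins down the wire format well enough to illustrate it:
short tags are padded to 20 characters with ``|``, long tags get the ``TAGEND``
sentinel (two line feeds) appended, and the reader can disambiguate because a
serialized msgpack map always begins with a byte >= 0x80, which never occurs in
a tag. A rough sketch of that framing under Python 3 semantics (indexing bytes
yields an int); this is an illustration, not the ``SaltEvent`` implementation:

.. code-block:: python

    import msgpack

    TAGEND = '\n\n'

    def pack_event(tag, data):
        '''Frame an event per the scheme described above (illustration).'''
        if len(tag) < 20:
            tag = tag.ljust(20, '|')   # pad short tags with pipes
        elif len(tag) > 20:
            tag += TAGEND              # mark the end of a long tag
        return tag.encode() + msgpack.dumps(data)

    def unpack_event(raw):
        '''Recover (tag, data) from a framed event (illustration).'''
        # a msgpack map marker is always >= 0x80, so byte 21 tells a
        # padded or exactly-20-char tag apart from a longer one
        if raw[20] >= 0x80:
            tag, packed = raw[:20].rstrip(b'|'), raw[20:]
        else:
            tag, _, packed = raw.partition(TAGEND.encode())
        return tag.decode(), msgpack.loads(packed)

    framed = pack_event('salt/job/12345/ret/minion1', {'fun': 'test.ping'})
    print(unpack_event(framed))
    # ('salt/job/12345/ret/minion1', {'fun': 'test.ping'})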
@ -258,6 +259,7 @@ class SaltEvent(object):
self.sub.connect(self.puburi)
self.poller.register(self.sub, zmq.POLLIN)
self.sub.setsockopt(zmq.SUBSCRIBE, '')
self.sub.setsockopt(zmq.LINGER, 5000)
self.cpub = True
def connect_pull(self, timeout=1000):
@ -275,6 +277,7 @@ class SaltEvent(object):
# This is for ZMQ < 2.2 (Caught when ssh'ing into the Jenkins
# CentOS5, which still uses 2.1.9)
pass
self.push.setsockopt(zmq.LINGER, timeout)
self.push.connect(self.pulluri)
self.cpush = True
@ -514,10 +517,8 @@ class SaltEvent(object):
# Wait at most 2.5 secs to send any remaining messages in the
# socket or the context.term() below will hang indefinitely.
# See https://github.com/zeromq/pyzmq/issues/102
self.sub.setsockopt(zmq.LINGER, linger)
self.sub.close()
if self.cpush is True and self.push.closed is False:
self.push.setsockopt(zmq.LINGER, linger)
self.push.close()
# If sockets are not unregistered from a poller, nothing which touches
# that poller gets garbage collected. The Poller itself, its
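Setting ``zmq.LINGER`` when the socket is created, rather than just before
``close()``, means every shutdown path is covered, including ones that never
reach the cleanup code above. A minimal sketch of the option's effect (the
address is illustrative):

.. code-block:: python

    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUSH)
    # With LINGER set, close() discards queued-but-unsent messages after
    # 5 seconds instead of letting ctx.term() block indefinitely.
    sock.setsockopt(zmq.LINGER, 5000)
    sock.connect('tcp://127.0.0.1:4506')
    sock.close()
    ctx.term()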
@ -723,20 +724,22 @@ class EventReturn(multiprocessing.Process):
events = self.event.iter_events(full=True)
self.event.fire_event({}, 'salt/event_listen/start')
event_queue = []
try:
for event in events:
if self._filter(event):
event_queue.append(event)
if len(event_queue) >= self.event_return_queue:
self.minion.returners[
'{0}.event_return'.format(self.opts['event_return'])
](event_queue)
for event in events:
if self._filter(event):
event_queue.append(event)
if len(event_queue) >= self.event_return_queue:
event_return = '{0}.event_return'.format(
self.opts['event_return']
)
if event_return in self.minion.returners:
self.minion.returners[event_return](event_queue)
event_queue = []
except KeyError:
log.error((
'Could not store return for events {0}. Returner {1} '
'not found.'
).format(events, self.opts.get('event_return', None)))
else:
log.error(
'Could not store return for event(s) {0}. Returner '
'\'{1}\' not found.'
.format(event_queue, self.opts['event_return'])
)
def _filter(self, event):
'''
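The reworked loop above batches events until ``event_return_queue`` is reached
and verifies that the configured returner actually exposes an ``event_return``
function before dispatching, logging a targeted error instead of killing the
process with a ``KeyError``. The shape of that logic as a standalone sketch,
with a plain dict standing in for the loaded returners:

.. code-block:: python

    import logging

    log = logging.getLogger(__name__)

    def drain_events(events, returners, returner_name, queue_size=5):
        '''Batch events and hand each full batch to
        "<returner>.event_return" (sketch; the real loop also filters
        events and runs forever).'''
        event_queue = []
        fstr = '{0}.event_return'.format(returner_name)
        for event in events:
            event_queue.append(event)
            if len(event_queue) < queue_size:
                continue
            if fstr in returners:
                returners[fstr](event_queue)
                event_queue = []
            else:
                log.error("Could not store return for event(s) %s. "
                          "Returner '%s' not found.",
                          event_queue, returner_name)
        return event_queue  # anything still buffered

    # e.g. drain_events(range(12), {'mysql.event_return': print}, 'mysql')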

View file

@ -234,12 +234,6 @@ def query(url,
else:
req_kwargs['stream'] = True
result = sess.request(
method, url, params=params, data=data, **req_kwargs
)
if stream is True or handle is True:
return {'handle': result}
# Client-side cert handling
if cert is not None:
if isinstance(cert, six.string_types):
@ -252,6 +246,13 @@ def query(url,
log.error('The client-side certificate path that was passed is '
'not valid: {0}'.format(cert))
result = sess.request(
method, url, params=params, data=data, **req_kwargs
)
result.raise_for_status()
if stream is True or handle is True:
return {'handle': result}
result_status_code = result.status_code
result_headers = result.headers
result_text = result.text
@ -312,10 +313,10 @@ def query(url,
cert_kwargs = {
'host': request.get_host(),
'port': port,
'cert_file': cert[0]
'cert_file': cert_chain[0]
}
if len(cert) > 1:
cert_kwargs['key_file'] = cert[1]
if len(cert_chain) > 1:
cert_kwargs['key_file'] = cert_chain[1]
handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)
opener = urllib2.build_opener(*handlers)
@ -406,7 +407,7 @@ def query(url,
else:
text = True
if os.path.exists(decode_out):
if decode_out and os.path.exists(decode_out):
with salt.utils.fopen(decode_out, 'w') as dof:
dof.write(result_text)
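Two things moved above: the ``requests`` call is now issued only after the
client certificate has been folded into ``req_kwargs`` (previously the request
fired before ``cert`` was handled, so the certificate was ignored), and
``raise_for_status()`` surfaces HTTP errors early. A sketch of the corrected
ordering, with hypothetical paths:

.. code-block:: python

    import requests

    def query(url, cert=None, **req_kwargs):
        '''Fold the cert kwarg in before issuing the request (sketch
        mirroring the reordering in the hunk above).'''
        if cert is not None:
            if isinstance(cert, str):
                req_kwargs['cert'] = cert   # combined PEM file
            elif isinstance(cert, tuple) and len(cert) == 2:
                req_kwargs['cert'] = cert   # (certfile, keyfile)
        result = requests.request('GET', url, **req_kwargs)
        result.raise_for_status()  # surface 4xx/5xx as exceptions
        return result.text

    # query('https://example.com', cert=('/path/client.crt', '/path/client.key'))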

View file

@ -15,6 +15,9 @@ log = logging.getLogger(__name__)
def store_job(opts, load, event=None, mminion=None):
'''
Store job information using the configured master_job_cache
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
@ -22,27 +25,44 @@ def store_job(opts, load, event=None, mminion=None):
return False
if mminion is None:
mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
job_cache = opts['master_job_cache']
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
load['jid'] = mminion.returners[prep_fstr](
nocache=load.get('nocache', False))
try:
load['jid'] = mminion.returners[prep_fstr](nocache=load.get('nocache', False))
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(opts['master_job_cache'])
mminion.returners[saveload_fstr](load['jid'], load)
saveload_fstr = '{0}.save_load'.format(job_cache)
try:
mminion.returners[saveload_fstr](load['jid'], load)
except KeyError:
emsg = "Returner '{0}' does not support function save_load".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
elif salt.utils.jid.is_jid(load['jid']):
# Store the jid
jidstore_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
jidstore_fstr = '{0}.prep_jid'.format(job_cache)
try:
mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
if event:
# If the return data is invalid, just ignore it
log.info('Got return from {id} for job {jid}'.format(**load))
event.fire_event(
load, tagify([load['jid'], 'ret', load['id']], 'job'))
event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
event.fire_ret_load(load)
# if you have a job_cache, or an ext_job_cache, don't write to
@ -51,12 +71,18 @@ def store_job(opts, load, event=None, mminion=None):
return
# otherwise, write to the master cache
fstr = '{0}.returner'.format(opts['master_job_cache'])
fstr = '{0}.returner'.format(job_cache)
if 'fun' not in load and load.get('return', {}):
ret_ = load.get('return', {})
if 'fun' in ret_:
load.update({'fun': ret_['fun']})
if 'user' in ret_:
load.update({'user': ret_['user']})
mminion.returners[fstr](load)
try:
mminion.returners[fstr](load)
except KeyError:
emsg = "Returner '{0}' does not support function returner".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
# vim:set et sts=4 ts=4 tw=80:
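Each returner call in ``store_job`` is now guarded the same way: build the
``"<cache>.<func>"`` key, try the lookup, and on ``KeyError`` log and re-raise
with a message naming the missing function instead of an opaque traceback. The
common shape, extracted as a sketch (the helper name is hypothetical):

.. code-block:: python

    import logging

    log = logging.getLogger(__name__)

    def _call_returner(returners, job_cache, func, *args, **kwargs):
        '''Hypothetical helper capturing the repeated guard above.'''
        fstr = '{0}.{1}'.format(job_cache, func)
        try:
            # look up first, so a KeyError raised *inside* the returner
            # is not misreported as a missing function
            returner = returners[fstr]
        except KeyError:
            emsg = "Returner '{0}' does not support function {1}".format(
                job_cache, func)
            log.error(emsg)
            raise KeyError(emsg)
        return returner(*args, **kwargs)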

View file

@ -16,16 +16,11 @@ def verify_fun(lazy_obj, fun):
if not fun:
raise salt.exceptions.SaltInvocationError(
'Must specify a function to run!\n'
'ex: salt-run manage.up'
'ex: manage.up'
)
if fun not in lazy_obj:
try:
lazy_obj[fun]
except KeyError:
# Runner function not found in the LazyLoader object
raise salt.exceptions.CommandExecutionError(
'\'{0}\' is not available'.format(fun)
)
# If the requested function isn't available, let's say why
raise salt.exceptions.CommandExecutionError(lazy_obj.missing_fun_string(fun))
class LazyDict(collections.MutableMapping):

View file

@ -221,7 +221,7 @@ def query(key, keyid, method='GET', params=None, headers=None,
if return_url is True:
return ret, requesturl
else:
if method == 'GET' or method == 'HEAD':
if result.status_code != requests.codes.ok:
return
ret = {'headers': []}
for header in result.headers:

View file

@ -611,6 +611,9 @@ class Schedule(object):
for job, data in six.iteritems(schedule):
if job == 'enabled' or not data:
continue
if not isinstance(data, dict):
log.error('Scheduled job "{0}" should have a dict value, not {1}'.format(job, type(data)))
continue
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
continue

View file

@ -595,7 +595,7 @@ class SaltDistribution(distutils.dist.Distribution):
self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
self.version = __version__ # pylint: disable=undefined-variable
self.salt_version = __version__ # pylint: disable=undefined-variable
self.description = 'Portable, distributed, remote execution and configuration management system'
self.author = 'Thomas S Hatch'
self.author_email = 'thatch45@gmail.com'
@ -627,6 +627,8 @@ class SaltDistribution(distutils.dist.Distribution):
attrvalue = getattr(self, attrname, None)
if attrvalue == 0:
continue
if attrname == 'salt_version':
attrname = 'version'
if hasattr(self.metadata, 'set_{0}'.format(attrname)):
getattr(self.metadata, 'set_{0}'.format(attrname))(attrvalue)
elif hasattr(self.metadata, attrname):

View file

@ -15,7 +15,7 @@ ensure_in_syspath('../')
# Import Salt libs
import integration
from salt import client
from salt.exceptions import EauthAuthenticationError, SaltInvocationError
from salt.exceptions import EauthAuthenticationError, SaltInvocationError, SaltClientError
@skipIf(NO_MOCK, NO_MOCK_REASON)
@ -63,9 +63,7 @@ class LocalClientTestCase(TestCase,
def test_pub(self):
# Make sure we cleanly return if the publisher isn't running
with patch('os.path.exists', return_value=False):
ret = self.client.pub('*', 'test.ping')
expected_ret = {'jid': '0', 'minions': []}
self.assertDictEqual(ret, expected_ret)
self.assertRaises(SaltClientError, lambda: self.client.pub('*', 'test.ping'))
# Check nodegroups behavior
with patch('os.path.exists', return_value=True):